File: embedaddon / bird / nest / rt-table.c  (ELWIX - Embedded LightWeight unIX)
Revision 1.1.1.2 (vendor branch)
Wed Mar 17 19:50:23 2021 UTC by misho
Branches: bird, MAIN
CVS tags: v1_6_8p3, HEAD
bird 1.6.8

    1: /*
    2:  *	BIRD -- Routing Tables
    3:  *
    4:  *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
    5:  *
    6:  *	Can be freely distributed and used under the terms of the GNU GPL.
    7:  */
    8: 
    9: /**
   10:  * DOC: Routing tables
   11:  *
   12:  * Routing tables are probably the most important structures BIRD uses. They
   13:  * hold all the information about known networks, the associated routes and
   14:  * their attributes.
   15:  *
   16:  * There are multiple routing tables (a primary one together with any
   17:  * number of secondary ones if requested by the configuration). Each table
   18:  * is basically a FIB containing entries describing the individual
   19:  * destination networks. For each network (represented by structure &net),
   20:  * there is a one-way linked list of route entries (&rte), with the first
   21:  * entry on the list being the best one (i.e., the one we currently use
   22:  * for routing); the order of the remaining entries is undetermined.
   23:  *
   24:  * The &rte contains information specific to the route (preference, protocol
   25:  * metrics, time of last modification etc.) and a pointer to a &rta structure
   26:  * (see the route attribute module for a precise explanation) holding the
   27:  * remaining route attributes which are expected to be shared by multiple
   28:  * routes in order to conserve memory.
   29:  */
   30: 
   31: #undef LOCAL_DEBUG
   32: 
   33: #include "nest/bird.h"
   34: #include "nest/route.h"
   35: #include "nest/protocol.h"
   36: #include "nest/cli.h"
   37: #include "nest/iface.h"
   38: #include "lib/resource.h"
   39: #include "lib/event.h"
   40: #include "lib/string.h"
   41: #include "conf/conf.h"
   42: #include "filter/filter.h"
   43: #include "lib/string.h"
   44: #include "lib/alloca.h"
   45: 
   46: pool *rt_table_pool;
   47: 
   48: static slab *rte_slab;
   49: static linpool *rte_update_pool;
   50: 
   51: list routing_tables;
   52: 
   53: static byte *rt_format_via(rte *e);
   54: static void rt_free_hostcache(rtable *tab);
   55: static void rt_notify_hostcache(rtable *tab, net *net);
   56: static void rt_update_hostcache(rtable *tab);
   57: static void rt_next_hop_update(rtable *tab);
   58: static inline int rt_prune_table(rtable *tab);
   59: static inline void rt_schedule_gc(rtable *tab);
   60: static inline void rt_schedule_prune(rtable *tab);
   61: 
   62: 
   63: /* Like fib_route(), but skips empty net entries */
   64: static net *
   65: net_route(rtable *tab, ip_addr a, int len)
   66: {
   67:   ip_addr a0;
   68:   net *n;
   69: 
   70:   while (len >= 0)
   71:     {
   72:       a0 = ipa_and(a, ipa_mkmask(len));
   73:       n = fib_find(&tab->fib, &a0, len);
   74:       if (n && rte_is_valid(n->routes))
   75: 	return n;
   76:       len--;
   77:     }
   78:   return NULL;
   79: }
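/*
 * Illustrative sketch, not part of the original source: how the net/rte
 * layout described in the DOC comment above is usually traversed. The
 * helper name example_dump_net() is hypothetical; it only uses net_find()
 * and the &net/&rte fields referenced elsewhere in this file.
 */
static void
example_dump_net(rtable *tab, ip_addr prefix, int pxlen)
{
  net *n = net_find(tab, prefix, pxlen);	/* exact-match FIB lookup */
  rte *e;

  if (!n)
    return;

  /* The first entry of n->routes is the best route; the rest is unordered */
  for (e = n->routes; e; e = e->next)
    debug("%I/%d via proto %s%s\n", n->n.prefix, n->n.pxlen,
	  e->attrs->src->proto->name, (e == n->routes) ? " [best]" : "");
}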
   80: 
   81: static void
   82: rte_init(struct fib_node *N)
   83: {
   84:   net *n = (net *) N;
   85: 
   86:   N->flags = 0;
   87:   n->routes = NULL;
   88: }
   89: 
   90: /**
   91:  * rte_find - find a route
   92:  * @net: network node
   93:  * @src: route source
   94:  *
    95:  * The rte_find() function returns a route for destination @net
    96:  * originating from route source @src, or NULL if there is no such route.
   97:  */
   98: rte *
   99: rte_find(net *net, struct rte_src *src)
  100: {
  101:   rte *e = net->routes;
  102: 
  103:   while (e && e->attrs->src != src)
  104:     e = e->next;
  105:   return e;
  106: }
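/*
 * Illustrative sketch, not part of the original source: a protocol may use
 * rte_find() to check whether it still owns a route for a network, e.g. to
 * decide whether a withdraw is needed at all. The helper name is hypothetical.
 */
static int
example_has_own_route(net *n, struct rte_src *src)
{
  return rte_find(n, src) != NULL;
}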
  107: 
  108: /**
  109:  * rte_get_temp - get a temporary &rte
  110:  * @a: attributes to assign to the new route (a &rta; in case it's
  111:  * un-cached, rte_update() will create a cached copy automatically)
  112:  *
  113:  * Create a temporary &rte and bind it with the attributes @a.
  114:  * Also set route preference to the default preference set for
  115:  * the protocol.
  116:  */
  117: rte *
  118: rte_get_temp(rta *a)
  119: {
  120:   rte *e = sl_alloc(rte_slab);
  121: 
  122:   e->attrs = a;
  123:   e->flags = 0;
  124:   e->pref = a->src->proto->preference;
  125:   return e;
  126: }
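/*
 * Illustrative sketch, not part of the original source: the usual way a
 * protocol wraps freshly built attributes into a temporary &rte before
 * handing it to rte_update(). The helper name is hypothetical.
 */
static rte *
example_make_rte(rta *a, net *n)
{
  rte *e = rte_get_temp(a);	/* pref defaults to a->src->proto->preference */
  e->net = n;
  e->pflags = 0;
  return e;
}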
  127: 
  128: rte *
  129: rte_do_cow(rte *r)
  130: {
  131:   rte *e = sl_alloc(rte_slab);
  132: 
  133:   memcpy(e, r, sizeof(rte));
  134:   e->attrs = rta_clone(r->attrs);
  135:   e->flags = 0;
  136:   return e;
  137: }
  138: 
  139: /**
  140:  * rte_cow_rta - get a private writable copy of &rte with writable &rta
  141:  * @r: a route entry to be copied
  142:  * @lp: a linpool from which to allocate &rta
  143:  *
  144:  * rte_cow_rta() takes a &rte and prepares it and its associated &rta for
  145:  * modification. There are three possibilities: First, both &rte and &rta are
  146:  * private copies; in that case they are returned unchanged. Second, &rte is a
  147:  * private copy but &rta is cached; in that case &rta is duplicated using
  148:  * rta_do_cow(). Third, &rte is shared and &rta is cached; in that case
  149:  * both structures are duplicated by rte_do_cow() and rta_do_cow().
  150:  *
  151:  * Note that in the second case, the cached &rta loses one reference, while the
  152:  * private copy created by rta_do_cow() is a shallow copy sharing indirect data
  153:  * (eattrs, nexthops, ...) with it. To work properly, the original shared &rta
  154:  * should keep another reference during the life of the created private copy.
  155:  *
  156:  * Result: a pointer to the new writable &rte with writable &rta.
  157:  */
  158: rte *
  159: rte_cow_rta(rte *r, linpool *lp)
  160: {
  161:   if (!rta_is_cached(r->attrs))
  162:     return r;
  163: 
  164:   rte *e = rte_cow(r);
  165:   rta *a = rta_do_cow(r->attrs, lp);
  166:   rta_free(e->attrs);
  167:   e->attrs = a;
  168:   return e;
  169: }
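/*
 * Illustrative sketch, not part of the original source: a typical caller
 * pattern for rte_cow_rta(), mirroring its use in rt_export_merged() below.
 * The helper name and the attribute being modified are hypothetical.
 */
static rte *
example_set_igp_metric(rte *r, linpool *lp, u32 metric)
{
  r = rte_cow_rta(r, lp);	/* afterwards both r and r->attrs are writable */
  r->attrs->igp_metric = metric;
  return r;
}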
  170: 
  171: static int				/* Actually better or at least as good as */
  172: rte_better(rte *new, rte *old)
  173: {
  174:   int (*better)(rte *, rte *);
  175: 
  176:   if (!rte_is_valid(old))
  177:     return 1;
  178:   if (!rte_is_valid(new))
  179:     return 0;
  180: 
  181:   if (new->pref > old->pref)
  182:     return 1;
  183:   if (new->pref < old->pref)
  184:     return 0;
  185:   if (new->attrs->src->proto->proto != old->attrs->src->proto->proto)
  186:     {
  187:       /*
  188:        *  If the user has configured protocol preferences, so that two different protocols
  189:        *  have the same preference, try to break the tie by comparing addresses. Not too
  190:        *  useful, but keeps the ordering of routes unambiguous.
  191:        */
  192:       return new->attrs->src->proto->proto > old->attrs->src->proto->proto;
  193:     }
  194:   if (better = new->attrs->src->proto->rte_better)
  195:     return better(new, old);
  196:   return 0;
  197: }
  198: 
  199: static int
  200: rte_mergable(rte *pri, rte *sec)
  201: {
  202:   int (*mergable)(rte *, rte *);
  203: 
  204:   if (!rte_is_valid(pri) || !rte_is_valid(sec))
  205:     return 0;
  206: 
  207:   if (pri->pref != sec->pref)
  208:     return 0;
  209: 
  210:   if (pri->attrs->src->proto->proto != sec->attrs->src->proto->proto)
  211:     return 0;
  212: 
  213:   if (mergable = pri->attrs->src->proto->rte_mergable)
  214:     return mergable(pri, sec);
  215: 
  216:   return 0;
  217: }
  218: 
  219: static void
  220: rte_trace(struct proto *p, rte *e, int dir, char *msg)
  221: {
  222:   log(L_TRACE "%s %c %s %I/%d %s", p->name, dir, msg, e->net->n.prefix, e->net->n.pxlen, rt_format_via(e));
  223: }
  224: 
  225: static inline void
  226: rte_trace_in(uint flag, struct proto *p, rte *e, char *msg)
  227: {
  228:   if (p->debug & flag)
  229:     rte_trace(p, e, '>', msg);
  230: }
  231: 
  232: static inline void
  233: rte_trace_out(uint flag, struct proto *p, rte *e, char *msg)
  234: {
  235:   if (p->debug & flag)
  236:     rte_trace(p, e, '<', msg);
  237: }
  238: 
  239: static rte *
  240: export_filter_(struct announce_hook *ah, rte *rt0, rte **rt_free, ea_list **tmpa, linpool *pool, int silent)
  241: {
  242:   struct proto *p = ah->proto;
  243:   struct filter *filter = ah->out_filter;
  244:   struct proto_stats *stats = ah->stats;
  245:   ea_list *tmpb = NULL;
  246:   rte *rt;
  247:   int v;
  248: 
  249:   rt = rt0;
  250:   *rt_free = NULL;
  251: 
  252:   if (!tmpa)
  253:     tmpa = &tmpb;
  254: 
  255:   *tmpa = rte_make_tmp_attrs(rt, pool);
  256: 
  257:   v = p->import_control ? p->import_control(p, &rt, tmpa, pool) : 0;
  258:   if (v < 0)
  259:     {
  260:       if (silent)
  261: 	goto reject;
  262: 
  263:       stats->exp_updates_rejected++;
  264:       if (v == RIC_REJECT)
  265: 	rte_trace_out(D_FILTERS, p, rt, "rejected by protocol");
  266:       goto reject;
  267:     }
  268:   if (v > 0)
  269:     {
  270:       if (!silent)
  271: 	rte_trace_out(D_FILTERS, p, rt, "forced accept by protocol");
  272:       goto accept;
  273:     }
  274: 
  275:   v = filter && ((filter == FILTER_REJECT) ||
  276: 		 (f_run(filter, &rt, tmpa, pool,
  277: 			FF_FORCE_TMPATTR | (silent ? FF_SILENT : 0)) > F_ACCEPT));
  278:   if (v)
  279:     {
  280:       if (silent)
  281: 	goto reject;
  282: 
  283:       stats->exp_updates_filtered++;
  284:       rte_trace_out(D_FILTERS, p, rt, "filtered out");
  285:       goto reject;
  286:     }
  287: 
  288:  accept:
  289:   if (rt != rt0)
  290:     *rt_free = rt;
  291:   return rt;
  292: 
  293:  reject:
  294:   /* Discard temporary rte */
  295:   if (rt != rt0)
  296:     rte_free(rt);
  297:   return NULL;
  298: }
  299: 
  300: static inline rte *
  301: export_filter(struct announce_hook *ah, rte *rt0, rte **rt_free, ea_list **tmpa, int silent)
  302: {
  303:   return export_filter_(ah, rt0, rt_free, tmpa, rte_update_pool, silent);
  304: }
  305: 
  306: static void
  307: do_rt_notify(struct announce_hook *ah, net *net, rte *new, rte *old, ea_list *tmpa, int refeed)
  308: {
  309:   struct proto *p = ah->proto;
  310:   struct proto_stats *stats = ah->stats;
  311: 
  312: 
  313:   /*
  314:    * First, apply export limit.
  315:    *
  316:    * Export route limits have several problems. Because the exp_routes
  317:    * counter is reset before a refeed, we don't really know whether the
  318:    * limit is breached and whether the update is new or not. Therefore
  319:    * the number of really exported routes may exceed the limit
  320:    * temporarily (routes exported before and new routes in refeed).
  321:    *
  322:    * A minor advantage is that if the limit is decreased and a refeed is
  323:    * requested, the number of exported routes really decreases.
  324:    *
  325:    * The second problem is that with export limits, we don't know whether
  326:    * old was really exported (it might be blocked by the limit). When a
  327:    * withdraw is exported, we announce it even when the previous
  328:    * update was blocked. This is not a big issue, but the same problem
  329:    * is in updating the exp_routes counter. Therefore, to be consistent in
  330:    * increases and decreases of exp_routes, we count exported routes
  331:    * regardless of blocking by limits.
  332:    *
  333:    * A similar problem is in handling updates - when a new route is
  334:    * received and blocking is active, the route would be blocked, but
  335:    * when an update for the route is received later, the update
  336:    * would be propagated (as old != NULL). Therefore, we have to block
  337:    * non-new updates as well (contrary to import blocking).
  338:    */
  339: 
  340:   struct proto_limit *l = ah->out_limit;
  341:   if (l && new)
  342:     {
  343:       if ((!old || refeed) && (stats->exp_routes >= l->limit))
  344: 	proto_notify_limit(ah, l, PLD_OUT, stats->exp_routes);
  345: 
  346:       if (l->state == PLS_BLOCKED)
  347: 	{
  348: 	  stats->exp_routes++;	/* see note above */
  349: 	  stats->exp_updates_rejected++;
  350: 	  rte_trace_out(D_FILTERS, p, new, "rejected [limit]");
  351: 	  new = NULL;
  352: 
  353: 	  if (!old)
  354: 	    return;
  355: 	}
  356:     }
  357: 
  358: 
  359:   if (new)
  360:     stats->exp_updates_accepted++;
  361:   else
  362:     stats->exp_withdraws_accepted++;
  363: 
  364:   /* Hack: We do not decrease exp_routes during refeed, we instead
  365:      reset exp_routes at the start of refeed. */
  366:   if (new)
  367:     stats->exp_routes++;
  368:   if (old && !refeed)
  369:     stats->exp_routes--;
  370: 
  371:   if (p->debug & D_ROUTES)
  372:     {
  373:       if (new && old)
  374: 	rte_trace_out(D_ROUTES, p, new, "replaced");
  375:       else if (new)
  376: 	rte_trace_out(D_ROUTES, p, new, "added");
  377:       else if (old)
  378: 	rte_trace_out(D_ROUTES, p, old, "removed");
  379:     }
  380:   if (!new)
  381:     p->rt_notify(p, ah->table, net, NULL, old, NULL);
  382:   else if (tmpa)
  383:     {
  384:       ea_list *t = tmpa;
  385:       while (t->next)
  386: 	t = t->next;
  387:       t->next = new->attrs->eattrs;
  388:       p->rt_notify(p, ah->table, net, new, old, tmpa);
  389:       t->next = NULL;
  390:     }
  391:   else
  392:     p->rt_notify(p, ah->table, net, new, old, new->attrs->eattrs);
  393: }
  394: 
  395: static void
  396: rt_notify_basic(struct announce_hook *ah, net *net, rte *new0, rte *old0, int refeed)
  397: {
  398:   struct proto *p = ah->proto;
  399:   struct proto_stats *stats = ah->stats;
  400: 
  401:   rte *new = new0;
  402:   rte *old = old0;
  403:   rte *new_free = NULL;
  404:   rte *old_free = NULL;
  405:   ea_list *tmpa = NULL;
  406: 
  407:   if (new)
  408:     stats->exp_updates_received++;
  409:   else
  410:     stats->exp_withdraws_received++;
  411: 
  412:   /*
  413:    * This is a tricky part - we don't know whether route 'old' was
  414:    * exported to protocol 'p' or was filtered by the export filter.
  415:    * We try to run the export filter to find this out, so that we have a
  416:    * correct value in the 'old' argument of rte_update (and a proper filter value).
  417:    *
  418:    * FIXME - this is broken because 'configure soft' may change
  419:    * filters but keep routes. Refeed is expected to be called after
  420:    * a change of the filters and with old == new, therefore we do not
  421:    * even try to run the filter on an old route. This may lead to
  422:    * 'spurious withdraws' but ensures that there are no 'missing
  423:    * withdraws'.
  424:    *
  425:    * This is not completely safe as there is a window between
  426:    * reconfiguration and the end of refeed - if a newly filtered
  427:    * route disappears during this period, a proper withdraw is not
  428:    * sent (because old would also be filtered) and the route is
  429:    * not refeeded (because it disappeared before that). This is
  430:    * handled below as a special case.
  431:    */
  432: 
  433:   if (new)
  434:     new = export_filter(ah, new, &new_free, &tmpa, 0);
  435: 
  436:   if (old && !refeed)
  437:     old = export_filter(ah, old, &old_free, NULL, 1);
  438: 
  439:   if (!new && !old)
  440:   {
  441:     /*
  442:      * As mentioned above, the 'old' value may be incorrect in some race conditions.
  443:      * We generally ignore it, with two exceptions:
  444:      *
  445:      * First, a withdraw to a pipe protocol. In that case we rather propagate
  446:      * unfiltered withdraws regardless of export filters to ensure that when a
  447:      * protocol is flushed, its routes are removed from all tables. Possible
  448:      * spurious unfiltered withdraws are not a problem here as they are ignored if
  449:      * there is no corresponding route at the other end of the pipe.
  450:      *
  451:      * Second, a recent filter change. If the old route is older than the filter
  452:      * change, then it was previously evaluated by a different filter and we do
  453:      * not know whether it was really propagated. In that case we rather send a
  454:      * spurious withdraw than do nothing and possibly cause phantom routes.
  455:      *
  456:      * In both cases we directly call the rt_notify() hook instead of
  457:      * do_rt_notify() to avoid logging and stat counters.
  458:      */
  459: 
  460:     int pipe_withdraw = 0, filter_change = 0;
  461: #ifdef CONFIG_PIPE
  462:     pipe_withdraw = (p->proto == &proto_pipe) && !new0;
  463: #endif
  464:     filter_change = old0 && (old0->lastmod <= ah->last_out_filter_change);
  465: 
  466:     if ((pipe_withdraw || filter_change) && (p != old0->sender->proto))
  467:     {
  468:       stats->exp_withdraws_accepted++;
  469:       p->rt_notify(p, ah->table, net, NULL, old0, NULL);
  470:     }
  471: 
  472:     return;
  473:   }
  474: 
  475:   do_rt_notify(ah, net, new, old, tmpa, refeed);
  476: 
  477:   /* Discard temporary rte's */
  478:   if (new_free)
  479:     rte_free(new_free);
  480:   if (old_free)
  481:     rte_free(old_free);
  482: }
  483: 
  484: static void
  485: rt_notify_accepted(struct announce_hook *ah, net *net, rte *new_changed, rte *old_changed, rte *before_old, int feed)
  486: {
  487:   struct proto *p = ah->proto;
  488:   struct proto_stats *stats = ah->stats;
  489: 
  490:   rte *r;
  491:   rte *new_best = NULL;
  492:   rte *old_best = NULL;
  493:   rte *new_free = NULL;
  494:   rte *old_free = NULL;
  495:   ea_list *tmpa = NULL;
  496: 
  497:   /* Used to track whether we met old_changed position. If before_old is NULL
  498:      old_changed was the first and we met it implicitly before current best route. */
  499:   int old_meet = old_changed && !before_old;
  500: 
  501:   /* Note that before_old is either NULL or a valid (not rejected) route.
  502:      If old_changed is valid, before_old has to be too. If the old changed route
  503:      was not valid, the caller must use NULL for both old_changed and before_old. */
  504: 
  505:   if (new_changed)
  506:     stats->exp_updates_received++;
  507:   else
  508:     stats->exp_withdraws_received++;
  509: 
  510:   /* First, find the new_best route - first accepted by filters */
  511:   for (r=net->routes; rte_is_valid(r); r=r->next)
  512:     {
  513:       if (new_best = export_filter(ah, r, &new_free, &tmpa, 0))
  514: 	break;
  515: 
  516:       /* Note if we walked around the position of old_changed route */
  517:       if (r == before_old)
  518: 	old_meet = 1;
  519:     }
  520: 
  521:   /* 
  522:    * Second, handle the feed case. That means we do not care about
  523:    * old_best. It is NULL for feed, and the new_best for refeed.
  524:    * For refeed, there is a hack similar to the one in rt_notify_basic()
  525:    * to ensure withdraws in case of changed filters.
  526:    */
  527:   if (feed)
  528:     {
  529:       if (feed == 2)	/* refeed */
  530: 	old_best = new_best ? new_best :
  531: 	  (rte_is_valid(net->routes) ? net->routes : NULL);
  532:       else
  533: 	old_best = NULL;
  534: 
  535:       if (!new_best && !old_best)
  536: 	return;
  537: 
  538:       goto found;
  539:     }
  540: 
  541:   /*
  542:    * Now, we find the old_best route. Generally, it is the same as the
  543:    * new_best, unless new_best is the same as new_changed or
  544:    * old_changed is accepted before new_best.
  545:    *
  546:    * There are four cases:
  547:    *
  548:    * - We would find and accept old_changed before new_best, therefore
  549:    *   old_changed is old_best. In remaining cases we suppose this
  550:    *   is not true.
  551:    *
  552:    * - We found no new_best, therefore there is also no old_best and
  553:    *   we ignore this withdraw.
  554:    *
  555:    * - We found new_best different than new_changed, therefore
  556:    *   old_best is the same as new_best and we ignore this update.
  557:    *
  558:    * - We found new_best the same as new_changed, therefore it cannot
  559:    *   be old_best and we have to continue search for old_best.
  560:    *
  561:    * There is also a hack to ensure consistency in case of changed filters.
  562:    * It does not find the proper old_best, just selects a non-NULL route.
  563:    */
  564: 
  565:   /* Hack for changed filters */
  566:   if (old_changed &&
  567:       (p != old_changed->sender->proto) &&
  568:       (old_changed->lastmod <= ah->last_out_filter_change))
  569:     {
  570:       old_best = old_changed;
  571:       goto found;
  572:     }
  573: 
  574:   /* First case */
  575:   if (old_meet)
  576:     if (old_best = export_filter(ah, old_changed, &old_free, NULL, 1))
  577:       goto found;
  578: 
  579:   /* Second case */
  580:   if (!new_best)
  581:     return;
  582: 
  583:   /* Third case, we use r instead of new_best, because export_filter() could change it */
  584:   if (r != new_changed)
  585:     {
  586:       if (new_free)
  587: 	rte_free(new_free);
  588:       return;
  589:     }
  590: 
  591:   /* Fourth case */
  592:   for (r=r->next; rte_is_valid(r); r=r->next)
  593:     {
  594:       if (old_best = export_filter(ah, r, &old_free, NULL, 1))
  595: 	goto found;
  596: 
  597:       if (r == before_old)
  598: 	if (old_best = export_filter(ah, old_changed, &old_free, NULL, 1))
  599: 	  goto found;
  600:     }
  601: 
  602:   /* Implicitly, old_best is NULL and new_best is non-NULL */
  603: 
  604:  found:
  605:   do_rt_notify(ah, net, new_best, old_best, tmpa, (feed == 2));
  606: 
  607:   /* Discard temporary rte's */
  608:   if (new_free)
  609:     rte_free(new_free);
  610:   if (old_free)
  611:     rte_free(old_free);
  612: }
  613: 
  614: 
  615: static struct mpnh *
  616: mpnh_merge_rta(struct mpnh *nhs, rta *a, linpool *pool, int max)
  617: {
  618:   struct mpnh nh = { .gw = a->gw, .iface = a->iface };
  619:   struct mpnh *nh2 = (a->dest == RTD_MULTIPATH) ? a->nexthops : &nh;
  620:   return mpnh_merge(nhs, nh2, 1, 0, max, pool);
  621: }
  622: 
  623: rte *
  624: rt_export_merged(struct announce_hook *ah, net *net, rte **rt_free, ea_list **tmpa, linpool *pool, int silent)
  625: {
  626:   // struct proto *p = ah->proto;
  627:   struct mpnh *nhs = NULL;
  628:   rte *best0, *best, *rt0, *rt, *tmp;
  629: 
  630:   best0 = net->routes;
  631:   *rt_free = NULL;
  632: 
  633:   if (!rte_is_valid(best0))
  634:     return NULL;
  635: 
  636:   best = export_filter_(ah, best0, rt_free, tmpa, pool, silent);
  637: 
  638:   if (!best || !rte_is_reachable(best))
  639:     return best;
  640: 
  641:   for (rt0 = best0->next; rt0; rt0 = rt0->next)
  642:   {
  643:     if (!rte_mergable(best0, rt0))
  644:       continue;
  645: 
  646:     rt = export_filter_(ah, rt0, &tmp, NULL, pool, 1);
  647: 
  648:     if (!rt)
  649:       continue;
  650: 
  651:     if (rte_is_reachable(rt))
  652:       nhs = mpnh_merge_rta(nhs, rt->attrs, pool, ah->proto->merge_limit);
  653: 
  654:     if (tmp)
  655:       rte_free(tmp);
  656:   }
  657: 
  658:   if (nhs)
  659:   {
  660:     nhs = mpnh_merge_rta(nhs, best->attrs, pool, ah->proto->merge_limit);
  661: 
  662:     if (nhs->next)
  663:     {
  664:       best = rte_cow_rta(best, pool);
  665:       best->attrs->dest = RTD_MULTIPATH;
  666:       best->attrs->nexthops = nhs;
  667:     }
  668:   }
  669: 
  670:   if (best != best0)
  671:     *rt_free = best;
  672: 
  673:   return best;
  674: }
  675: 
  676: 
  677: static void
  678: rt_notify_merged(struct announce_hook *ah, net *net, rte *new_changed, rte *old_changed,
  679: 		 rte *new_best, rte*old_best, int refeed)
  680: {
  681:   // struct proto *p = ah->proto;
  682: 
  683:   rte *new_best_free = NULL;
  684:   rte *old_best_free = NULL;
  685:   rte *new_changed_free = NULL;
  686:   rte *old_changed_free = NULL;
  687:   ea_list *tmpa = NULL;
  688: 
  689:   /* We assume that all rte arguments are either NULL or rte_is_valid() */
  690: 
  691:   /* This check should be done by the caller */
  692:   if (!new_best && !old_best)
  693:     return;
  694: 
  695:   /* Check whether the change is relevant to the merged route */
  696:   if ((new_best == old_best) && !refeed)
  697:   {
  698:     new_changed = rte_mergable(new_best, new_changed) ?
  699:       export_filter(ah, new_changed, &new_changed_free, NULL, 1) : NULL;
  700: 
  701:     old_changed = rte_mergable(old_best, old_changed) ?
  702:       export_filter(ah, old_changed, &old_changed_free, NULL, 1) : NULL;
  703: 
  704:     if (!new_changed && !old_changed)
  705:       return;
  706:   }
  707: 
  708:   if (new_best)
  709:     ah->stats->exp_updates_received++;
  710:   else
  711:     ah->stats->exp_withdraws_received++;
  712: 
  713:   /* Prepare new merged route */
  714:   if (new_best)
  715:     new_best = rt_export_merged(ah, net, &new_best_free, &tmpa, rte_update_pool, 0);
  716: 
  717:   /* Prepare old merged route (without proper merged next hops) */
  718:   /* There are some issues with running filter on old route - see rt_notify_basic() */
  719:   if (old_best && !refeed)
  720:     old_best = export_filter(ah, old_best, &old_best_free, NULL, 1);
  721: 
  722:   if (new_best || old_best)
  723:     do_rt_notify(ah, net, new_best, old_best, tmpa, refeed);
  724: 
  725:   /* Discard temporary rte's */
  726:   if (new_best_free)
  727:     rte_free(new_best_free);
  728:   if (old_best_free)
  729:     rte_free(old_best_free);
  730:   if (new_changed_free)
  731:     rte_free(new_changed_free);
  732:   if (old_changed_free)
  733:     rte_free(old_changed_free);
  734: }
  735: 
  736: 
  737: /**
  738:  * rte_announce - announce a routing table change
  739:  * @tab: table the route has been added to
  740:  * @type: type of route announcement (RA_OPTIMAL, RA_ANY, RA_ACCEPTED or RA_MERGED)
  741:  * @net: network in question
  742:  * @new: the new route to be announced
  743:  * @old: the previous route for the same network
  744:  * @new_best: the new best route for the same network
  745:  * @old_best: the previous best route for the same network
  746:  * @before_old: The previous route before @old for the same network.
  747:  * 		If @before_old is NULL @old was the first.
  748:  *
  749:  * This function gets a routing table update and announces it
  750:  * to all protocols that accept the given type of route announcement
  751:  * and are connected to the same table by their announcement hooks.
  752:  *
  753:  * A route announcement of type %RA_OPTIMAL is generated when the optimal
  754:  * route (in routing table @tab) changes. In that case @old stores the
  755:  * old optimal route.
  756:  *
  757:  * A route announcement of type %RA_ANY is generated when any route (in
  758:  * routing table @tab) changes. In that case @old stores the old route
  759:  * from the same protocol.
  760:  *
  761:  * For each appropriate protocol, we first call its import_control()
  762:  * hook which performs basic checks on the route (each protocol has a
  763:  * right to veto or force accept of the route before any filter is
  764:  * asked) and adds default values of attributes specific to the new
  765:  * protocol (metrics, tags etc.).  Then it consults the protocol's
  766:  * export filter and if it accepts the route, the rt_notify() hook of
  767:  * the protocol gets called.
  768:  */
  769: static void
  770: rte_announce(rtable *tab, unsigned type, net *net, rte *new, rte *old,
  771: 	     rte *new_best, rte *old_best, rte *before_old)
  772: {
  773:   if (!rte_is_valid(new))
  774:     new = NULL;
  775: 
  776:   if (!rte_is_valid(old))
  777:     old = before_old = NULL;
  778: 
  779:   if (!rte_is_valid(new_best))
  780:     new_best = NULL;
  781: 
  782:   if (!rte_is_valid(old_best))
  783:     old_best = NULL;
  784: 
  785:   if (!old && !new)
  786:     return;
  787: 
  788:   if (type == RA_OPTIMAL)
  789:     {
  790:       if (new)
  791: 	new->attrs->src->proto->stats.pref_routes++;
  792:       if (old)
  793: 	old->attrs->src->proto->stats.pref_routes--;
  794: 
  795:       if (tab->hostcache)
  796: 	rt_notify_hostcache(tab, net);
  797:     }
  798: 
  799:   struct announce_hook *a;
  800:   WALK_LIST(a, tab->hooks)
  801:     {
  802:       ASSERT(a->proto->export_state != ES_DOWN);
  803:       if (a->proto->accept_ra_types == type)
  804: 	if (type == RA_ACCEPTED)
  805: 	  rt_notify_accepted(a, net, new, old, before_old, 0);
  806: 	else if (type == RA_MERGED)
  807: 	  rt_notify_merged(a, net, new, old, new_best, old_best, 0);
  808: 	else
  809: 	  rt_notify_basic(a, net, new, old, 0);
  810:     }
  811: }
  812: 
  813: static inline int
  814: rte_validate(rte *e)
  815: {
  816:   int c;
  817:   net *n = e->net;
  818: 
  819:   if ((n->n.pxlen > BITS_PER_IP_ADDRESS) || !ip_is_prefix(n->n.prefix,n->n.pxlen))
  820:     {
  821:       log(L_WARN "Ignoring bogus prefix %I/%d received via %s",
  822: 	  n->n.prefix, n->n.pxlen, e->sender->proto->name);
  823:       return 0;
  824:     }
  825: 
  826:   c = ipa_classify_net(n->n.prefix);
  827:   if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
  828:     {
  829:       log(L_WARN "Ignoring bogus route %I/%d received via %s",
  830: 	  n->n.prefix, n->n.pxlen, e->sender->proto->name);
  831:       return 0;
  832:     }
  833: 
  834:   if ((e->attrs->dest == RTD_MULTIPATH) && !mpnh_is_sorted(e->attrs->nexthops))
  835:     {
  836:       log(L_WARN "Ignoring unsorted multipath route %I/%d received via %s",
  837: 	  n->n.prefix, n->n.pxlen, e->sender->proto->name);
  838:       return 0;
  839:     }
  840: 
  841:   return 1;
  842: }
  843: 
  844: /**
  845:  * rte_free - delete a &rte
  846:  * @e: &rte to be deleted
  847:  *
  848:  * rte_free() deletes the given &rte from the routing table it's linked to.
  849:  */
  850: void
  851: rte_free(rte *e)
  852: {
  853:   if (rta_is_cached(e->attrs))
  854:     rta_free(e->attrs);
  855:   sl_free(rte_slab, e);
  856: }
  857: 
  858: static inline void
  859: rte_free_quick(rte *e)
  860: {
  861:   rta_free(e->attrs);
  862:   sl_free(rte_slab, e);
  863: }
  864: 
  865: static int
  866: rte_same(rte *x, rte *y)
  867: {
  868:   /* rte.flags are not checked, as they are mostly internal to rtable */
  869:   return
  870:     x->attrs == y->attrs &&
  871:     x->pflags == y->pflags &&
  872:     x->pref == y->pref &&
  873:     (!x->attrs->src->proto->rte_same || x->attrs->src->proto->rte_same(x, y)) &&
  874:     rte_is_filtered(x) == rte_is_filtered(y);
  875: }
  876: 
  877: static inline int rte_is_ok(rte *e) { return e && !rte_is_filtered(e); }
  878: 
  879: static void
  880: rte_recalculate(struct announce_hook *ah, net *net, rte *new, struct rte_src *src)
  881: {
  882:   struct proto *p = ah->proto;
  883:   struct rtable *table = ah->table;
  884:   struct proto_stats *stats = ah->stats;
  885:   static struct tbf rl_pipe = TBF_DEFAULT_LOG_LIMITS;
  886:   rte *before_old = NULL;
  887:   rte *old_best = net->routes;
  888:   rte *old = NULL;
  889:   rte **k;
  890: 
  891:   k = &net->routes;			/* Find and remove original route from the same protocol */
  892:   while (old = *k)
  893:     {
  894:       if (old->attrs->src == src)
  895: 	{
  896: 	  /* If there is the same route in the routing table but from
  897: 	   * a different sender, then there are two paths from the
  898: 	   * source protocol to this routing table through transparent
  899: 	   * pipes, which is not allowed.
  900: 	   *
  901: 	   * We log that and ignore the route. If it is a withdraw, we
  902: 	   * ignore it completely (there might be 'spurious withdraws',
  903: 	   * see FIXME in do_rte_announce()).
  904: 	   */
  905: 	  if (old->sender->proto != p)
  906: 	    {
  907: 	      if (new)
  908: 		{
  909: 		  log_rl(&rl_pipe, L_ERR "Pipe collision detected when sending %I/%d to table %s",
  910: 		      net->n.prefix, net->n.pxlen, table->name);
  911: 		  rte_free_quick(new);
  912: 		}
  913: 	      return;
  914: 	    }
  915: 
  916: 	  if (new && rte_same(old, new))
  917: 	    {
  918: 	      /* No changes, ignore the new route and refresh the old one */
  919: 
  920: 	      old->flags &= ~(REF_STALE | REF_DISCARD | REF_MODIFY);
  921: 
  922: 	      if (!rte_is_filtered(new))
  923: 		{
  924: 		  stats->imp_updates_ignored++;
  925: 		  rte_trace_in(D_ROUTES, p, new, "ignored");
  926: 		}
  927: 
  928: 	      rte_free_quick(new);
  929: 	      return;
  930: 	    }
  931: 	  *k = old->next;
  932: 	  break;
  933: 	}
  934:       k = &old->next;
  935:       before_old = old;
  936:     }
  937: 
  938:   if (!old)
  939:     before_old = NULL;
  940: 
  941:   if (!old && !new)
  942:     {
  943:       stats->imp_withdraws_ignored++;
  944:       return;
  945:     }
  946: 
  947:   int new_ok = rte_is_ok(new);
  948:   int old_ok = rte_is_ok(old);
  949: 
  950:   struct proto_limit *l = ah->rx_limit;
  951:   if (l && !old && new)
  952:     {
  953:       u32 all_routes = stats->imp_routes + stats->filt_routes;
  954: 
  955:       if (all_routes >= l->limit)
  956: 	proto_notify_limit(ah, l, PLD_RX, all_routes);
  957: 
  958:       if (l->state == PLS_BLOCKED)
  959: 	{
  960: 	  /* With the receive limit the situation is simple: old is NULL, so
  961: 	     we just free new and exit as if nothing happened */
  962: 
  963: 	  stats->imp_updates_ignored++;
  964: 	  rte_trace_in(D_FILTERS, p, new, "ignored [limit]");
  965: 	  rte_free_quick(new);
  966: 	  return;
  967: 	}
  968:     }
  969: 
  970:   l = ah->in_limit;
  971:   if (l && !old_ok && new_ok)
  972:     {
  973:       if (stats->imp_routes >= l->limit)
  974: 	proto_notify_limit(ah, l, PLD_IN, stats->imp_routes);
  975: 
  976:       if (l->state == PLS_BLOCKED)
  977: 	{
  978: 	  /* With the import limit the situation is more complicated. We
  979: 	     shouldn't just drop the route; we should handle it as if it
  980: 	     were filtered. We also have to continue the route
  981: 	     processing if old or new is non-NULL, but we should exit
  982: 	     if both are NULL as this case is probably assumed to be
  983: 	     already handled. */
  984: 
  985: 	  stats->imp_updates_ignored++;
  986: 	  rte_trace_in(D_FILTERS, p, new, "ignored [limit]");
  987: 
  988: 	  if (ah->in_keep_filtered)
  989: 	    new->flags |= REF_FILTERED;
  990: 	  else
  991: 	    { rte_free_quick(new); new = NULL; }
  992: 
  993: 	  /* Note that old && !new could be possible when
  994: 	     ah->in_keep_filtered changed in the recent past. */
  995: 
  996: 	  if (!old && !new)
  997: 	    return;
  998: 
  999: 	  new_ok = 0;
 1000: 	  goto skip_stats1;
 1001: 	}
 1002:     }
 1003: 
 1004:   if (new_ok)
 1005:     stats->imp_updates_accepted++;
 1006:   else if (old_ok)
 1007:     stats->imp_withdraws_accepted++;
 1008:   else
 1009:     stats->imp_withdraws_ignored++;
 1010: 
 1011:  skip_stats1:
 1012: 
 1013:   if (new)
 1014:     rte_is_filtered(new) ? stats->filt_routes++ : stats->imp_routes++;
 1015:   if (old)
 1016:     rte_is_filtered(old) ? stats->filt_routes-- : stats->imp_routes--;
 1017: 
 1018:   if (table->config->sorted)
 1019:     {
 1020:       /* If routes are sorted, just insert new route to appropriate position */
 1021:       if (new)
 1022: 	{
 1023: 	  if (before_old && !rte_better(new, before_old))
 1024: 	    k = &before_old->next;
 1025: 	  else
 1026: 	    k = &net->routes;
 1027: 
 1028: 	  for (; *k; k=&(*k)->next)
 1029: 	    if (rte_better(new, *k))
 1030: 	      break;
 1031: 
 1032: 	  new->next = *k;
 1033: 	  *k = new;
 1034: 	}
 1035:     }
 1036:   else
 1037:     {
 1038:       /* If routes are not sorted, find the best route and move it to
 1039: 	 the first position. There are several optimized cases. */
 1040: 
 1041:       if (src->proto->rte_recalculate && src->proto->rte_recalculate(table, net, new, old, old_best))
 1042: 	goto do_recalculate;
 1043: 
 1044:       if (new && rte_better(new, old_best))
 1045: 	{
 1046: 	  /* The first case - the new route is clearly optimal,
 1047: 	     so we link it at the first position */
 1048: 
 1049: 	  new->next = net->routes;
 1050: 	  net->routes = new;
 1051: 	}
 1052:       else if (old == old_best)
 1053: 	{
 1054: 	  /* The second case - the old best route disappeared. We add the
 1055: 	     new route (if we have any) to the list (we don't care about
 1056: 	     position), then we elect the new optimal route, relink it
 1057: 	     to the first position and announce it. The new optimal
 1058: 	     route might be NULL if there are no more routes */
 1059: 
 1060: 	do_recalculate:
 1061: 	  /* Add the new route to the list */
 1062: 	  if (new)
 1063: 	    {
 1064: 	      new->next = net->routes;
 1065: 	      net->routes = new;
 1066: 	    }
 1067: 
 1068: 	  /* Find a new optimal route (if there is any) */
 1069: 	  if (net->routes)
 1070: 	    {
 1071: 	      rte **bp = &net->routes;
 1072: 	      for (k=&(*bp)->next; *k; k=&(*k)->next)
 1073: 		if (rte_better(*k, *bp))
 1074: 		  bp = k;
 1075: 
 1076: 	      /* And relink it */
 1077: 	      rte *best = *bp;
 1078: 	      *bp = best->next;
 1079: 	      best->next = net->routes;
 1080: 	      net->routes = best;
 1081: 	    }
 1082: 	}
 1083:       else if (new)
 1084: 	{
 1085: 	  /* The third case - the new route is not better than the old
 1086: 	     best route (therefore old_best != NULL) and the old best
 1087: 	     route was not removed (therefore old_best == net->routes).
 1088: 	     We just link the new route after the old best route. */
 1089: 
 1090: 	  ASSERT(net->routes != NULL);
 1091: 	  new->next = net->routes->next;
 1092: 	  net->routes->next = new;
 1093: 	}
 1094:       /* The fourth (empty) case - suboptimal route was removed, nothing to do */
 1095:     }
 1096: 
 1097:   if (new)
 1098:     new->lastmod = now;
 1099: 
 1100:   /* Log the route change */
 1101:   if (p->debug & D_ROUTES)
 1102:     {
 1103:       if (new_ok)
 1104: 	rte_trace(p, new, '>', new == net->routes ? "added [best]" : "added");
 1105:       else if (old_ok)
 1106: 	{
 1107: 	  if (old != old_best)
 1108: 	    rte_trace(p, old, '>', "removed");
 1109: 	  else if (rte_is_ok(net->routes))
 1110: 	    rte_trace(p, old, '>', "removed [replaced]");
 1111: 	  else
 1112: 	    rte_trace(p, old, '>', "removed [sole]");
 1113: 	}
 1114:     }
 1115: 
 1116:   /* Propagate the route change */
 1117:   rte_announce(table, RA_ANY, net, new, old, NULL, NULL, NULL);
 1118:   if (net->routes != old_best)
 1119:     rte_announce(table, RA_OPTIMAL, net, net->routes, old_best, NULL, NULL, NULL);
 1120:   if (table->config->sorted)
 1121:     rte_announce(table, RA_ACCEPTED, net, new, old, NULL, NULL, before_old);
 1122:   rte_announce(table, RA_MERGED, net, new, old, net->routes, old_best, NULL);
 1123: 
 1124:   if (!net->routes &&
 1125:       (table->gc_counter++ >= table->config->gc_max_ops) &&
 1126:       (table->gc_time + table->config->gc_min_time <= now))
 1127:     rt_schedule_gc(table);
 1128: 
 1129:   if (old_ok && p->rte_remove)
 1130:     p->rte_remove(net, old);
 1131:   if (new_ok && p->rte_insert)
 1132:     p->rte_insert(net, new);
 1133: 
 1134:   if (old)
 1135:     rte_free_quick(old);
 1136: }
 1137: 
 1138: static int rte_update_nest_cnt;		/* Nesting counter to allow recursive updates */
 1139: 
 1140: static inline void
 1141: rte_update_lock(void)
 1142: {
 1143:   rte_update_nest_cnt++;
 1144: }
 1145: 
 1146: static inline void
 1147: rte_update_unlock(void)
 1148: {
 1149:   if (!--rte_update_nest_cnt)
 1150:     lp_flush(rte_update_pool);
 1151: }
 1152: 
 1153: static inline void
 1154: rte_hide_dummy_routes(net *net, rte **dummy)
 1155: {
 1156:   if (net->routes && net->routes->attrs->source == RTS_DUMMY)
 1157:   {
 1158:     *dummy = net->routes;
 1159:     net->routes = (*dummy)->next;
 1160:   }
 1161: }
 1162: 
 1163: static inline void
 1164: rte_unhide_dummy_routes(net *net, rte **dummy)
 1165: {
 1166:   if (*dummy)
 1167:   {
 1168:     (*dummy)->next = net->routes;
 1169:     net->routes = *dummy;
 1170:   }
 1171: }
 1172: 
 1173: /**
 1174:  * rte_update - enter a new update to a routing table
 1175:  * @table: table to be updated
 1176:  * @ah: pointer to table announce hook
 1177:  * @net: network node
 1178:  * @p: protocol submitting the update
 1179:  * @src: protocol originating the update
 1180:  * @new: a &rte representing the new route or %NULL for route removal.
 1181:  *
 1182:  * This function is called by the routing protocols whenever they discover
 1183:  * a new route or wish to update/remove an existing route. The right announcement
 1184:  * sequence is to build route attributes first (either un-cached with @aflags set
 1185:  * to zero or a cached one using rta_lookup(); in this case please note that
 1186:  * you need to increase the use count of the attributes yourself by calling
 1187:  * rta_clone()), call rte_get_temp() to obtain a temporary &rte, fill in all
 1188:  * the appropriate data and finally submit the new &rte by calling rte_update().
 1189:  *
 1190:  * @src specifies the protocol that originally created the route and the meaning
 1191:  * of protocol-dependent data of @new. If @new is not %NULL, @src has to be the
 1192:  * same value as @new->attrs->proto. @p specifies the protocol that called
 1193:  * rte_update(). In most cases it is the same protocol as @src. rte_update()
 1194:  * stores @p in @new->sender.
 1195:  *
 1196:  * When rte_update() gets any route, it automatically validates it (checks
 1197:  * whether the network and next hop address are valid IP addresses and also
 1198:  * whether a normal routing protocol doesn't try to smuggle a host or link
 1199:  * scope route into the table), converts all protocol-dependent attributes stored
 1200:  * in the &rte to temporary extended attributes, consults the import filters of the
 1201:  * protocol to see if the route should be accepted and/or its attributes modified,
 1202:  * and stores the temporary attributes back to the &rte.
 1203:  *
 1204:  * Now, having a "public" version of the route, we
 1205:  * automatically find any old route defined by the protocol @src
 1206:  * for network @net, replace it with the new one (or remove it if @new is %NULL),
 1207:  * recalculate the optimal route for this destination and finally broadcast
 1208:  * the change (if any) to all routing protocols by calling rte_announce().
 1209:  *
 1210:  * All memory used for attribute lists and other temporary allocations is taken
 1211:  * from a special linear pool @rte_update_pool and freed when rte_update()
 1212:  * finishes.
 1213:  */
 1214: 
 1215: void
 1216: rte_update2(struct announce_hook *ah, net *net, rte *new, struct rte_src *src)
 1217: {
 1218:   struct proto *p = ah->proto;
 1219:   struct proto_stats *stats = ah->stats;
 1220:   struct filter *filter = ah->in_filter;
 1221:   ea_list *tmpa = NULL;
 1222:   rte *dummy = NULL;
 1223: 
 1224:   rte_update_lock();
 1225:   if (new)
 1226:     {
 1227:       new->sender = ah;
 1228: 
 1229:       stats->imp_updates_received++;
 1230:       if (!rte_validate(new))
 1231: 	{
 1232: 	  rte_trace_in(D_FILTERS, p, new, "invalid");
 1233: 	  stats->imp_updates_invalid++;
 1234: 	  goto drop;
 1235: 	}
 1236: 
 1237:       if (filter == FILTER_REJECT)
 1238: 	{
 1239: 	  stats->imp_updates_filtered++;
 1240: 	  rte_trace_in(D_FILTERS, p, new, "filtered out");
 1241: 
 1242: 	  if (! ah->in_keep_filtered)
 1243: 	    goto drop;
 1244: 
 1245: 	  /* new is a private copy, we can modify it */
 1246: 	  new->flags |= REF_FILTERED;
 1247: 	}
 1248:       else
 1249: 	{
 1250: 	  tmpa = rte_make_tmp_attrs(new, rte_update_pool);
 1251: 	  if (filter && (filter != FILTER_REJECT))
 1252: 	    {
 1253: 	      ea_list *old_tmpa = tmpa;
 1254: 	      int fr = f_run(filter, &new, &tmpa, rte_update_pool, 0);
 1255: 	      if (fr > F_ACCEPT)
 1256: 		{
 1257: 		  stats->imp_updates_filtered++;
 1258: 		  rte_trace_in(D_FILTERS, p, new, "filtered out");
 1259: 
 1260: 		  if (! ah->in_keep_filtered)
 1261: 		    goto drop;
 1262: 
 1263: 		  new->flags |= REF_FILTERED;
 1264: 		}
 1265: 	      if (tmpa != old_tmpa && src->proto->store_tmp_attrs)
 1266: 		src->proto->store_tmp_attrs(new, tmpa);
 1267: 	    }
 1268: 	}
 1269:       if (!rta_is_cached(new->attrs)) /* Need to copy attributes */
 1270: 	new->attrs = rta_lookup(new->attrs);
 1271:       new->flags |= REF_COW;
 1272:     }
 1273:   else
 1274:     {
 1275:       stats->imp_withdraws_received++;
 1276: 
 1277:       if (!net || !src)
 1278: 	{
 1279: 	  stats->imp_withdraws_ignored++;
 1280: 	  rte_update_unlock();
 1281: 	  return;
 1282: 	}
 1283:     }
 1284: 
 1285:  recalc:
 1286:   rte_hide_dummy_routes(net, &dummy);
 1287:   rte_recalculate(ah, net, new, src);
 1288:   rte_unhide_dummy_routes(net, &dummy);
 1289:   rte_update_unlock();
 1290:   return;
 1291: 
 1292:  drop:
 1293:   rte_free(new);
 1294:   new = NULL;
 1295:   goto recalc;
 1296: }
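/*
 * Illustrative sketch, not part of the original source, of the announcement
 * sequence described in the rte_update() documentation above: build a &rta,
 * obtain a temporary &rte, fill it in and submit it. The helper name, the
 * attribute values and the way @ah/@src are obtained are all hypothetical;
 * real protocols (e.g. static) follow essentially this pattern.
 */
static void
example_originate(struct announce_hook *ah, struct rte_src *src,
		  ip_addr prefix, int pxlen, ip_addr gw, struct iface *iface)
{
  net *n = net_get(ah->table, prefix, pxlen);

  rta a = {
    .src = src,
    .source = RTS_STATIC,
    .scope = SCOPE_UNIVERSE,
    .cast = RTC_UNICAST,
    .dest = RTD_ROUTER,
    .gw = gw,
    .iface = iface,
  };

  rte *e = rte_get_temp(rta_lookup(&a));	/* cached copy of the attributes */
  e->net = n;
  e->pflags = 0;

  rte_update2(ah, n, e, src);			/* ownership of e passes to the table */
}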
 1297: 
 1298: /* Independent call to rte_announce(), used from next hop
 1299:    recalculation, outside of rte_update(). new must be non-NULL */
 1300: static inline void 
 1301: rte_announce_i(rtable *tab, unsigned type, net *net, rte *new, rte *old,
 1302: 	       rte *new_best, rte *old_best)
 1303: {
 1304:   rte_update_lock();
 1305:   rte_announce(tab, type, net, new, old, new_best, old_best, NULL);
 1306:   rte_update_unlock();
 1307: }
 1308: 
 1309: static inline void
 1310: rte_discard(rte *old)	/* Non-filtered route deletion, used during garbage collection */
 1311: {
 1312:   rte_update_lock();
 1313:   rte_recalculate(old->sender, old->net, NULL, old->attrs->src);
 1314:   rte_update_unlock();
 1315: }
 1316: 
 1317: /* Modify existing route by protocol hook, used for long-lived graceful restart */
 1318: static inline void
 1319: rte_modify(rte *old)
 1320: {
 1321:   rte_update_lock();
 1322: 
 1323:   rte *new = old->sender->proto->rte_modify(old, rte_update_pool);
 1324:   if (new != old)
 1325:   {
 1326:     if (new)
 1327:     {
 1328:       if (!rta_is_cached(new->attrs))
 1329: 	new->attrs = rta_lookup(new->attrs);
 1330:       new->flags = (old->flags & ~REF_MODIFY) | REF_COW;
 1331:     }
 1332: 
 1333:     rte_recalculate(old->sender, old->net, new, old->attrs->src);
 1334:   }
 1335: 
 1336:   rte_update_unlock();
 1337: }
 1338: 
 1339: /* Check whether the best route in rtable for the given net would be exported to p */
 1340: int
 1341: rt_examine(rtable *t, ip_addr prefix, int pxlen, struct proto *p, struct filter *filter)
 1342: {
 1343:   net *n = net_find(t, prefix, pxlen);
 1344:   rte *rt = n ? n->routes : NULL;
 1345: 
 1346:   if (!rte_is_valid(rt))
 1347:     return 0;
 1348: 
 1349:   rte_update_lock();
 1350: 
 1351:   /* Rest is stripped down export_filter() */
 1352:   ea_list *tmpa = rte_make_tmp_attrs(rt, rte_update_pool);
 1353:   int v = p->import_control ? p->import_control(p, &rt, &tmpa, rte_update_pool) : 0;
 1354:   if (v == RIC_PROCESS)
 1355:     v = (f_run(filter, &rt, &tmpa, rte_update_pool,
 1356: 	       FF_FORCE_TMPATTR | FF_SILENT) <= F_ACCEPT);
 1357: 
 1358:    /* Discard temporary rte */
 1359:   if (rt != n->routes)
 1360:     rte_free(rt);
 1361: 
 1362:   rte_update_unlock();
 1363: 
 1364:   return v > 0;
 1365: }
 1366: 
 1367: 
 1368: /**
 1369:  * rt_refresh_begin - start a refresh cycle
 1370:  * @t: related routing table
 1371:  * @ah: related announce hook 
 1372:  *
 1373:  * This function starts a refresh cycle for given routing table and announce
 1374:  * hook. The refresh cycle is a sequence where the protocol sends all its valid
 1375:  * routes to the routing table (by rte_update()). After that, all protocol
 1376:  * routes (more precisely routes with @ah as @sender) not sent during the
 1377:  * refresh cycle but still in the table from the past are pruned. This is
 1378:  * implemented by marking all related routes as stale by REF_STALE flag in
 1379:  * rt_refresh_begin(), then marking all related stale routes with REF_DISCARD
 1380:  * flag in rt_refresh_end() and then removing such routes in the prune loop.
 1381:  */
 1382: void
 1383: rt_refresh_begin(rtable *t, struct announce_hook *ah)
 1384: {
 1385:   net *n;
 1386:   rte *e;
 1387: 
 1388:   FIB_WALK(&t->fib, fn)
 1389:     {
 1390:       n = (net *) fn;
 1391:       for (e = n->routes; e; e = e->next)
 1392: 	if (e->sender == ah)
 1393: 	  e->flags |= REF_STALE;
 1394:     }
 1395:   FIB_WALK_END;
 1396: }
 1397: 
 1398: /**
 1399:  * rt_refresh_end - end a refresh cycle
 1400:  * @t: related routing table
 1401:  * @ah: related announce hook 
 1402:  *
 1403:  * This function ends a refresh cycle for the given routing table and announce
 1404:  * hook. See rt_refresh_begin() for a description of refresh cycles.
 1405:  */
 1406: void
 1407: rt_refresh_end(rtable *t, struct announce_hook *ah)
 1408: {
 1409:   int prune = 0;
 1410:   net *n;
 1411:   rte *e;
 1412: 
 1413:   FIB_WALK(&t->fib, fn)
 1414:     {
 1415:       n = (net *) fn;
 1416:       for (e = n->routes; e; e = e->next)
 1417: 	if ((e->sender == ah) && (e->flags & REF_STALE))
 1418: 	  {
 1419: 	    e->flags |= REF_DISCARD;
 1420: 	    prune = 1;
 1421: 	  }
 1422:     }
 1423:   FIB_WALK_END;
 1424: 
 1425:   if (prune)
 1426:     rt_schedule_prune(t);
 1427: }
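/*
 * Illustrative sketch, not part of the original source, of a complete refresh
 * cycle as described in rt_refresh_begin(): mark existing routes stale, let
 * the protocol re-announce everything it still considers valid, then discard
 * whatever was not refreshed. example_resend_all() stands in for a
 * hypothetical protocol-specific re-announcement routine.
 */
static void example_resend_all(struct announce_hook *ah);	/* hypothetical */

static void
example_refresh_cycle(rtable *t, struct announce_hook *ah)
{
  rt_refresh_begin(t, ah);	/* routes sent by ah get REF_STALE */
  example_resend_all(ah);	/* re-sends current routes via rte_update() */
  rt_refresh_end(t, ah);	/* still-stale routes get REF_DISCARD and are pruned */
}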
 1428: 
 1429: void
 1430: rt_modify_stale(rtable *t, struct announce_hook *ah)
 1431: {
 1432:   int prune = 0;
 1433:   net *n;
 1434:   rte *e;
 1435: 
 1436:   FIB_WALK(&t->fib, fn)
 1437:     {
 1438:       n = (net *) fn;
 1439:       for (e = n->routes; e; e = e->next)
 1440: 	if ((e->sender == ah) && (e->flags & REF_STALE) && !(e->flags & REF_FILTERED))
 1441: 	  {
 1442: 	    e->flags |= REF_MODIFY;
 1443: 	    prune = 1;
 1444: 	  }
 1445:     }
 1446:   FIB_WALK_END;
 1447: 
 1448:   if (prune)
 1449:     rt_schedule_prune(t);
 1450: }
 1451: 
 1452: 
 1453: /**
 1454:  * rte_dump - dump a route
 1455:  * @e: &rte to be dumped
 1456:  *
 1457:  * This function dumps the contents of a &rte to the debug output.
 1458:  */
 1459: void
 1460: rte_dump(rte *e)
 1461: {
 1462:   net *n = e->net;
 1463:   debug("%-1I/%2d ", n->n.prefix, n->n.pxlen);
 1464:   debug("KF=%02x PF=%02x pref=%d lm=%d ", n->n.flags, e->pflags, e->pref, now-e->lastmod);
 1465:   rta_dump(e->attrs);
 1466:   if (e->attrs->src->proto->proto->dump_attrs)
 1467:     e->attrs->src->proto->proto->dump_attrs(e);
 1468:   debug("\n");
 1469: }
 1470: 
 1471: /**
 1472:  * rt_dump - dump a routing table
 1473:  * @t: routing table to be dumped
 1474:  *
 1475:  * This function dumps contents of a given routing table to debug output.
 1476:  */
 1477: void
 1478: rt_dump(rtable *t)
 1479: {
 1480:   rte *e;
 1481:   net *n;
 1482:   struct announce_hook *a;
 1483: 
 1484:   debug("Dump of routing table <%s>\n", t->name);
 1485: #ifdef DEBUGGING
 1486:   fib_check(&t->fib);
 1487: #endif
 1488:   FIB_WALK(&t->fib, fn)
 1489:     {
 1490:       n = (net *) fn;
 1491:       for(e=n->routes; e; e=e->next)
 1492: 	rte_dump(e);
 1493:     }
 1494:   FIB_WALK_END;
 1495:   WALK_LIST(a, t->hooks)
 1496:     debug("\tAnnounces routes to protocol %s\n", a->proto->name);
 1497:   debug("\n");
 1498: }
 1499: 
 1500: /**
 1501:  * rt_dump_all - dump all routing tables
 1502:  *
 1503:  * This function dumps contents of all routing tables to debug output.
 1504:  */
 1505: void
 1506: rt_dump_all(void)
 1507: {
 1508:   rtable *t;
 1509: 
 1510:   WALK_LIST(t, routing_tables)
 1511:     rt_dump(t);
 1512: }
 1513: 
 1514: static inline void
 1515: rt_schedule_prune(rtable *tab)
 1516: {
 1517:   rt_mark_for_prune(tab);
 1518:   ev_schedule(tab->rt_event);
 1519: }
 1520: 
 1521: static inline void
 1522: rt_schedule_gc(rtable *tab)
 1523: {
 1524:   if (tab->gc_scheduled)
 1525:     return;
 1526: 
 1527:   tab->gc_scheduled = 1;
 1528:   ev_schedule(tab->rt_event);
 1529: }
 1530: 
 1531: static inline void
 1532: rt_schedule_hcu(rtable *tab)
 1533: {
 1534:   if (tab->hcu_scheduled)
 1535:     return;
 1536: 
 1537:   tab->hcu_scheduled = 1;
 1538:   ev_schedule(tab->rt_event);
 1539: }
 1540: 
 1541: static inline void
 1542: rt_schedule_nhu(rtable *tab)
 1543: {
 1544:   if (tab->nhu_state == 0)
 1545:     ev_schedule(tab->rt_event);
 1546: 
 1547:   /* state change 0->1, 2->3 */
 1548:   tab->nhu_state |= 1;
 1549: }
 1550: 
 1551: 
 1552: static void
 1553: rt_prune_nets(rtable *tab)
 1554: {
 1555:   struct fib_iterator fit;
 1556:   int ncnt = 0, ndel = 0;
 1557: 
 1558: #ifdef DEBUGGING
 1559:   fib_check(&tab->fib);
 1560: #endif
 1561: 
 1562:   FIB_ITERATE_INIT(&fit, &tab->fib);
 1563: again:
 1564:   FIB_ITERATE_START(&tab->fib, &fit, f)
 1565:     {
 1566:       net *n = (net *) f;
 1567:       ncnt++;
 1568:       if (!n->routes)		/* Orphaned FIB entry */
 1569: 	{
 1570: 	  FIB_ITERATE_PUT(&fit, f);
 1571: 	  fib_delete(&tab->fib, f);
 1572: 	  ndel++;
 1573: 	  goto again;
 1574: 	}
 1575:     }
 1576:   FIB_ITERATE_END(f);
 1577:   DBG("Pruned %d of %d networks\n", ndel, ncnt);
 1578: 
 1579:   tab->gc_counter = 0;
 1580:   tab->gc_time = now;
 1581:   tab->gc_scheduled = 0;
 1582: }
 1583: 
 1584: static void
 1585: rt_event(void *ptr)
 1586: {
 1587:   rtable *tab = ptr;
 1588: 
 1589:   if (tab->hcu_scheduled)
 1590:     rt_update_hostcache(tab);
 1591: 
 1592:   if (tab->nhu_state)
 1593:     rt_next_hop_update(tab);
 1594: 
 1595:   if (tab->prune_state)
 1596:     if (!rt_prune_table(tab))
 1597:       {
 1598: 	/* Table prune unfinished */
 1599: 	ev_schedule(tab->rt_event);
 1600: 	return;
 1601:       }
 1602: 
 1603:   if (tab->gc_scheduled)
 1604:     {
 1605:       rt_prune_nets(tab);
 1606:       rt_prune_sources(); // FIXME this should be moved to independent event
 1607:     }
 1608: }
 1609: 
 1610: void
 1611: rt_setup(pool *p, rtable *t, char *name, struct rtable_config *cf)
 1612: {
 1613:   bzero(t, sizeof(*t));
 1614:   fib_init(&t->fib, p, sizeof(net), 0, rte_init);
 1615:   t->name = name;
 1616:   t->config = cf;
 1617:   init_list(&t->hooks);
 1618:   if (cf)
 1619:     {
 1620:       t->rt_event = ev_new(p);
 1621:       t->rt_event->hook = rt_event;
 1622:       t->rt_event->data = t;
 1623:       t->gc_time = now;
 1624:     }
 1625: }
 1626: 
 1627: /**
 1628:  * rt_init - initialize routing tables
 1629:  *
 1630:  * This function is called during BIRD startup. It initializes the
 1631:  * routing table module.
 1632:  */
 1633: void
 1634: rt_init(void)
 1635: {
 1636:   rta_init();
 1637:   rt_table_pool = rp_new(&root_pool, "Routing tables");
 1638:   rte_update_pool = lp_new(rt_table_pool, 4080);
 1639:   rte_slab = sl_new(rt_table_pool, sizeof(rte));
 1640:   init_list(&routing_tables);
 1641: }
 1642: 
 1643: 
 1644: static int
 1645: rt_prune_step(rtable *tab, int *limit)
 1646: {
 1647:   struct fib_iterator *fit = &tab->prune_fit;
 1648: 
 1649:   DBG("Pruning route table %s\n", tab->name);
 1650: #ifdef DEBUGGING
 1651:   fib_check(&tab->fib);
 1652: #endif
 1653: 
 1654:   if (tab->prune_state == RPS_NONE)
 1655:     return 1;
 1656: 
 1657:   if (tab->prune_state == RPS_SCHEDULED)
 1658:     {
 1659:       FIB_ITERATE_INIT(fit, &tab->fib);
 1660:       tab->prune_state = RPS_RUNNING;
 1661:     }
 1662: 
 1663: again:
 1664:   FIB_ITERATE_START(&tab->fib, fit, fn)
 1665:     {
 1666:       net *n = (net *) fn;
 1667:       rte *e;
 1668: 
 1669:     rescan:
 1670:       for (e=n->routes; e; e=e->next)
 1671:       {
 1672: 	if (e->sender->proto->flushing || (e->flags & REF_DISCARD))
 1673: 	  {
 1674: 	    if (*limit <= 0)
 1675: 	      {
 1676: 		FIB_ITERATE_PUT(fit, fn);
 1677: 		return 0;
 1678: 	      }
 1679: 
 1680: 	    rte_discard(e);
 1681: 	    (*limit)--;
 1682: 
 1683: 	    goto rescan;
 1684: 	  }
 1685: 
 1686: 	if (e->flags & REF_MODIFY)
 1687: 	  {
 1688: 	    if (*limit <= 0)
 1689: 	      {
 1690: 		FIB_ITERATE_PUT(fit, fn);
 1691: 		return 0;
 1692: 	      }
 1693: 
 1694: 	    rte_modify(e);
 1695: 	    (*limit)--;
 1696: 
 1697: 	    goto rescan;
 1698: 	  }
 1699:       }
 1700: 
 1701:       if (!n->routes)		/* Orphaned FIB entry */
 1702: 	{
 1703: 	  FIB_ITERATE_PUT(fit, fn);
 1704: 	  fib_delete(&tab->fib, fn);
 1705: 	  goto again;
 1706: 	}
 1707:     }
 1708:   FIB_ITERATE_END(fn);
 1709: 
 1710: #ifdef DEBUGGING
 1711:   fib_check(&tab->fib);
 1712: #endif
 1713: 
 1714:   tab->prune_state = RPS_NONE;
 1715:   return 1;
 1716: }
 1717: 
 1718: /**
 1719:  * rt_prune_table - prune a routing table
 1720:  * @tab: a routing table for pruning
 1721:  *
 1722:  * This function scans the routing table @tab and removes routes belonging to
 1723:  * flushing protocols, discarded routes and also stale network entries, in a
 1724:  * similar fashion to rt_prune_loop(). Returns 1 when all such routes are
 1725:  * pruned. Contrary to rt_prune_loop(), this function is not a part of the
 1726:  * protocol flushing loop, but it is called from rt_event() for just one routing
 1727:  * table.
 1728:  *
 1729:  * Note that rt_prune_table() and rt_prune_loop() share (for each table) the
 1730:  * prune state (@prune_state) and also the pruning iterator (@prune_fit).
 1731:  */
 1732: static inline int
 1733: rt_prune_table(rtable *tab)
 1734: {
 1735:   int limit = 512;
 1736:   return rt_prune_step(tab, &limit);
 1737: }
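
/*
 * Illustrative sketch (taken from the rt_event() hook above, repeated here
 * for clarity): when the per-call limit of 512 removals is exhausted,
 * rt_prune_step() parks the iterator with FIB_ITERATE_PUT() and returns 0,
 * and the caller re-queues the event so the next run resumes where this one
 * stopped:
 *
 *   if (tab->prune_state)
 *     if (!rt_prune_table(tab))
 *       {
 *         ev_schedule(tab->rt_event);   // table prune unfinished, continue later
 *         return;
 *       }
 */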
 1738: 
 1739: /**
 1740:  * rt_prune_loop - prune routing tables
 1741:  *
 1742:  * The prune loop scans routing tables and removes routes belonging to flushing
 1743:  * protocols, discarded routes and also stale network entries. Returns 1 when
 1744:  * all such routes are pruned. It is a part of the protocol flushing loop.
 1745:  */
 1746: int
 1747: rt_prune_loop(void)
 1748: {
 1749:   int limit = 512;
 1750:   rtable *t;
 1751: 
 1752:   WALK_LIST(t, routing_tables)
 1753:     if (! rt_prune_step(t, &limit))
 1754:       return 0;
 1755: 
 1756:   return 1;
 1757: }
 1758: 
 1759: void
 1760: rt_preconfig(struct config *c)
 1761: {
 1762:   struct symbol *s = cf_get_symbol("master");
 1763: 
 1764:   init_list(&c->tables);
 1765:   c->master_rtc = rt_new_table(s);
 1766: }
 1767: 
 1768: 
 1769: /*
 1770:  * Some functions for handling internal next hop updates
 1771:  * triggered by rt_schedule_nhu().
 1772:  */
 1773: 
 1774: static inline int
 1775: rta_next_hop_outdated(rta *a)
 1776: {
 1777:   struct hostentry *he = a->hostentry;
 1778: 
 1779:   if (!he)
 1780:     return 0;
 1781: 
 1782:   if (!he->src)
 1783:     return a->dest != RTD_UNREACHABLE;
 1784: 
 1785:   return (a->iface != he->src->iface) || !ipa_equal(a->gw, he->gw) ||
 1786:     (a->dest != he->dest) || (a->igp_metric != he->igp_metric) ||
 1787:     !mpnh_same(a->nexthops, he->src->nexthops);
 1788: }
 1789: 
 1790: static inline void
 1791: rta_apply_hostentry(rta *a, struct hostentry *he)
 1792: {
 1793:   a->hostentry = he;
 1794:   a->iface = he->src ? he->src->iface : NULL;
 1795:   a->gw = he->gw;
 1796:   a->dest = he->dest;
 1797:   a->igp_metric = he->igp_metric;
 1798:   a->nexthops = he->src ? he->src->nexthops : NULL;
 1799: }
 1800: 
 1801: static inline rte *
 1802: rt_next_hop_update_rte(rtable *tab UNUSED, rte *old)
 1803: {
 1804:   rta a;
 1805:   memcpy(&a, old->attrs, sizeof(rta));
 1806:   rta_apply_hostentry(&a, old->attrs->hostentry);
 1807:   a.aflags = 0;
 1808: 
 1809:   rte *e = sl_alloc(rte_slab);
 1810:   memcpy(e, old, sizeof(rte));
 1811:   e->attrs = rta_lookup(&a);
 1812: 
 1813:   return e;
 1814: }
 1815: 
 1816: static inline int
 1817: rt_next_hop_update_net(rtable *tab, net *n)
 1818: {
 1819:   rte **k, *e, *new, *old_best, **new_best;
 1820:   int count = 0;
 1821:   int free_old_best = 0;
 1822: 
 1823:   old_best = n->routes;
 1824:   if (!old_best)
 1825:     return 0;
 1826: 
 1827:   for (k = &n->routes; e = *k; k = &e->next)
 1828:     if (rta_next_hop_outdated(e->attrs))
 1829:       {
 1830: 	new = rt_next_hop_update_rte(tab, e);
 1831: 	*k = new;
 1832: 
 1833: 	rte_announce_i(tab, RA_ANY, n, new, e, NULL, NULL);
 1834: 	rte_trace_in(D_ROUTES, new->sender->proto, new, "updated");
 1835: 
 1836: 	/* Call a pre-comparison hook */
 1837: 	/* Not really an efficient way to compute this */
 1838: 	if (e->attrs->src->proto->rte_recalculate)
 1839: 	  e->attrs->src->proto->rte_recalculate(tab, n, new, e, NULL);
 1840: 
 1841: 	if (e != old_best)
 1842: 	  rte_free_quick(e);
 1843: 	else /* Freeing of the old best rte is postponed */
 1844: 	  free_old_best = 1;
 1845: 
 1846: 	e = new;
 1847: 	count++;
 1848:       }
 1849: 
 1850:   if (!count)
 1851:     return 0;
 1852: 
 1853:   /* Find the new best route */
 1854:   new_best = NULL;
 1855:   for (k = &n->routes; e = *k; k = &e->next)
 1856:     {
 1857:       if (!new_best || rte_better(e, *new_best))
 1858: 	new_best = k;
 1859:     }
 1860: 
 1861:   /* Relink the new best route to the first position */
 1862:   new = *new_best;
 1863:   if (new != n->routes)
 1864:     {
 1865:       *new_best = new->next;
 1866:       new->next = n->routes;
 1867:       n->routes = new;
 1868:     }
 1869: 
 1870:   /* Announce the new best route */
 1871:   if (new != old_best)
 1872:     {
 1873:       rte_announce_i(tab, RA_OPTIMAL, n, new, old_best, NULL, NULL);
 1874:       rte_trace_in(D_ROUTES, new->sender->proto, new, "updated [best]");
 1875:     }
 1876: 
 1877:   /* FIXME: Better announcement of merged routes */
 1878:   rte_announce_i(tab, RA_MERGED, n, new, old_best, new, old_best);
 1879: 
 1880:   if (free_old_best)
 1881:     rte_free_quick(old_best);
 1882: 
 1883:   return count;
 1884: }
 1885: 
 1886: static void
 1887: rt_next_hop_update(rtable *tab)
 1888: {
 1889:   struct fib_iterator *fit = &tab->nhu_fit;
 1890:   int max_feed = 32;
 1891: 
 1892:   if (tab->nhu_state == 0)
 1893:     return;
 1894: 
 1895:   if (tab->nhu_state == 1)
 1896:     {
 1897:       FIB_ITERATE_INIT(fit, &tab->fib);
 1898:       tab->nhu_state = 2;
 1899:     }
 1900: 
 1901:   FIB_ITERATE_START(&tab->fib, fit, fn)
 1902:     {
 1903:       if (max_feed <= 0)
 1904: 	{
 1905: 	  FIB_ITERATE_PUT(fit, fn);
 1906: 	  ev_schedule(tab->rt_event);
 1907: 	  return;
 1908: 	}
 1909:       max_feed -= rt_next_hop_update_net(tab, (net *) fn);
 1910:     }
 1911:   FIB_ITERATE_END(fn);
 1912: 
 1913:   /* state change 2->0, 3->1 */
 1914:   tab->nhu_state &= 1;
 1915: 
 1916:   if (tab->nhu_state > 0)
 1917:     ev_schedule(tab->rt_event);
 1918: }
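
/*
 * Added note on the assumed meaning of tab->nhu_state (the values are not
 * named in this file): 0 = idle, 1 = update scheduled, 2 = update running,
 * 3 = running with another update requested meanwhile.  The '&= 1' above
 * therefore maps 2 -> 0 (finished) and 3 -> 1 (run once more), and a nonzero
 * result re-queues tab->rt_event.
 */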
 1919: 
 1920: 
 1921: struct rtable_config *
 1922: rt_new_table(struct symbol *s)
 1923: {
 1924:   /* Hack that allows 'redefining' the master table */
 1925:   if ((s->class == SYM_TABLE) && (s->def == new_config->master_rtc))
 1926:     return s->def;
 1927: 
 1928:   struct rtable_config *c = cfg_allocz(sizeof(struct rtable_config));
 1929: 
 1930:   cf_define_symbol(s, SYM_TABLE, c);
 1931:   c->name = s->name;
 1932:   add_tail(&new_config->tables, &c->n);
 1933:   c->gc_max_ops = 1000;
 1934:   c->gc_min_time = 5;
 1935:   return c;
 1936: }
 1937: 
 1938: /**
 1939:  * rt_lock_table - lock a routing table
 1940:  * @r: routing table to be locked
 1941:  *
 1942:  * Lock a routing table because it's in use by a protocol,
 1943:  * preventing it from being freed when it gets undefined in a new
 1944:  * configuration.
 1945:  */
 1946: void
 1947: rt_lock_table(rtable *r)
 1948: {
 1949:   r->use_count++;
 1950: }
 1951: 
 1952: /**
 1953:  * rt_unlock_table - unlock a routing table
 1954:  * @r: routing table to be unlocked
 1955:  *
 1956:  * Unlock a routing table formerly locked by rt_lock_table(),
 1957:  * that is, decrease its use count and delete it if it's scheduled
 1958:  * for deletion by a configuration change.
 1959:  */
 1960: void
 1961: rt_unlock_table(rtable *r)
 1962: {
 1963:   if (!--r->use_count && r->deleted)
 1964:     {
 1965:       struct config *conf = r->deleted;
 1966:       DBG("Deleting routing table %s\n", r->name);
 1967:       r->config->table = NULL;
 1968:       if (r->hostcache)
 1969: 	rt_free_hostcache(r);
 1970:       rem_node(&r->n);
 1971:       fib_free(&r->fib);
 1972:       rfree(r->rt_event);
 1973:       mb_free(r);
 1974:       config_del_obstacle(conf);
 1975:     }
 1976: }
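
/*
 * Minimal usage sketch (illustrative only, variable names are hypothetical):
 * a protocol attached to a table keeps it locked for as long as it may still
 * touch it, so a table removed from the configuration survives until the
 * last user disconnects:
 *
 *   rt_lock_table(p->table);     // table cannot be freed while in use
 *   ...                          // announce routes, feed, withdraw, ...
 *   rt_unlock_table(p->table);   // last unlock of a deleted table frees it
 */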
 1977: 
 1978: /**
 1979:  * rt_commit - commit new routing table configuration
 1980:  * @new: new configuration
 1981:  * @old: original configuration or %NULL if it's boot time config
 1982:  *
 1983:  * Scan the differences between the @old and @new configurations and modify
 1984:  * the routing tables according to these changes. If @new defines a
 1985:  * previously unknown table, create it; if it omits a table existing
 1986:  * in @old, schedule it for deletion (it gets deleted when all protocols
 1987:  * disconnect from it by calling rt_unlock_table()); if a table exists
 1988:  * in both configurations, leave it unchanged.
 1989:  */
 1990: void
 1991: rt_commit(struct config *new, struct config *old)
 1992: {
 1993:   struct rtable_config *o, *r;
 1994: 
 1995:   DBG("rt_commit:\n");
 1996:   if (old)
 1997:     {
 1998:       WALK_LIST(o, old->tables)
 1999: 	{
 2000: 	  rtable *ot = o->table;
 2001: 	  if (!ot->deleted)
 2002: 	    {
 2003: 	      struct symbol *sym = cf_find_symbol(new, o->name);
 2004: 	      if (sym && sym->class == SYM_TABLE && !new->shutdown)
 2005: 		{
 2006: 		  DBG("\t%s: same\n", o->name);
 2007: 		  r = sym->def;
 2008: 		  r->table = ot;
 2009: 		  ot->name = r->name;
 2010: 		  ot->config = r;
 2011: 		  if (o->sorted != r->sorted)
 2012: 		    log(L_WARN "Reconfiguration of rtable sorted flag not implemented");
 2013: 		}
 2014: 	      else
 2015: 		{
 2016: 		  DBG("\t%s: deleted\n", o->name);
 2017: 		  ot->deleted = old;
 2018: 		  config_add_obstacle(old);
 2019: 		  rt_lock_table(ot);
 2020: 		  rt_unlock_table(ot);
 2021: 		}
 2022: 	    }
 2023: 	}
 2024:     }
 2025: 
 2026:   WALK_LIST(r, new->tables)
 2027:     if (!r->table)
 2028:       {
 2029: 	rtable *t = mb_alloc(rt_table_pool, sizeof(struct rtable));
 2030: 	DBG("\t%s: created\n", r->name);
 2031: 	rt_setup(rt_table_pool, t, r->name, r);
 2032: 	add_tail(&routing_tables, &t->n);
 2033: 	r->table = t;
 2034:       }
 2035:   DBG("\tdone\n");
 2036: }
 2037: 
 2038: static inline void
 2039: do_feed_baby(struct proto *p, int type, struct announce_hook *h, net *n, rte *e)
 2040: {
 2041:   rte_update_lock();
 2042:   if (type == RA_ACCEPTED)
 2043:     rt_notify_accepted(h, n, e, NULL, NULL, p->refeeding ? 2 : 1);
 2044:   else if (type == RA_MERGED)
 2045:     rt_notify_merged(h, n, NULL, NULL, e, p->refeeding ? e : NULL, p->refeeding);
 2046:   else
 2047:     rt_notify_basic(h, n, e, p->refeeding ? e : NULL, p->refeeding);
 2048:   rte_update_unlock();
 2049: }
 2050: 
 2051: /**
 2052:  * rt_feed_baby - advertise routes to a new protocol
 2053:  * @p: protocol to be fed
 2054:  *
 2055:  * This function performs one pass of advertisement of routes to a newly
 2056:  * initialized protocol. It's called by the protocol code as long as it
 2057:  * has something to do. (We avoid transferring all the routes in a single
 2058:  * pass in order not to monopolize CPU time.)
 2059:  */
 2060: int
 2061: rt_feed_baby(struct proto *p)
 2062: {
 2063:   struct announce_hook *h;
 2064:   struct fib_iterator *fit;
 2065:   int max_feed = 256;
 2066: 
 2067:   if (!p->feed_ahook)			/* Need to initialize first */
 2068:     {
 2069:       if (!p->ahooks)
 2070: 	return 1;
 2071:       DBG("Announcing routes to new protocol %s\n", p->name);
 2072:       p->feed_ahook = p->ahooks;
 2073:       fit = p->feed_iterator = mb_alloc(p->pool, sizeof(struct fib_iterator));
 2074:       goto next_hook;
 2075:     }
 2076:   fit = p->feed_iterator;
 2077: 
 2078: again:
 2079:   h = p->feed_ahook;
 2080:   FIB_ITERATE_START(&h->table->fib, fit, fn)
 2081:     {
 2082:       net *n = (net *) fn;
 2083:       rte *e = n->routes;
 2084:       if (max_feed <= 0)
 2085: 	{
 2086: 	  FIB_ITERATE_PUT(fit, fn);
 2087: 	  return 0;
 2088: 	}
 2089: 
 2090:       /* XXXX perhaps we should change feed for RA_ACCEPTED to not use 'new' */
 2091: 
 2092:       if ((p->accept_ra_types == RA_OPTIMAL) ||
 2093: 	  (p->accept_ra_types == RA_ACCEPTED) ||
 2094: 	  (p->accept_ra_types == RA_MERGED))
 2095: 	if (rte_is_valid(e))
 2096: 	  {
 2097: 	    if (p->export_state != ES_FEEDING)
 2098: 	      return 1;  /* In the meantime, the protocol fell down. */
 2099: 
 2100: 	    do_feed_baby(p, p->accept_ra_types, h, n, e);
 2101: 	    max_feed--;
 2102: 	  }
 2103: 
 2104:       if (p->accept_ra_types == RA_ANY)
 2105: 	for(e = n->routes; e; e = e->next)
 2106: 	  {
 2107: 	    if (p->export_state != ES_FEEDING)
 2108: 	      return 1;  /* In the meantime, the protocol fell down. */
 2109: 
 2110: 	    if (!rte_is_valid(e))
 2111: 	      continue;
 2112: 
 2113: 	    do_feed_baby(p, RA_ANY, h, n, e);
 2114: 	    max_feed--;
 2115: 	  }
 2116:     }
 2117:   FIB_ITERATE_END(fn);
 2118:   p->feed_ahook = h->next;
 2119:   if (!p->feed_ahook)
 2120:     {
 2121:       mb_free(p->feed_iterator);
 2122:       p->feed_iterator = NULL;
 2123:       return 1;
 2124:     }
 2125: 
 2126: next_hook:
 2127:   h = p->feed_ahook;
 2128:   FIB_ITERATE_INIT(fit, &h->table->fib);
 2129:   goto again;
 2130: }
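
/*
 * Sketch of the assumed calling convention (the real driver lives in the
 * protocol core, not in this file; names below are illustrative): the feed
 * is re-run from an event until rt_feed_baby() reports completion, so one
 * pass never advertises more than max_feed routes:
 *
 *   if (!rt_feed_baby(p))
 *     ev_schedule(feed_event);   // hypothetical event, re-run the feed later
 *   else
 *     ...                        // feeding finished, protocol becomes ready
 */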
 2131: 
 2132: /**
 2133:  * rt_feed_baby_abort - abort protocol feeding
 2134:  * @p: protocol
 2135:  *
 2136:  * This function is called by the protocol code when the protocol
 2137:  * stops or ceases to exist before the last iteration of rt_feed_baby()
 2138:  * has finished.
 2139:  */
 2140: void
 2141: rt_feed_baby_abort(struct proto *p)
 2142: {
 2143:   if (p->feed_ahook)
 2144:     {
 2145:       /* Unlink the iterator and exit */
 2146:       fit_get(&p->feed_ahook->table->fib, p->feed_iterator);
 2147:       p->feed_ahook = NULL;
 2148:     }
 2149: }
 2150: 
 2151: 
 2152: static inline unsigned
 2153: ptr_hash(void *ptr)
 2154: {
 2155:   uintptr_t p = (uintptr_t) ptr;
 2156:   return p ^ (p << 8) ^ (p >> 16);
 2157: }
 2158: 
 2159: static inline unsigned
 2160: hc_hash(ip_addr a, rtable *dep)
 2161: {
 2162:   return (ipa_hash(a) ^ ptr_hash(dep)) & 0xffff;
 2163: }
 2164: 
 2165: static inline void
 2166: hc_insert(struct hostcache *hc, struct hostentry *he)
 2167: {
 2168:   uint k = he->hash_key >> hc->hash_shift;
 2169:   he->next = hc->hash_table[k];
 2170:   hc->hash_table[k] = he;
 2171: }
 2172: 
 2173: static inline void
 2174: hc_remove(struct hostcache *hc, struct hostentry *he)
 2175: {
 2176:   struct hostentry **hep;
 2177:   uint k = he->hash_key >> hc->hash_shift;
 2178: 
 2179:   for (hep = &hc->hash_table[k]; *hep != he; hep = &(*hep)->next);
 2180:   *hep = he->next;
 2181: }
 2182: 
 2183: #define HC_DEF_ORDER 10
 2184: #define HC_HI_MARK *4
 2185: #define HC_HI_STEP 2
 2186: #define HC_HI_ORDER 16			/* Must be at most 16 */
 2187: #define HC_LO_MARK /5
 2188: #define HC_LO_STEP 2
 2189: #define HC_LO_ORDER 10
 2190: 
 2191: static void
 2192: hc_alloc_table(struct hostcache *hc, unsigned order)
 2193: {
 2194:   uint hsize = 1 << order;
 2195:   hc->hash_order = order;
 2196:   hc->hash_shift = 16 - order;
 2197:   hc->hash_max = (order >= HC_HI_ORDER) ? ~0U : (hsize HC_HI_MARK);
 2198:   hc->hash_min = (order <= HC_LO_ORDER) ?  0U : (hsize HC_LO_MARK);
 2199: 
 2200:   hc->hash_table = mb_allocz(rt_table_pool, hsize * sizeof(struct hostentry *));
 2201: }
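
/*
 * Worked example derived from the macros above: with HC_DEF_ORDER 10 the
 * table has 1 << 10 = 1024 buckets and hash_shift = 16 - 10 = 6, so the top
 * 10 bits of the 16-bit hash_key select a bucket.  hash_max is 1024 * 4 =
 * 4096 and hash_min is 0 (the order is not above HC_LO_ORDER), i.e. the
 * table grows by HC_HI_STEP orders above 4096 entries; after one growth to
 * order 12 the limits become 4096 * 4 = 16384 and 4096 / 5 = 819.
 */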
 2202: 
 2203: static void
 2204: hc_resize(struct hostcache *hc, unsigned new_order)
 2205: {
 2206:   struct hostentry **old_table = hc->hash_table;
 2207:   struct hostentry *he, *hen;
 2208:   uint old_size = 1 << hc->hash_order;
 2209:   uint i;
 2210: 
 2211:   hc_alloc_table(hc, new_order);
 2212:   for (i = 0; i < old_size; i++)
 2213:     for (he = old_table[i]; he != NULL; he=hen)
 2214:       {
 2215: 	hen = he->next;
 2216: 	hc_insert(hc, he);
 2217:       }
 2218:   mb_free(old_table);
 2219: }
 2220: 
 2221: static struct hostentry *
 2222: hc_new_hostentry(struct hostcache *hc, ip_addr a, ip_addr ll, rtable *dep, unsigned k)
 2223: {
 2224:   struct hostentry *he = sl_alloc(hc->slab);
 2225: 
 2226:   he->addr = a;
 2227:   he->link = ll;
 2228:   he->tab = dep;
 2229:   he->hash_key = k;
 2230:   he->uc = 0;
 2231:   he->src = NULL;
 2232: 
 2233:   add_tail(&hc->hostentries, &he->ln);
 2234:   hc_insert(hc, he);
 2235: 
 2236:   hc->hash_items++;
 2237:   if (hc->hash_items > hc->hash_max)
 2238:     hc_resize(hc, hc->hash_order + HC_HI_STEP);
 2239: 
 2240:   return he;
 2241: }
 2242: 
 2243: static void
 2244: hc_delete_hostentry(struct hostcache *hc, struct hostentry *he)
 2245: {
 2246:   rta_free(he->src);
 2247: 
 2248:   rem_node(&he->ln);
 2249:   hc_remove(hc, he);
 2250:   sl_free(hc->slab, he);
 2251: 
 2252:   hc->hash_items--;
 2253:   if (hc->hash_items < hc->hash_min)
 2254:     hc_resize(hc, hc->hash_order - HC_LO_STEP);
 2255: }
 2256: 
 2257: static void
 2258: rt_init_hostcache(rtable *tab)
 2259: {
 2260:   struct hostcache *hc = mb_allocz(rt_table_pool, sizeof(struct hostcache));
 2261:   init_list(&hc->hostentries);
 2262: 
 2263:   hc->hash_items = 0;
 2264:   hc_alloc_table(hc, HC_DEF_ORDER);
 2265:   hc->slab = sl_new(rt_table_pool, sizeof(struct hostentry));
 2266: 
 2267:   hc->lp = lp_new(rt_table_pool, 1008);
 2268:   hc->trie = f_new_trie(hc->lp, sizeof(struct f_trie_node));
 2269: 
 2270:   tab->hostcache = hc;
 2271: }
 2272: 
 2273: static void
 2274: rt_free_hostcache(rtable *tab)
 2275: {
 2276:   struct hostcache *hc = tab->hostcache;
 2277: 
 2278:   node *n;
 2279:   WALK_LIST(n, hc->hostentries)
 2280:     {
 2281:       struct hostentry *he = SKIP_BACK(struct hostentry, ln, n);
 2282:       rta_free(he->src);
 2283: 
 2284:       if (he->uc)
 2285: 	log(L_ERR "Hostcache is not empty in table %s", tab->name);
 2286:     }
 2287: 
 2288:   rfree(hc->slab);
 2289:   rfree(hc->lp);
 2290:   mb_free(hc->hash_table);
 2291:   mb_free(hc);
 2292: }
 2293: 
 2294: static void
 2295: rt_notify_hostcache(rtable *tab, net *net)
 2296: {
 2297:   struct hostcache *hc = tab->hostcache;
 2298: 
 2299:   if (tab->hcu_scheduled)
 2300:     return;
 2301: 
 2302:   if (trie_match_prefix(hc->trie, net->n.prefix, net->n.pxlen))
 2303:     rt_schedule_hcu(tab);
 2304: }
 2305: 
 2306: static int
 2307: if_local_addr(ip_addr a, struct iface *i)
 2308: {
 2309:   struct ifa *b;
 2310: 
 2311:   WALK_LIST(b, i->addrs)
 2312:     if (ipa_equal(a, b->ip))
 2313:       return 1;
 2314: 
 2315:   return 0;
 2316: }
 2317: 
 2318: static u32 
 2319: rt_get_igp_metric(rte *rt)
 2320: {
 2321:   eattr *ea = ea_find(rt->attrs->eattrs, EA_GEN_IGP_METRIC);
 2322: 
 2323:   if (ea)
 2324:     return ea->u.data;
 2325: 
 2326:   rta *a = rt->attrs;
 2327: 
 2328: #ifdef CONFIG_OSPF
 2329:   if ((a->source == RTS_OSPF) ||
 2330:       (a->source == RTS_OSPF_IA) ||
 2331:       (a->source == RTS_OSPF_EXT1))
 2332:     return rt->u.ospf.metric1;
 2333: #endif
 2334: 
 2335: #ifdef CONFIG_RIP
 2336:   if (a->source == RTS_RIP)
 2337:     return rt->u.rip.metric;
 2338: #endif
 2339: 
 2340:   /* Device routes */
 2341:   if ((a->dest != RTD_ROUTER) && (a->dest != RTD_MULTIPATH))
 2342:     return 0;
 2343: 
 2344:   return IGP_METRIC_UNKNOWN;
 2345: }
 2346: 
 2347: static int
 2348: rt_update_hostentry(rtable *tab, struct hostentry *he)
 2349: {
 2350:   rta *old_src = he->src;
 2351:   int pxlen = 0;
 2352: 
 2353:   /* Reset the hostentry */ 
 2354:   he->src = NULL;
 2355:   he->gw = IPA_NONE;
 2356:   he->dest = RTD_UNREACHABLE;
 2357:   he->igp_metric = 0;
 2358: 
 2359:   net *n = net_route(tab, he->addr, MAX_PREFIX_LENGTH);
 2360:   if (n)
 2361:     {
 2362:       rte *e = n->routes;
 2363:       rta *a = e->attrs;
 2364:       pxlen = n->n.pxlen;
 2365: 
 2366:       if (a->hostentry)
 2367: 	{
 2368: 	  /* Recursive route should not depend on another recursive route */
 2369: 	  log(L_WARN "Next hop address %I resolvable through recursive route for %I/%d",
 2370: 	      he->addr, n->n.prefix, pxlen);
 2371: 	  goto done;
 2372: 	}
 2373: 
 2374:       if (a->dest == RTD_DEVICE)
 2375: 	{
 2376: 	  if (if_local_addr(he->addr, a->iface))
 2377: 	    {
 2378: 	      /* The host address is a local address; this is not valid */
 2379: 	      log(L_WARN "Next hop address %I is a local address of iface %s",
 2380: 		  he->addr, a->iface->name);
 2381: 	      goto done;
 2382: 	    }
 2383: 
 2384: 	  /* The host is directly reachable, use link as a gateway */
 2385: 	  he->gw = he->link;
 2386: 	  he->dest = RTD_ROUTER;
 2387: 	}
 2388:       else
 2389: 	{
 2390: 	  /* The host is reachable through some route entry */
 2391: 	  he->gw = a->gw;
 2392: 	  he->dest = a->dest;
 2393: 	}
 2394: 
 2395:       he->src = rta_clone(a);
 2396:       he->igp_metric = rt_get_igp_metric(e);
 2397:     }
 2398: 
 2399:  done:
 2400:   /* Add a prefix range to the trie */
 2401:   trie_add_prefix(tab->hostcache->trie, he->addr, MAX_PREFIX_LENGTH, pxlen, MAX_PREFIX_LENGTH);
 2402: 
 2403:   rta_free(old_src);
 2404:   return old_src != he->src;
 2405: }
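
/*
 * Added explanation: the prefix range registered above spans from the
 * resolving route's pxlen up to a host route, so trie_match_prefix() in
 * rt_notify_hostcache() fires for updates in @tab that could change which
 * route resolves he->addr, and only then is a hostcache update scheduled.
 */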
 2406: 
 2407: static void
 2408: rt_update_hostcache(rtable *tab)
 2409: {
 2410:   struct hostcache *hc = tab->hostcache;
 2411:   struct hostentry *he;
 2412:   node *n, *x;
 2413: 
 2414:   /* Reset the trie */
 2415:   lp_flush(hc->lp);
 2416:   hc->trie = f_new_trie(hc->lp, sizeof(struct f_trie_node));
 2417: 
 2418:   WALK_LIST_DELSAFE(n, x, hc->hostentries)
 2419:     {
 2420:       he = SKIP_BACK(struct hostentry, ln, n);
 2421:       if (!he->uc)
 2422: 	{
 2423: 	  hc_delete_hostentry(hc, he);
 2424: 	  continue;
 2425: 	}
 2426: 
 2427:       if (rt_update_hostentry(tab, he))
 2428: 	rt_schedule_nhu(he->tab);
 2429:     }
 2430: 
 2431:   tab->hcu_scheduled = 0;
 2432: }
 2433: 
 2434: static struct hostentry *
 2435: rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep)
 2436: {
 2437:   struct hostentry *he;
 2438: 
 2439:   if (!tab->hostcache)
 2440:     rt_init_hostcache(tab);
 2441: 
 2442:   uint k = hc_hash(a, dep);
 2443:   struct hostcache *hc = tab->hostcache;
 2444:   for (he = hc->hash_table[k >> hc->hash_shift]; he != NULL; he = he->next)
 2445:     if (ipa_equal(he->addr, a) && (he->tab == dep))
 2446:       return he;
 2447: 
 2448:   he = hc_new_hostentry(hc, a, ll, dep, k);
 2449:   rt_update_hostentry(tab, he);
 2450:   return he;
 2451: }
 2452: 
 2453: void
 2454: rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr *gw, ip_addr *ll)
 2455: {
 2456:   rta_apply_hostentry(a, rt_get_hostentry(tab, *gw, *ll, dep));
 2457: }
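
/*
 * Illustrative use (hedged; the real callers are protocols such as BGP, and
 * the variable names below are hypothetical): when a protocol builds a &rta
 * whose gateway has to be resolved through another table, it calls
 *
 *   rta_set_recursive_next_hop(p->table, &a, igp_table, &gw, &ll);
 *
 * which binds the &rta to a hostentry in igp_table; whenever the covering
 * route there changes, rt_schedule_nhu() is invoked on the dependent table
 * so the cached next hops get recomputed by rt_next_hop_update() above.
 */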
 2458: 
 2459: 
 2460: /*
 2461:  *  CLI commands
 2462:  */
 2463: 
 2464: static byte *
 2465: rt_format_via(rte *e)
 2466: {
 2467:   rta *a = e->attrs;
 2468: 
 2469:   /* Max text length w/o IP addr and interface name is 16 */
 2470:   static byte via[STD_ADDRESS_P_LENGTH+sizeof(a->iface->name)+16];
 2471: 
 2472:   switch (a->dest)
 2473:     {
 2474:     case RTD_ROUTER:	bsprintf(via, "via %I on %s", a->gw, a->iface->name); break;
 2475:     case RTD_DEVICE:	bsprintf(via, "dev %s", a->iface->name); break;
 2476:     case RTD_BLACKHOLE:	bsprintf(via, "blackhole"); break;
 2477:     case RTD_UNREACHABLE:	bsprintf(via, "unreachable"); break;
 2478:     case RTD_PROHIBIT:	bsprintf(via, "prohibited"); break;
 2479:     case RTD_MULTIPATH:	bsprintf(via, "multipath"); break;
 2480:     default:		bsprintf(via, "???");
 2481:     }
 2482:   return via;
 2483: }
 2484: 
 2485: static void
 2486: rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, ea_list *tmpa)
 2487: {
 2488:   byte from[STD_ADDRESS_P_LENGTH+8];
 2489:   byte tm[TM_DATETIME_BUFFER_SIZE], info[256];
 2490:   rta *a = e->attrs;
 2491:   int primary = (e->net->routes == e);
 2492:   int sync_error = (e->net->n.flags & KRF_SYNC_ERROR);
 2493:   void (*get_route_info)(struct rte *, byte *buf, struct ea_list *attrs);
 2494:   struct mpnh *nh;
 2495: 
 2496:   tm_format_datetime(tm, &config->tf_route, e->lastmod);
 2497:   if (ipa_nonzero(a->from) && !ipa_equal(a->from, a->gw))
 2498:     bsprintf(from, " from %I", a->from);
 2499:   else
 2500:     from[0] = 0;
 2501: 
 2502:   get_route_info = a->src->proto->proto->get_route_info;
 2503:   if (get_route_info || d->verbose)
 2504:     {
 2505:       /* Need to normalize the extended attributes */
 2506:       ea_list *t = tmpa;
 2507:       t = ea_append(t, a->eattrs);
 2508:       tmpa = alloca(ea_scan(t));
 2509:       ea_merge(t, tmpa);
 2510:       ea_sort(tmpa);
 2511:     }
 2512:   if (get_route_info)
 2513:     get_route_info(e, info, tmpa);
 2514:   else
 2515:     bsprintf(info, " (%d)", e->pref);
 2516:   cli_printf(c, -1007, "%-18s %s [%s %s%s]%s%s", ia, rt_format_via(e), a->src->proto->name,
 2517: 	     tm, from, primary ? (sync_error ? " !" : " *") : "", info);
 2518:   for (nh = a->nexthops; nh; nh = nh->next)
 2519:     cli_printf(c, -1007, "\tvia %I on %s weight %d", nh->gw, nh->iface->name, nh->weight + 1);
 2520:   if (d->verbose)
 2521:     rta_show(c, a, tmpa);
 2522: }
 2523: 
 2524: static void
 2525: rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
 2526: {
 2527:   rte *e, *ee;
 2528:   byte ia[STD_ADDRESS_P_LENGTH+8];
 2529:   struct ea_list *tmpa;
 2530:   struct announce_hook *a = NULL;
 2531:   int first = 1;
 2532:   int pass = 0;
 2533: 
 2534:   bsprintf(ia, "%I/%d", n->n.prefix, n->n.pxlen);
 2535: 
 2536:   if (d->export_mode)
 2537:     {
 2538:       if (! d->export_protocol->rt_notify)
 2539: 	return;
 2540: 
 2541:       a = proto_find_announce_hook(d->export_protocol, d->table);
 2542:       if (!a)
 2543: 	return;
 2544:     }
 2545: 
 2546:   for (e = n->routes; e; e = e->next)
 2547:     {
 2548:       if (rte_is_filtered(e) != d->filtered)
 2549: 	continue;
 2550: 
 2551:       d->rt_counter++;
 2552:       d->net_counter += first;
 2553:       first = 0;
 2554: 
 2555:       if (pass)
 2556: 	continue;
 2557: 
 2558:       ee = e;
 2559:       rte_update_lock();		/* We use the update buffer for filtering */
 2560:       tmpa = rte_make_tmp_attrs(e, rte_update_pool);
 2561: 
 2562:       /* Special case for merged export */
 2563:       if ((d->export_mode == RSEM_EXPORT) && (d->export_protocol->accept_ra_types == RA_MERGED))
 2564:         {
 2565: 	  rte *rt_free;
 2566: 	  e = rt_export_merged(a, n, &rt_free, &tmpa, rte_update_pool, 1);
 2567: 	  pass = 1;
 2568: 
 2569: 	  if (!e)
 2570: 	  { e = ee; goto skip; }
 2571: 	}
 2572:       else if (d->export_mode)
 2573: 	{
 2574: 	  struct proto *ep = d->export_protocol;
 2575: 	  int ic = ep->import_control ? ep->import_control(ep, &e, &tmpa, rte_update_pool) : 0;
 2576: 
 2577: 	  if (ep->accept_ra_types == RA_OPTIMAL || ep->accept_ra_types == RA_MERGED)
 2578: 	    pass = 1;
 2579: 
 2580: 	  if (ic < 0)
 2581: 	    goto skip;
 2582: 
 2583: 	  if (d->export_mode > RSEM_PREEXPORT)
 2584: 	    {
 2585: 	      /*
 2586: 	       * FIXME - This shows what should be exported according to current
 2587: 	       * filters, but not what was really exported. 'configure soft'
 2588: 	       * command may change the export filter without updating routes.
 2589: 	       */
 2590: 	      int do_export = (ic > 0) ||
 2591: 		(f_run(a->out_filter, &e, &tmpa, rte_update_pool,
 2592: 		       FF_FORCE_TMPATTR | FF_SILENT) <= F_ACCEPT);
 2593: 
 2594: 	      if (do_export != (d->export_mode == RSEM_EXPORT))
 2595: 		goto skip;
 2596: 
 2597: 	      if ((d->export_mode == RSEM_EXPORT) && (ep->accept_ra_types == RA_ACCEPTED))
 2598: 		pass = 1;
 2599: 	    }
 2600: 	}
 2601: 
 2602:       if (d->show_protocol && (d->show_protocol != e->attrs->src->proto))
 2603: 	goto skip;
 2604: 
 2605:       if (f_run(d->filter, &e, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) > F_ACCEPT)
 2606: 	goto skip;
 2607: 
 2608:       d->show_counter++;
 2609:       if (d->stats < 2)
 2610: 	rt_show_rte(c, ia, e, d, tmpa);
 2611:       ia[0] = 0;
 2612: 
 2613:     skip:
 2614:       if (e != ee)
 2615:       {
 2616: 	rte_free(e);
 2617: 	e = ee;
 2618:       }
 2619:       rte_update_unlock();
 2620: 
 2621:       if (d->primary_only)
 2622: 	break;
 2623:     }
 2624: }
 2625: 
 2626: static void
 2627: rt_show_cont(struct cli *c)
 2628: {
 2629:   struct rt_show_data *d = c->rover;
 2630: #ifdef DEBUGGING
 2631:   unsigned max = 4;
 2632: #else
 2633:   unsigned max = 64;
 2634: #endif
 2635:   struct fib *fib = &d->table->fib;
 2636:   struct fib_iterator *it = &d->fit;
 2637: 
 2638:   FIB_ITERATE_START(fib, it, f)
 2639:     {
 2640:       net *n = (net *) f;
 2641:       if (d->running_on_config && d->running_on_config != config)
 2642: 	{
 2643: 	  cli_printf(c, 8004, "Stopped due to reconfiguration");
 2644: 	  goto done;
 2645: 	}
 2646:       if (d->export_protocol && (d->export_protocol->export_state == ES_DOWN))
 2647: 	{
 2648: 	  cli_printf(c, 8005, "Protocol is down");
 2649: 	  goto done;
 2650: 	}
 2651:       if (!max--)
 2652: 	{
 2653: 	  FIB_ITERATE_PUT(it, f);
 2654: 	  return;
 2655: 	}
 2656:       rt_show_net(c, n, d);
 2657:     }
 2658:   FIB_ITERATE_END(f);
 2659:   if (d->stats)
 2660:     cli_printf(c, 14, "%d of %d routes for %d networks", d->show_counter, d->rt_counter, d->net_counter);
 2661:   else
 2662:     cli_printf(c, 0, "");
 2663: done:
 2664:   c->cont = c->cleanup = NULL;
 2665: }
 2666: 
 2667: static void
 2668: rt_show_cleanup(struct cli *c)
 2669: {
 2670:   struct rt_show_data *d = c->rover;
 2671: 
 2672:   /* Unlink the iterator */
 2673:   fit_get(&d->table->fib, &d->fit);
 2674: }
 2675: 
 2676: void
 2677: rt_show(struct rt_show_data *d)
 2678: {
 2679:   net *n;
 2680: 
 2681:   /* The default is either the master table or the table of the respective protocol */
 2682:   if (!d->table && d->export_protocol) d->table = d->export_protocol->table;
 2683:   if (!d->table && d->show_protocol) d->table = d->show_protocol->table;
 2684:   if (!d->table) d->table = config->master_rtc->table;
 2685: 
 2686:   /* Filtered routes are not exported and have no sensible ordering */
 2687:   if (d->filtered && (d->export_mode || d->primary_only))
 2688:     cli_msg(0, "");
 2689: 
 2690:   if (d->pxlen == 256)
 2691:     {
 2692:       FIB_ITERATE_INIT(&d->fit, &d->table->fib);
 2693:       this_cli->cont = rt_show_cont;
 2694:       this_cli->cleanup = rt_show_cleanup;
 2695:       this_cli->rover = d;
 2696:     }
 2697:   else
 2698:     {
 2699:       if (d->show_for)
 2700: 	n = net_route(d->table, d->prefix, d->pxlen);
 2701:       else
 2702: 	n = net_find(d->table, d->prefix, d->pxlen);
 2703: 
 2704:       if (n)
 2705: 	rt_show_net(this_cli, n, d);
 2706: 
 2707:       if (d->rt_counter)
 2708: 	cli_msg(0, "");
 2709:       else
 2710: 	cli_msg(8001, "Network not in table");
 2711:     }
 2712: }
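
/*
 * Added note on the assumed CLI contract (the cont/cleanup hooks are owned by
 * the CLI core, not by this file): for a whole-table dump rt_show() parks an
 * iterator in the table and registers rt_show_cont() as this_cli->cont, so
 * the CLI keeps re-invoking it between output flushes; each call prints at
 * most 'max' networks and either parks the iterator again with
 * FIB_ITERATE_PUT() or clears the hooks once the walk is finished.
 */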
 2713: 
 2714: /*
 2715:  *  Documentation for functions declared inline in route.h
 2716:  */
 2717: #if 0
 2718: 
 2719: /**
 2720:  * net_find - find a network entry
 2721:  * @tab: a routing table
 2722:  * @addr: address of the network
 2723:  * @len: length of the network prefix
 2724:  *
 2725:  * net_find() looks up the given network in routing table @tab and
 2726:  * returns a pointer to its &net entry or %NULL if no such network
 2727:  * exists.
 2728:  */
 2729: static inline net *net_find(rtable *tab, ip_addr addr, unsigned len)
 2730: { DUMMY; }
 2731: 
 2732: /**
 2733:  * net_get - obtain a network entry
 2734:  * @tab: a routing table
 2735:  * @addr: address of the network
 2736:  * @len: length of the network prefix
 2737:  *
 2738:  * net_get() looks up the given network in routing table @tab and
 2739:  * returns a pointer to its &net entry. If no such entry exists, it's
 2740:  * created.
 2741:  */
 2742: static inline net *net_get(rtable *tab, ip_addr addr, unsigned len)
 2743: { DUMMY; }
 2744: 
 2745: /**
 2746:  * rte_cow - copy a route for writing
 2747:  * @r: a route entry to be copied
 2748:  *
 2749:  * rte_cow() takes a &rte and prepares it for modification. The exact action
 2750:  * taken depends on the flags of the &rte -- if it's a temporary entry, it's
 2751:  * just returned unchanged, else a new temporary entry with the same contents
 2752:  * is created.
 2753:  *
 2754:  * The primary use of this function is inside the filter machinery -- when
 2755:  * a filter wants to modify &rte contents (to change the preference or to
 2756:  * attach another set of attributes), it must ensure that the &rte is not
 2757:  * shared with anyone else (and especially that it isn't stored in any routing
 2758:  * table).
 2759:  *
 2760:  * Result: a pointer to the new writable &rte.
 2761:  */
 2762: static inline rte * rte_cow(rte *r)
 2763: { DUMMY; }
 2764: 
 2765: #endif
