File:  [ELWIX - Embedded LightWeight unIX -] / embedaddon / bird / nest / rt-table.c
Revision 1.1.1.1 (vendor branch)
Tue Aug 22 12:33:54 2017 UTC by misho
Branches: bird, MAIN
CVS tags: v1_6_3p0, v1_6_3, HEAD
bird 1.6.3

    1: /*
    2:  *	BIRD -- Routing Tables
    3:  *
    4:  *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
    5:  *
    6:  *	Can be freely distributed and used under the terms of the GNU GPL.
    7:  */
    8: 
    9: /**
   10:  * DOC: Routing tables
   11:  *
   12:  * Routing tables are probably the most important structures BIRD uses. They
   13:  * hold all the information about known networks, the associated routes and
   14:  * their attributes.
   15:  *
   16:  * There are multiple routing tables (a primary one together with any
   17:  * number of secondary ones if requested by the configuration). Each table
   18:  * is basically a FIB containing entries describing the individual
   19:  * destination networks. For each network (represented by structure &net),
   20:  * there is a one-way linked list of route entries (&rte), the first entry
   21:  * on the list being the best one (i.e., the one we currently use
   22:  * for routing); the order of the other ones is undetermined.
   23:  *
   24:  * The &rte contains information specific to the route (preference, protocol
   25:  * metrics, time of last modification etc.) and a pointer to a &rta structure
   26:  * (see the route attribute module for a precise explanation) holding the
   27:  * remaining route attributes which are expected to be shared by multiple
   28:  * routes in order to conserve memory.
   29:  */
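
/*
 * An illustrative sketch (not part of the original source) of how the
 * structures described above are traversed: for a given &net, the best route
 * is net->routes and the remaining entries follow via rte->next. The helper
 * name is hypothetical.
 *
 *   static void
 *   example_walk_net(net *n)
 *   {
 *     rte *e;
 *     for (e = n->routes; e; e = e->next)   // first entry is the current best route
 *       debug("%I/%d pref %d\n", n->n.prefix, n->n.pxlen, e->pref);
 *   }
 */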
   30: 
   31: #undef LOCAL_DEBUG
   32: 
   33: #include "nest/bird.h"
   34: #include "nest/route.h"
   35: #include "nest/protocol.h"
   36: #include "nest/cli.h"
   37: #include "nest/iface.h"
   38: #include "lib/resource.h"
   39: #include "lib/event.h"
   40: #include "lib/string.h"
   41: #include "conf/conf.h"
   42: #include "filter/filter.h"
   43: #include "lib/string.h"
   44: #include "lib/alloca.h"
   45: 
   46: pool *rt_table_pool;
   47: 
   48: static slab *rte_slab;
   49: static linpool *rte_update_pool;
   50: 
   51: static list routing_tables;
   52: 
   53: static byte *rt_format_via(rte *e);
   54: static void rt_free_hostcache(rtable *tab);
   55: static void rt_notify_hostcache(rtable *tab, net *net);
   56: static void rt_update_hostcache(rtable *tab);
   57: static void rt_next_hop_update(rtable *tab);
   58: static inline int rt_prune_table(rtable *tab);
   59: static inline void rt_schedule_gc(rtable *tab);
   60: static inline void rt_schedule_prune(rtable *tab);
   61: 
   62: 
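/* Build the temporary (protocol-dependent) attribute list for @rt via the
   source protocol's make_tmp_attrs hook, if it defines one */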
   63: static inline struct ea_list *
   64: make_tmp_attrs(struct rte *rt, struct linpool *pool)
   65: {
   66:   struct ea_list *(*mta)(struct rte *rt, struct linpool *pool);
   67:   mta = rt->attrs->src->proto->make_tmp_attrs;
   68:   return mta ? mta(rt, pool) : NULL;
   69: }
   70: 
   71: /* Like fib_route(), but skips empty net entries */
   72: static net *
   73: net_route(rtable *tab, ip_addr a, int len)
   74: {
   75:   ip_addr a0;
   76:   net *n;
   77: 
   78:   while (len >= 0)
   79:     {
   80:       a0 = ipa_and(a, ipa_mkmask(len));
   81:       n = fib_find(&tab->fib, &a0, len);
   82:       if (n && rte_is_valid(n->routes))
   83: 	return n;
   84:       len--;
   85:     }
   86:   return NULL;
   87: }
   88: 
   89: static void
   90: rte_init(struct fib_node *N)
   91: {
   92:   net *n = (net *) N;
   93: 
   94:   N->flags = 0;
   95:   n->routes = NULL;
   96: }
   97: 
   98: /**
   99:  * rte_find - find a route
  100:  * @net: network node
  101:  * @src: route source
  102:  *
  103:  * The rte_find() function returns a route for destination @net
  104:  * which is from route source @src.
  105:  */
  106: rte *
  107: rte_find(net *net, struct rte_src *src)
  108: {
  109:   rte *e = net->routes;
  110: 
  111:   while (e && e->attrs->src != src)
  112:     e = e->next;
  113:   return e;
  114: }
  115: 
  116: /**
  117:  * rte_get_temp - get a temporary &rte
  118:  * @a: attributes to assign to the new route (a &rta; in case it's
  119:  * un-cached, rte_update() will create a cached copy automatically)
  120:  *
  121:  * Create a temporary &rte and bind it with the attributes @a.
  122:  * Also set route preference to the default preference set for
  123:  * the protocol.
  124:  */
  125: rte *
  126: rte_get_temp(rta *a)
  127: {
  128:   rte *e = sl_alloc(rte_slab);
  129: 
  130:   e->attrs = a;
  131:   e->flags = 0;
  132:   e->pref = a->src->proto->preference;
  133:   return e;
  134: }
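
/*
 * A minimal usage sketch (assumption, not from the original file) of the
 * pattern described above, using an un-cached &rta (aflags left zero), so
 * rte_update() will create the cached copy later. Field values are
 * illustrative only.
 *
 *   rta a = {
 *     .src = p->main_source,
 *     .source = RTS_STATIC,
 *     .scope = SCOPE_UNIVERSE,
 *     .cast = RTC_UNICAST,
 *     .dest = RTD_BLACKHOLE,
 *   };
 *   rte *e = rte_get_temp(&a);   // preference is taken from the protocol
 *   e->pflags = 0;
 */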
  135: 
  136: rte *
  137: rte_do_cow(rte *r)
  138: {
  139:   rte *e = sl_alloc(rte_slab);
  140: 
  141:   memcpy(e, r, sizeof(rte));
  142:   e->attrs = rta_clone(r->attrs);
  143:   e->flags = 0;
  144:   return e;
  145: }
  146: 
  147: /**
  148:  * rte_cow_rta - get a private writable copy of &rte with writable &rta
  149:  * @r: a route entry to be copied
  150:  * @lp: a linpool from which to allocate &rta
  151:  *
  152:  * rte_cow_rta() takes a &rte and prepares it and its associated &rta for
  153:  * modification. There are three possibilities: First, both &rte and &rta are
  154:  * private copies; in that case they are returned unchanged. Second, &rte is
  155:  * a private copy but &rta is cached; in that case &rta is duplicated using
  156:  * rta_do_cow(). Third, &rte is shared and &rta is cached; in that case
  157:  * both structures are duplicated by rte_do_cow() and rta_do_cow().
  158:  *
  159:  * Note that in the second case, the cached &rta loses one reference, while the
  160:  * private copy created by rta_do_cow() is a shallow copy sharing indirect data
  161:  * (eattrs, nexthops, ...) with it. To work properly, the original shared &rta
  162:  * should keep another reference for the lifetime of the created private copy.
  163:  *
  164:  * Result: a pointer to the new writable &rte with writable &rta.
  165:  */
  166: rte *
  167: rte_cow_rta(rte *r, linpool *lp)
  168: {
  169:   if (!rta_is_cached(r->attrs))
  170:     return r;
  171: 
  172:   rte *e = rte_cow(r);
  173:   rta *a = rta_do_cow(r->attrs, lp);
  174:   rta_free(e->attrs);
  175:   e->attrs = a;
  176:   return e;
  177: }
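
/*
 * Typical use of rte_cow_rta(), as in rt_export_merged() below: obtain a
 * writable copy before touching attribute fields (sketch only, nhs is
 * assumed to be a prepared next hop list).
 *
 *   rte *e = rte_cow_rta(r, pool);
 *   e->attrs->dest = RTD_MULTIPATH;
 *   e->attrs->nexthops = nhs;
 */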
  178: 
  179: static int				/* Actually better or at least as good as */
  180: rte_better(rte *new, rte *old)
  181: {
  182:   int (*better)(rte *, rte *);
  183: 
  184:   if (!rte_is_valid(old))
  185:     return 1;
  186:   if (!rte_is_valid(new))
  187:     return 0;
  188: 
  189:   if (new->pref > old->pref)
  190:     return 1;
  191:   if (new->pref < old->pref)
  192:     return 0;
  193:   if (new->attrs->src->proto->proto != old->attrs->src->proto->proto)
  194:     {
  195:       /*
  196:        *  If the user has configured protocol preferences such that two different protocols
  197:        *  have the same preference, try to break the tie by comparing addresses. Not too
  198:        *  useful, but it keeps the ordering of routes unambiguous.
  199:        */
  200:       return new->attrs->src->proto->proto > old->attrs->src->proto->proto;
  201:     }
  202:   if (better = new->attrs->src->proto->rte_better)
  203:     return better(new, old);
  204:   return 0;
  205: }
  206: 
  207: static int
  208: rte_mergable(rte *pri, rte *sec)
  209: {
  210:   int (*mergable)(rte *, rte *);
  211: 
  212:   if (!rte_is_valid(pri) || !rte_is_valid(sec))
  213:     return 0;
  214: 
  215:   if (pri->pref != sec->pref)
  216:     return 0;
  217: 
  218:   if (pri->attrs->src->proto->proto != sec->attrs->src->proto->proto)
  219:     return 0;
  220: 
  221:   if (mergable = pri->attrs->src->proto->rte_mergable)
  222:     return mergable(pri, sec);
  223: 
  224:   return 0;
  225: }
  226: 
  227: static void
  228: rte_trace(struct proto *p, rte *e, int dir, char *msg)
  229: {
  230:   log(L_TRACE "%s %c %s %I/%d %s", p->name, dir, msg, e->net->n.prefix, e->net->n.pxlen, rt_format_via(e));
  231: }
  232: 
  233: static inline void
  234: rte_trace_in(uint flag, struct proto *p, rte *e, char *msg)
  235: {
  236:   if (p->debug & flag)
  237:     rte_trace(p, e, '>', msg);
  238: }
  239: 
  240: static inline void
  241: rte_trace_out(uint flag, struct proto *p, rte *e, char *msg)
  242: {
  243:   if (p->debug & flag)
  244:     rte_trace(p, e, '<', msg);
  245: }
  246: 
  247: static rte *
  248: export_filter_(struct announce_hook *ah, rte *rt0, rte **rt_free, ea_list **tmpa, linpool *pool, int silent)
  249: {
  250:   struct proto *p = ah->proto;
  251:   struct filter *filter = ah->out_filter;
  252:   struct proto_stats *stats = ah->stats;
  253:   ea_list *tmpb = NULL;
  254:   rte *rt;
  255:   int v;
  256: 
  257:   rt = rt0;
  258:   *rt_free = NULL;
  259: 
  260:   if (!tmpa)
  261:     tmpa = &tmpb;
  262: 
  263:   *tmpa = make_tmp_attrs(rt, pool);
  264: 
  265:   v = p->import_control ? p->import_control(p, &rt, tmpa, pool) : 0;
  266:   if (v < 0)
  267:     {
  268:       if (silent)
  269: 	goto reject;
  270: 
  271:       stats->exp_updates_rejected++;
  272:       if (v == RIC_REJECT)
  273: 	rte_trace_out(D_FILTERS, p, rt, "rejected by protocol");
  274:       goto reject;
  275:     }
  276:   if (v > 0)
  277:     {
  278:       if (!silent)
  279: 	rte_trace_out(D_FILTERS, p, rt, "forced accept by protocol");
  280:       goto accept;
  281:     }
  282: 
  283:   v = filter && ((filter == FILTER_REJECT) ||
  284: 		 (f_run(filter, &rt, tmpa, pool, FF_FORCE_TMPATTR) > F_ACCEPT));
  285:   if (v)
  286:     {
  287:       if (silent)
  288: 	goto reject;
  289: 
  290:       stats->exp_updates_filtered++;
  291:       rte_trace_out(D_FILTERS, p, rt, "filtered out");
  292:       goto reject;
  293:     }
  294: 
  295:  accept:
  296:   if (rt != rt0)
  297:     *rt_free = rt;
  298:   return rt;
  299: 
  300:  reject:
  301:   /* Discard temporary rte */
  302:   if (rt != rt0)
  303:     rte_free(rt);
  304:   return NULL;
  305: }
  306: 
  307: static inline rte *
  308: export_filter(struct announce_hook *ah, rte *rt0, rte **rt_free, ea_list **tmpa, int silent)
  309: {
  310:   return export_filter_(ah, rt0, rt_free, tmpa, rte_update_pool, silent);
  311: }
  312: 
  313: static void
  314: do_rt_notify(struct announce_hook *ah, net *net, rte *new, rte *old, ea_list *tmpa, int refeed)
  315: {
  316:   struct proto *p = ah->proto;
  317:   struct proto_stats *stats = ah->stats;
  318: 
  319: 
  320:   /*
  321:    * First, apply export limit.
  322:    *
  323:    * Export route limits have several problems. Because the exp_routes
  324:    * counter is reset before refeed, we don't really know whether the
  325:    * limit is breached and whether the update is new or not. Therefore
  326:    * the number of really exported routes may exceed the limit
  327:    * temporarily (routes exported before plus new routes in refeed).
  328:    *
  329:    * A minor advantage is that if the limit is decreased and refeed is
  330:    * requested, the number of exported routes really decreases.
  331:    *
  332:    * The second problem is that with export limits, we don't know whether
  333:    * old was really exported (it might be blocked by the limit). When a
  334:    * withdraw is exported, we announce it even when the previous
  335:    * update was blocked. This is not a big issue, but the same problem
  336:    * arises in updating the exp_routes counter. Therefore, to be consistent
  337:    * in increases and decreases of exp_routes, we count exported routes
  338:    * regardless of blocking by limits.
  339:    *
  340:    * A similar problem exists in handling updates - when a new route is
  341:    * received and blocking is active, the route would be blocked, but
  342:    * when an update for the route is received later, the update
  343:    * would be propagated (as old != NULL). Therefore, we have to block
  344:    * non-new updates as well (contrary to import blocking).
  345:    */
  346: 
  347:   struct proto_limit *l = ah->out_limit;
  348:   if (l && new)
  349:     {
  350:       if ((!old || refeed) && (stats->exp_routes >= l->limit))
  351: 	proto_notify_limit(ah, l, PLD_OUT, stats->exp_routes);
  352: 
  353:       if (l->state == PLS_BLOCKED)
  354: 	{
  355: 	  stats->exp_routes++;	/* see note above */
  356: 	  stats->exp_updates_rejected++;
  357: 	  rte_trace_out(D_FILTERS, p, new, "rejected [limit]");
  358: 	  new = NULL;
  359: 
  360: 	  if (!old)
  361: 	    return;
  362: 	}
  363:     }
  364: 
  365: 
  366:   if (new)
  367:     stats->exp_updates_accepted++;
  368:   else
  369:     stats->exp_withdraws_accepted++;
  370: 
  371:   /* Hack: We do not decrease exp_routes during refeed, we instead
  372:      reset exp_routes at the start of refeed. */
  373:   if (new)
  374:     stats->exp_routes++;
  375:   if (old && !refeed)
  376:     stats->exp_routes--;
  377: 
  378:   if (p->debug & D_ROUTES)
  379:     {
  380:       if (new && old)
  381: 	rte_trace_out(D_ROUTES, p, new, "replaced");
  382:       else if (new)
  383: 	rte_trace_out(D_ROUTES, p, new, "added");
  384:       else if (old)
  385: 	rte_trace_out(D_ROUTES, p, old, "removed");
  386:     }
  387:   if (!new)
  388:     p->rt_notify(p, ah->table, net, NULL, old, NULL);
  389:   else if (tmpa)
  390:     {
  391:       ea_list *t = tmpa;
  392:       while (t->next)
  393: 	t = t->next;
  394:       t->next = new->attrs->eattrs;
  395:       p->rt_notify(p, ah->table, net, new, old, tmpa);
  396:       t->next = NULL;
  397:     }
  398:   else
  399:     p->rt_notify(p, ah->table, net, new, old, new->attrs->eattrs);
  400: }
  401: 
  402: static void
  403: rt_notify_basic(struct announce_hook *ah, net *net, rte *new0, rte *old0, int refeed)
  404: {
  405:   struct proto *p = ah->proto;
  406:   struct proto_stats *stats = ah->stats;
  407: 
  408:   rte *new = new0;
  409:   rte *old = old0;
  410:   rte *new_free = NULL;
  411:   rte *old_free = NULL;
  412:   ea_list *tmpa = NULL;
  413: 
  414:   if (new)
  415:     stats->exp_updates_received++;
  416:   else
  417:     stats->exp_withdraws_received++;
  418: 
  419:   /*
  420:    * This is a tricky part - we don't know whether route 'old' was
  421:    * exported to protocol 'p' or was filtered out by the export filter.
  422:    * We try to run the export filter to find out, so that we have a correct
  423:    * value in the 'old' argument of rte_update (and a proper filter value).
  424:    *
  425:    * FIXME - this is broken because 'configure soft' may change
  426:    * filters but keep routes. Refeed is expected to be called after a
  427:    * change of the filters and with old == new, therefore we do not
  428:    * even try to run the filter on an old route. This may lead to
  429:    * 'spurious withdraws' but ensures that there are no 'missing
  430:    * withdraws'.
  431:    *
  432:    * This is not completely safe as there is a window between
  433:    * reconfiguration and the end of refeed - if a newly filtered
  434:    * route disappears during this period, proper withdraw is not
  435:    * sent (because old would be also filtered) and the route is
  436:    * not refeeded (because it disappeared before that).
  437:    */
  438: 
  439:   if (new)
  440:     new = export_filter(ah, new, &new_free, &tmpa, 0);
  441: 
  442:   if (old && !refeed)
  443:     old = export_filter(ah, old, &old_free, NULL, 1);
  444: 
  445:   if (!new && !old)
  446:   {
  447:     /*
  448:      * As mentioned above, 'old' value may be incorrect in some race conditions.
  449:      * We generally ignore it with the exception of withdraw to pipe protocol.
  450:      * In that case we rather propagate unfiltered withdraws regardless of
  451:      * export filters to ensure that when a protocol is flushed, its routes are
  452:      * removed from all tables. Possible spurious unfiltered withdraws are not
  453:      * problem here as they are ignored if there is no corresponding route at
  454:      * the other end of the pipe. We directly call rt_notify() hook instead of
  455:      * do_rt_notify() to avoid logging and stat counters.
  456:      */
  457: 
  458: #ifdef CONFIG_PIPE
  459:     if ((p->proto == &proto_pipe) && !new0 && (p != old0->sender->proto))
  460:       p->rt_notify(p, ah->table, net, NULL, old0, NULL);
  461: #endif
  462: 
  463:     return;
  464:   }
  465: 
  466:   do_rt_notify(ah, net, new, old, tmpa, refeed);
  467: 
  468:   /* Discard temporary rte's */
  469:   if (new_free)
  470:     rte_free(new_free);
  471:   if (old_free)
  472:     rte_free(old_free);
  473: }
  474: 
  475: static void
  476: rt_notify_accepted(struct announce_hook *ah, net *net, rte *new_changed, rte *old_changed, rte *before_old, int feed)
  477: {
  478:   // struct proto *p = ah->proto;
  479:   struct proto_stats *stats = ah->stats;
  480: 
  481:   rte *r;
  482:   rte *new_best = NULL;
  483:   rte *old_best = NULL;
  484:   rte *new_free = NULL;
  485:   rte *old_free = NULL;
  486:   ea_list *tmpa = NULL;
  487: 
  488:   /* Used to track whether we met the old_changed position. If before_old is NULL,
  489:      old_changed was the first route and we met it implicitly before the current best route. */
  490:   int old_meet = old_changed && !before_old;
  491: 
  492:   /* Note that before_old is either NULL or a valid (not rejected) route.
  493:      If old_changed is valid, before_old has to be too. If the old changed route
  494:      was not valid, the caller must use NULL for both old_changed and before_old. */
  495: 
  496:   if (new_changed)
  497:     stats->exp_updates_received++;
  498:   else
  499:     stats->exp_withdraws_received++;
  500: 
  501:   /* First, find the new_best route - first accepted by filters */
  502:   for (r=net->routes; rte_is_valid(r); r=r->next)
  503:     {
  504:       if (new_best = export_filter(ah, r, &new_free, &tmpa, 0))
  505: 	break;
  506: 
  507:       /* Note if we walked around the position of old_changed route */
  508:       if (r == before_old)
  509: 	old_meet = 1;
  510:     }
  511: 
  512:   /*
  513:    * Second, handle the feed case. That means we do not care about
  514:    * old_best. It is NULL for feed, and the new_best for refeed.
  515:    * For refeed, there is a hack similar to the one in rt_notify_basic()
  516:    * to ensure withdraws in case of changed filters.
  517:    */
  518:   if (feed)
  519:     {
  520:       if (feed == 2)	/* refeed */
  521: 	old_best = new_best ? new_best :
  522: 	  (rte_is_valid(net->routes) ? net->routes : NULL);
  523:       else
  524: 	old_best = NULL;
  525: 
  526:       if (!new_best && !old_best)
  527: 	return;
  528: 
  529:       goto found;
  530:     }
  531: 
  532:   /*
  533:    * Now, we find the old_best route. Generally, it is the same as the
  534:    * new_best, unless new_best is the same as new_changed or
  535:    * old_changed is accepted before new_best.
  536:    *
  537:    * There are four cases:
  538:    *
  539:    * - We would find and accept old_changed before new_best, therefore
  540:    *   old_changed is old_best. In the remaining cases we assume this
  541:    *   is not true.
  542:    *
  543:    * - We found no new_best, therefore there is also no old_best and
  544:    *   we ignore this withdraw.
  545:    *
  546:    * - We found new_best different from new_changed, therefore
  547:    *   old_best is the same as new_best and we ignore this update.
  548:    *
  549:    * - We found new_best the same as new_changed, therefore it cannot
  550:    *   be old_best and we have to continue the search for old_best.
  551:    */
  552: 
  553:   /* First case */
  554:   if (old_meet)
  555:     if (old_best = export_filter(ah, old_changed, &old_free, NULL, 1))
  556:       goto found;
  557: 
  558:   /* Second case */
  559:   if (!new_best)
  560:     return;
  561: 
  562:   /* Third case, we use r instead of new_best, because export_filter() could change it */
  563:   if (r != new_changed)
  564:     {
  565:       if (new_free)
  566: 	rte_free(new_free);
  567:       return;
  568:     }
  569: 
  570:   /* Fourth case */
  571:   for (r=r->next; rte_is_valid(r); r=r->next)
  572:     {
  573:       if (old_best = export_filter(ah, r, &old_free, NULL, 1))
  574: 	goto found;
  575: 
  576:       if (r == before_old)
  577: 	if (old_best = export_filter(ah, old_changed, &old_free, NULL, 1))
  578: 	  goto found;
  579:     }
  580: 
  581:   /* Implicitly, old_best is NULL and new_best is non-NULL */
  582: 
  583:  found:
  584:   do_rt_notify(ah, net, new_best, old_best, tmpa, (feed == 2));
  585: 
  586:   /* Discard temporary rte's */
  587:   if (new_free)
  588:     rte_free(new_free);
  589:   if (old_free)
  590:     rte_free(old_free);
  591: }
  592: 
  593: 
  594: static struct mpnh *
  595: mpnh_merge_rta(struct mpnh *nhs, rta *a, linpool *pool, int max)
  596: {
  597:   struct mpnh nh = { .gw = a->gw, .iface = a->iface };
  598:   struct mpnh *nh2 = (a->dest == RTD_MULTIPATH) ? a->nexthops : &nh;
  599:   return mpnh_merge(nhs, nh2, 1, 0, max, pool);
  600: }
  601: 
  602: rte *
  603: rt_export_merged(struct announce_hook *ah, net *net, rte **rt_free, ea_list **tmpa, linpool *pool, int silent)
  604: {
  605:   // struct proto *p = ah->proto;
  606:   struct mpnh *nhs = NULL;
  607:   rte *best0, *best, *rt0, *rt, *tmp;
  608: 
  609:   best0 = net->routes;
  610:   *rt_free = NULL;
  611: 
  612:   if (!rte_is_valid(best0))
  613:     return NULL;
  614: 
  615:   best = export_filter_(ah, best0, rt_free, tmpa, pool, silent);
  616: 
  617:   if (!best || !rte_is_reachable(best))
  618:     return best;
  619: 
  620:   for (rt0 = best0->next; rt0; rt0 = rt0->next)
  621:   {
  622:     if (!rte_mergable(best0, rt0))
  623:       continue;
  624: 
  625:     rt = export_filter_(ah, rt0, &tmp, NULL, pool, 1);
  626: 
  627:     if (!rt)
  628:       continue;
  629: 
  630:     if (rte_is_reachable(rt))
  631:       nhs = mpnh_merge_rta(nhs, rt->attrs, pool, ah->proto->merge_limit);
  632: 
  633:     if (tmp)
  634:       rte_free(tmp);
  635:   }
  636: 
  637:   if (nhs)
  638:   {
  639:     nhs = mpnh_merge_rta(nhs, best->attrs, pool, ah->proto->merge_limit);
  640: 
  641:     if (nhs->next)
  642:     {
  643:       best = rte_cow_rta(best, pool);
  644:       best->attrs->dest = RTD_MULTIPATH;
  645:       best->attrs->nexthops = nhs;
  646:     }
  647:   }
  648: 
  649:   if (best != best0)
  650:     *rt_free = best;
  651: 
  652:   return best;
  653: }
  654: 
  655: 
  656: static void
  657: rt_notify_merged(struct announce_hook *ah, net *net, rte *new_changed, rte *old_changed,
  658: 		 rte *new_best, rte*old_best, int refeed)
  659: {
  660:   // struct proto *p = ah->proto;
  661: 
  662:   rte *new_best_free = NULL;
  663:   rte *old_best_free = NULL;
  664:   rte *new_changed_free = NULL;
  665:   rte *old_changed_free = NULL;
  666:   ea_list *tmpa = NULL;
  667: 
  668:   /* We assume that all rte arguments are either NULL or rte_is_valid() */
  669: 
  670:   /* This check should be done by the caller */
  671:   if (!new_best && !old_best)
  672:     return;
  673: 
  674:   /* Check whether the change is relevant to the merged route */
  675:   if ((new_best == old_best) && !refeed)
  676:   {
  677:     new_changed = rte_mergable(new_best, new_changed) ?
  678:       export_filter(ah, new_changed, &new_changed_free, NULL, 1) : NULL;
  679: 
  680:     old_changed = rte_mergable(old_best, old_changed) ?
  681:       export_filter(ah, old_changed, &old_changed_free, NULL, 1) : NULL;
  682: 
  683:     if (!new_changed && !old_changed)
  684:       return;
  685:   }
  686: 
  687:   if (new_best)
  688:     ah->stats->exp_updates_received++;
  689:   else
  690:     ah->stats->exp_withdraws_received++;
  691: 
  692:   /* Prepare new merged route */
  693:   if (new_best)
  694:     new_best = rt_export_merged(ah, net, &new_best_free, &tmpa, rte_update_pool, 0);
  695: 
  696:   /* Prepare old merged route (without proper merged next hops) */
  697:   /* There are some issues with running filter on old route - see rt_notify_basic() */
  698:   if (old_best && !refeed)
  699:     old_best = export_filter(ah, old_best, &old_best_free, NULL, 1);
  700: 
  701:   if (new_best || old_best)
  702:     do_rt_notify(ah, net, new_best, old_best, tmpa, refeed);
  703: 
  704:   /* Discard temporary rte's */
  705:   if (new_best_free)
  706:     rte_free(new_best_free);
  707:   if (old_best_free)
  708:     rte_free(old_best_free);
  709:   if (new_changed_free)
  710:     rte_free(new_changed_free);
  711:   if (old_changed_free)
  712:     rte_free(old_changed_free);
  713: }
  714: 
  715: 
  716: /**
  717:  * rte_announce - announce a routing table change
  718:  * @tab: table the route has been added to
  719:  * @type: type of route announcement (RA_OPTIMAL, RA_ANY, RA_ACCEPTED or RA_MERGED)
  720:  * @net: network in question
  721:  * @new: the new route to be announced
  722:  * @old: the previous route for the same network
  723:  * @new_best: the new best route for the same network
  724:  * @old_best: the previous best route for the same network
  725:  * @before_old: The previous route before @old for the same network.
  726:  * 		If @before_old is NULL, @old was the first.
  727:  *
  728:  * This function gets a routing table update and announces it
  729:  * to all protocols that accept the given type of route announcement
  730:  * and are connected to the same table by their announcement hooks.
  731:  *
  732:  * Route announcement of type %RA_OPTIMAL is generated when the optimal
  733:  * route (in routing table @tab) changes. In that case @old stores the
  734:  * old optimal route.
  735:  *
  736:  * Route announcement of type %RA_ANY is generated when any route (in
  737:  * routing table @tab) changes. In that case @old stores the old route
  738:  * from the same protocol.
  739:  *
  740:  * For each appropriate protocol, we first call its import_control()
  741:  * hook which performs basic checks on the route (each protocol has a
  742:  * right to veto or force accept of the route before any filter is
  743:  * asked) and adds default values of attributes specific to the new
  744:  * protocol (metrics, tags etc.).  Then it consults the protocol's
  745:  * export filter and if it accepts the route, the rt_notify() hook of
  746:  * the protocol gets called.
  747:  */
  748: static void
  749: rte_announce(rtable *tab, unsigned type, net *net, rte *new, rte *old,
  750: 	     rte *new_best, rte *old_best, rte *before_old)
  751: {
  752:   if (!rte_is_valid(new))
  753:     new = NULL;
  754: 
  755:   if (!rte_is_valid(old))
  756:     old = before_old = NULL;
  757: 
  758:   if (!rte_is_valid(new_best))
  759:     new_best = NULL;
  760: 
  761:   if (!rte_is_valid(old_best))
  762:     old_best = NULL;
  763: 
  764:   if (!old && !new)
  765:     return;
  766: 
  767:   if (type == RA_OPTIMAL)
  768:     {
  769:       if (new)
  770: 	new->attrs->src->proto->stats.pref_routes++;
  771:       if (old)
  772: 	old->attrs->src->proto->stats.pref_routes--;
  773: 
  774:       if (tab->hostcache)
  775: 	rt_notify_hostcache(tab, net);
  776:     }
  777: 
  778:   struct announce_hook *a;
  779:   WALK_LIST(a, tab->hooks)
  780:     {
  781:       ASSERT(a->proto->export_state != ES_DOWN);
  782:       if (a->proto->accept_ra_types == type)
  783: 	if (type == RA_ACCEPTED)
  784: 	  rt_notify_accepted(a, net, new, old, before_old, 0);
  785: 	else if (type == RA_MERGED)
  786: 	  rt_notify_merged(a, net, new, old, new_best, old_best, 0);
  787: 	else
  788: 	  rt_notify_basic(a, net, new, old, 0);
  789:     }
  790: }
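
/*
 * For reference, the rt_notify hook invoked through do_rt_notify() above has
 * the following shape in a protocol; this skeleton is only an illustrative
 * sketch and the function name is hypothetical.
 *
 *   static void
 *   example_rt_notify(struct proto *p, struct rtable *table UNUSED, net *n,
 *                     rte *new, rte *old, ea_list *attrs UNUSED)
 *   {
 *     if (new)
 *       log(L_TRACE "%s: %I/%d updated", p->name, n->n.prefix, n->n.pxlen);
 *     else if (old)
 *       log(L_TRACE "%s: %I/%d withdrawn", p->name, n->n.prefix, n->n.pxlen);
 *   }
 */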
  791: 
  792: static inline int
  793: rte_validate(rte *e)
  794: {
  795:   int c;
  796:   net *n = e->net;
  797: 
  798:   if ((n->n.pxlen > BITS_PER_IP_ADDRESS) || !ip_is_prefix(n->n.prefix,n->n.pxlen))
  799:     {
  800:       log(L_WARN "Ignoring bogus prefix %I/%d received via %s",
  801: 	  n->n.prefix, n->n.pxlen, e->sender->proto->name);
  802:       return 0;
  803:     }
  804: 
  805:   c = ipa_classify_net(n->n.prefix);
  806:   if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
  807:     {
  808:       log(L_WARN "Ignoring bogus route %I/%d received via %s",
  809: 	  n->n.prefix, n->n.pxlen, e->sender->proto->name);
  810:       return 0;
  811:     }
  812: 
  813:   if ((e->attrs->dest == RTD_MULTIPATH) && !mpnh_is_sorted(e->attrs->nexthops))
  814:     {
  815:       log(L_WARN "Ignoring unsorted multipath route %I/%d received via %s",
  816: 	  n->n.prefix, n->n.pxlen, e->sender->proto->name);
  817:       return 0;
  818:     }
  819: 
  820:   return 1;
  821: }
  822: 
  823: /**
  824:  * rte_free - delete a &rte
  825:  * @e: &rte to be deleted
  826:  *
  827:  * rte_free() deletes the given &rte from the routing table it's linked to.
  828:  */
  829: void
  830: rte_free(rte *e)
  831: {
  832:   if (rta_is_cached(e->attrs))
  833:     rta_free(e->attrs);
  834:   sl_free(rte_slab, e);
  835: }
  836: 
  837: static inline void
  838: rte_free_quick(rte *e)
  839: {
  840:   rta_free(e->attrs);
  841:   sl_free(rte_slab, e);
  842: }
  843: 
  844: static int
  845: rte_same(rte *x, rte *y)
  846: {
  847:   return
  848:     x->attrs == y->attrs &&
  849:     x->flags == y->flags &&
  850:     x->pflags == y->pflags &&
  851:     x->pref == y->pref &&
  852:     (!x->attrs->src->proto->rte_same || x->attrs->src->proto->rte_same(x, y));
  853: }
  854: 
  855: static inline int rte_is_ok(rte *e) { return e && !rte_is_filtered(e); }
  856: 
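/* Core of the import path: replace (or withdraw, when @new is NULL) the route
   from @src in @net, apply the rx/import limits, re-elect the best route and
   announce the resulting change */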
  857: static void
  858: rte_recalculate(struct announce_hook *ah, net *net, rte *new, struct rte_src *src)
  859: {
  860:   struct proto *p = ah->proto;
  861:   struct rtable *table = ah->table;
  862:   struct proto_stats *stats = ah->stats;
  863:   static struct tbf rl_pipe = TBF_DEFAULT_LOG_LIMITS;
  864:   rte *before_old = NULL;
  865:   rte *old_best = net->routes;
  866:   rte *old = NULL;
  867:   rte **k;
  868: 
  869:   k = &net->routes;			/* Find and remove original route from the same protocol */
  870:   while (old = *k)
  871:     {
  872:       if (old->attrs->src == src)
  873: 	{
  874: 	  /* If there is the same route in the routing table but from
  875: 	   * a different sender, then there are two paths from the
  876: 	   * source protocol to this routing table through transparent
  877: 	   * pipes, which is not allowed.
  878: 	   *
  879: 	   * We log that and ignore the route. If it is a withdraw, we
  880: 	   * ignore it completely (there might be 'spurious withdraws',
  881: 	   * see FIXME in do_rte_announce())
  882: 	   */
  883: 	  if (old->sender->proto != p)
  884: 	    {
  885: 	      if (new)
  886: 		{
  887: 		  log_rl(&rl_pipe, L_ERR "Pipe collision detected when sending %I/%d to table %s",
  888: 		      net->n.prefix, net->n.pxlen, table->name);
  889: 		  rte_free_quick(new);
  890: 		}
  891: 	      return;
  892: 	    }
  893: 
  894: 	  if (new && rte_same(old, new))
  895: 	    {
  896: 	      /* No changes, ignore the new route */
  897: 
  898: 	      if (!rte_is_filtered(new))
  899: 		{
  900: 		  stats->imp_updates_ignored++;
  901: 		  rte_trace_in(D_ROUTES, p, new, "ignored");
  902: 		}
  903: 
  904: 	      rte_free_quick(new);
  905: 	      return;
  906: 	    }
  907: 	  *k = old->next;
  908: 	  break;
  909: 	}
  910:       k = &old->next;
  911:       before_old = old;
  912:     }
  913: 
  914:   if (!old)
  915:     before_old = NULL;
  916: 
  917:   if (!old && !new)
  918:     {
  919:       stats->imp_withdraws_ignored++;
  920:       return;
  921:     }
  922: 
  923:   int new_ok = rte_is_ok(new);
  924:   int old_ok = rte_is_ok(old);
  925: 
  926:   struct proto_limit *l = ah->rx_limit;
  927:   if (l && !old && new)
  928:     {
  929:       u32 all_routes = stats->imp_routes + stats->filt_routes;
  930: 
  931:       if (all_routes >= l->limit)
  932: 	proto_notify_limit(ah, l, PLD_RX, all_routes);
  933: 
  934:       if (l->state == PLS_BLOCKED)
  935: 	{
  936: 	  /* In receive limit the situation is simple, old is NULL so
  937: 	     we just free new and exit like nothing happened */
  938: 
  939: 	  stats->imp_updates_ignored++;
  940: 	  rte_trace_in(D_FILTERS, p, new, "ignored [limit]");
  941: 	  rte_free_quick(new);
  942: 	  return;
  943: 	}
  944:     }
  945: 
  946:   l = ah->in_limit;
  947:   if (l && !old_ok && new_ok)
  948:     {
  949:       if (stats->imp_routes >= l->limit)
  950: 	proto_notify_limit(ah, l, PLD_IN, stats->imp_routes);
  951: 
  952:       if (l->state == PLS_BLOCKED)
  953: 	{
  954: 	  /* In import limit the situation is more complicated. We
  955: 	     shouldn't just drop the route, we should handle it like
  956: 	     it was filtered. We also have to continue the route
  957: 	     processing if old or new is non-NULL, but we should exit
  958: 	     if both are NULL as this case is probably assumed to be
  959: 	     already handled. */
  960: 
  961: 	  stats->imp_updates_ignored++;
  962: 	  rte_trace_in(D_FILTERS, p, new, "ignored [limit]");
  963: 
  964: 	  if (ah->in_keep_filtered)
  965: 	    new->flags |= REF_FILTERED;
  966: 	  else
  967: 	    { rte_free_quick(new); new = NULL; }
  968: 
  969: 	  /* Note that old && !new could be possible when
  970: 	     ah->in_keep_filtered changed in the recent past. */
  971: 
  972: 	  if (!old && !new)
  973: 	    return;
  974: 
  975: 	  new_ok = 0;
  976: 	  goto skip_stats1;
  977: 	}
  978:     }
  979: 
  980:   if (new_ok)
  981:     stats->imp_updates_accepted++;
  982:   else if (old_ok)
  983:     stats->imp_withdraws_accepted++;
  984:   else
  985:     stats->imp_withdraws_ignored++;
  986: 
  987:  skip_stats1:
  988: 
  989:   if (new)
  990:     rte_is_filtered(new) ? stats->filt_routes++ : stats->imp_routes++;
  991:   if (old)
  992:     rte_is_filtered(old) ? stats->filt_routes-- : stats->imp_routes--;
  993: 
  994:   if (table->config->sorted)
  995:     {
  996:       /* If routes are sorted, just insert new route to appropriate position */
  997:       if (new)
  998: 	{
  999: 	  if (before_old && !rte_better(new, before_old))
 1000: 	    k = &before_old->next;
 1001: 	  else
 1002: 	    k = &net->routes;
 1003: 
 1004: 	  for (; *k; k=&(*k)->next)
 1005: 	    if (rte_better(new, *k))
 1006: 	      break;
 1007: 
 1008: 	  new->next = *k;
 1009: 	  *k = new;
 1010: 	}
 1011:     }
 1012:   else
 1013:     {
 1014:       /* If routes are not sorted, find the best route and move it to
 1015: 	 the first position. There are several optimized cases. */
 1016: 
 1017:       if (src->proto->rte_recalculate && src->proto->rte_recalculate(table, net, new, old, old_best))
 1018: 	goto do_recalculate;
 1019: 
 1020:       if (new && rte_better(new, old_best))
 1021: 	{
 1022: 	  /* The first case - the new route is clearly optimal,
 1023: 	     we link it at the first position */
 1024: 
 1025: 	  new->next = net->routes;
 1026: 	  net->routes = new;
 1027: 	}
 1028:       else if (old == old_best)
 1029: 	{
 1030: 	  /* The second case - the old best route disappeared, we add the
 1031: 	     new route (if we have any) to the list (we don't care about
 1032: 	     position) and then we elect the new optimal route and relink
 1033: 	     that route at the first position and announce it. The new optimal
 1034: 	     route might be NULL if there are no more routes */
 1035: 
 1036: 	do_recalculate:
 1037: 	  /* Add the new route to the list */
 1038: 	  if (new)
 1039: 	    {
 1040: 	      new->next = net->routes;
 1041: 	      net->routes = new;
 1042: 	    }
 1043: 
 1044: 	  /* Find a new optimal route (if there is any) */
 1045: 	  if (net->routes)
 1046: 	    {
 1047: 	      rte **bp = &net->routes;
 1048: 	      for (k=&(*bp)->next; *k; k=&(*k)->next)
 1049: 		if (rte_better(*k, *bp))
 1050: 		  bp = k;
 1051: 
 1052: 	      /* And relink it */
 1053: 	      rte *best = *bp;
 1054: 	      *bp = best->next;
 1055: 	      best->next = net->routes;
 1056: 	      net->routes = best;
 1057: 	    }
 1058: 	}
 1059:       else if (new)
 1060: 	{
 1061: 	  /* The third case - the new route is not better than the old
 1062: 	     best route (therefore old_best != NULL) and the old best
 1063: 	     route was not removed (therefore old_best == net->routes).
 1064: 	     We just link the new route after the old best route. */
 1065: 
 1066: 	  ASSERT(net->routes != NULL);
 1067: 	  new->next = net->routes->next;
 1068: 	  net->routes->next = new;
 1069: 	}
 1070:       /* The fourth (empty) case - suboptimal route was removed, nothing to do */
 1071:     }
 1072: 
 1073:   if (new)
 1074:     new->lastmod = now;
 1075: 
 1076:   /* Log the route change */
 1077:   if (p->debug & D_ROUTES)
 1078:     {
 1079:       if (new_ok)
 1080: 	rte_trace(p, new, '>', new == net->routes ? "added [best]" : "added");
 1081:       else if (old_ok)
 1082: 	{
 1083: 	  if (old != old_best)
 1084: 	    rte_trace(p, old, '>', "removed");
 1085: 	  else if (rte_is_ok(net->routes))
 1086: 	    rte_trace(p, old, '>', "removed [replaced]");
 1087: 	  else
 1088: 	    rte_trace(p, old, '>', "removed [sole]");
 1089: 	}
 1090:     }
 1091: 
 1092:   /* Propagate the route change */
 1093:   rte_announce(table, RA_ANY, net, new, old, NULL, NULL, NULL);
 1094:   if (net->routes != old_best)
 1095:     rte_announce(table, RA_OPTIMAL, net, net->routes, old_best, NULL, NULL, NULL);
 1096:   if (table->config->sorted)
 1097:     rte_announce(table, RA_ACCEPTED, net, new, old, NULL, NULL, before_old);
 1098:   rte_announce(table, RA_MERGED, net, new, old, net->routes, old_best, NULL);
 1099: 
 1100:   if (!net->routes &&
 1101:       (table->gc_counter++ >= table->config->gc_max_ops) &&
 1102:       (table->gc_time + table->config->gc_min_time <= now))
 1103:     rt_schedule_gc(table);
 1104: 
 1105:   if (old_ok && p->rte_remove)
 1106:     p->rte_remove(net, old);
 1107:   if (new_ok && p->rte_insert)
 1108:     p->rte_insert(net, new);
 1109: 
 1110:   if (old)
 1111:     rte_free_quick(old);
 1112: }
 1113: 
 1114: static int rte_update_nest_cnt;		/* Nesting counter to allow recursive updates */
 1115: 
 1116: static inline void
 1117: rte_update_lock(void)
 1118: {
 1119:   rte_update_nest_cnt++;
 1120: }
 1121: 
 1122: static inline void
 1123: rte_update_unlock(void)
 1124: {
 1125:   if (!--rte_update_nest_cnt)
 1126:     lp_flush(rte_update_pool);
 1127: }
 1128: 
 1129: static inline void
 1130: rte_hide_dummy_routes(net *net, rte **dummy)
 1131: {
 1132:   if (net->routes && net->routes->attrs->source == RTS_DUMMY)
 1133:   {
 1134:     *dummy = net->routes;
 1135:     net->routes = (*dummy)->next;
 1136:   }
 1137: }
 1138: 
 1139: static inline void
 1140: rte_unhide_dummy_routes(net *net, rte **dummy)
 1141: {
 1142:   if (*dummy)
 1143:   {
 1144:     (*dummy)->next = net->routes;
 1145:     net->routes = *dummy;
 1146:   }
 1147: }
 1148: 
 1149: /**
 1150:  * rte_update - enter a new update to a routing table
 1151:  * @table: table to be updated
 1152:  * @ah: pointer to table announce hook
 1153:  * @net: network node
 1154:  * @p: protocol submitting the update
 1155:  * @src: protocol originating the update
 1156:  * @new: a &rte representing the new route or %NULL for route removal.
 1157:  *
 1158:  * This function is called by the routing protocols whenever they discover
 1159:  * a new route or wish to update/remove an existing route. The right announcement
 1160:  * sequence is to build route attributes first (either un-cached with @aflags set
 1161:  * to zero or a cached one using rta_lookup(); in this case please note that
 1162:  * you need to increase the use count of the attributes yourself by calling
 1163:  * rta_clone()), call rte_get_temp() to obtain a temporary &rte, fill in all
 1164:  * the appropriate data and finally submit the new &rte by calling rte_update().
 1165:  *
 1166:  * @src specifies the protocol that originally created the route and the meaning
 1167:  * of protocol-dependent data of @new. If @new is not %NULL, @src has to be the
 1168:  * same value as @new->attrs->src. @p specifies the protocol that called
 1169:  * rte_update(). In most cases it is the same protocol as @src. rte_update()
 1170:  * stores @p in @new->sender.
 1171:  *
 1172:  * When rte_update() gets any route, it automatically validates it (checks
 1173:  * whether the network and next hop address are valid IP addresses and also
 1174:  * whether a normal routing protocol doesn't try to smuggle a host or link
 1175:  * scope route to the table), converts all protocol-dependent attributes stored
 1176:  * in the &rte to temporary extended attributes, consults import filters of the
 1177:  * protocol to see if the route should be accepted and/or its attributes modified,
 1178:  * and stores the temporary attributes back to the &rte.
 1179:  *
 1180:  * Now, having a "public" version of the route, we
 1181:  * automatically find any old route defined by the protocol @src
 1182:  * for network @n, replace it by the new one (or remove it if @new is %NULL),
 1183:  * recalculate the optimal route for this destination and finally broadcast
 1184:  * the change (if any) to all routing protocols by calling rte_announce().
 1185:  *
 1186:  * All memory used for attribute lists and other temporary allocations is taken
 1187:  * from a special linear pool @rte_update_pool and freed when rte_update()
 1188:  * finishes.
 1189:  */
 1190: 
 1191: void
 1192: rte_update2(struct announce_hook *ah, net *net, rte *new, struct rte_src *src)
 1193: {
 1194:   struct proto *p = ah->proto;
 1195:   struct proto_stats *stats = ah->stats;
 1196:   struct filter *filter = ah->in_filter;
 1197:   ea_list *tmpa = NULL;
 1198:   rte *dummy = NULL;
 1199: 
 1200:   rte_update_lock();
 1201:   if (new)
 1202:     {
 1203:       new->sender = ah;
 1204: 
 1205:       stats->imp_updates_received++;
 1206:       if (!rte_validate(new))
 1207: 	{
 1208: 	  rte_trace_in(D_FILTERS, p, new, "invalid");
 1209: 	  stats->imp_updates_invalid++;
 1210: 	  goto drop;
 1211: 	}
 1212: 
 1213:       if (filter == FILTER_REJECT)
 1214: 	{
 1215: 	  stats->imp_updates_filtered++;
 1216: 	  rte_trace_in(D_FILTERS, p, new, "filtered out");
 1217: 
 1218: 	  if (! ah->in_keep_filtered)
 1219: 	    goto drop;
 1220: 
 1221: 	  /* new is a private copy, we can modify it */
 1222: 	  new->flags |= REF_FILTERED;
 1223: 	}
 1224:       else
 1225: 	{
 1226: 	  tmpa = make_tmp_attrs(new, rte_update_pool);
 1227: 	  if (filter && (filter != FILTER_REJECT))
 1228: 	    {
 1229: 	      ea_list *old_tmpa = tmpa;
 1230: 	      int fr = f_run(filter, &new, &tmpa, rte_update_pool, 0);
 1231: 	      if (fr > F_ACCEPT)
 1232: 		{
 1233: 		  stats->imp_updates_filtered++;
 1234: 		  rte_trace_in(D_FILTERS, p, new, "filtered out");
 1235: 
 1236: 		  if (! ah->in_keep_filtered)
 1237: 		    goto drop;
 1238: 
 1239: 		  new->flags |= REF_FILTERED;
 1240: 		}
 1241: 	      if (tmpa != old_tmpa && src->proto->store_tmp_attrs)
 1242: 		src->proto->store_tmp_attrs(new, tmpa);
 1243: 	    }
 1244: 	}
 1245:       if (!rta_is_cached(new->attrs)) /* Need to copy attributes */
 1246: 	new->attrs = rta_lookup(new->attrs);
 1247:       new->flags |= REF_COW;
 1248:     }
 1249:   else
 1250:     {
 1251:       stats->imp_withdraws_received++;
 1252: 
 1253:       if (!net || !src)
 1254: 	{
 1255: 	  stats->imp_withdraws_ignored++;
 1256: 	  rte_update_unlock();
 1257: 	  return;
 1258: 	}
 1259:     }
 1260: 
 1261:  recalc:
 1262:   rte_hide_dummy_routes(net, &dummy);
 1263:   rte_recalculate(ah, net, new, src);
 1264:   rte_unhide_dummy_routes(net, &dummy);
 1265:   rte_update_unlock();
 1266:   return;
 1267: 
 1268:  drop:
 1269:   rte_free(new);
 1270:   new = NULL;
 1271:   goto recalc;
 1272: }
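
/*
 * A sketch (assumption, not part of the original file) of the announcement
 * sequence described in the documentation above, as performed by a protocol;
 * variable names and values are illustrative only.
 *
 *   rta a = {
 *     .src = p->main_source,
 *     .source = RTS_STATIC,
 *     .scope = SCOPE_UNIVERSE,
 *     .cast = RTC_UNICAST,
 *     .dest = RTD_ROUTER,
 *     .gw = gateway,
 *     .iface = ifa,
 *   };
 *   net *n = net_get(p->table, prefix, pxlen);
 *   rte *e = rte_get_temp(rta_lookup(&a));
 *   e->net = n;
 *   e->pflags = 0;
 *   rte_update(p, n, e);   // thin wrapper around rte_update2()
 */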
 1273: 
 1274: /* Independent call to rte_announce(), used from next hop
 1275:    recalculation, outside of rte_update(). new must be non-NULL */
 1276: static inline void 
 1277: rte_announce_i(rtable *tab, unsigned type, net *net, rte *new, rte *old,
 1278: 	       rte *new_best, rte *old_best)
 1279: {
 1280:   rte_update_lock();
 1281:   rte_announce(tab, type, net, new, old, new_best, old_best, NULL);
 1282:   rte_update_unlock();
 1283: }
 1284: 
 1285: static inline void
 1286: rte_discard(rte *old)	/* Non-filtered route deletion, used during garbage collection */
 1287: {
 1288:   rte_update_lock();
 1289:   rte_recalculate(old->sender, old->net, NULL, old->attrs->src);
 1290:   rte_update_unlock();
 1291: }
 1292: 
 1293: /* Check rtable for the best route to the given net, whether it would be exported to p */
 1294: int
 1295: rt_examine(rtable *t, ip_addr prefix, int pxlen, struct proto *p, struct filter *filter)
 1296: {
 1297:   net *n = net_find(t, prefix, pxlen);
 1298:   rte *rt = n ? n->routes : NULL;
 1299: 
 1300:   if (!rte_is_valid(rt))
 1301:     return 0;
 1302: 
 1303:   rte_update_lock();
 1304: 
 1305:   /* Rest is stripped down export_filter() */
 1306:   ea_list *tmpa = make_tmp_attrs(rt, rte_update_pool);
 1307:   int v = p->import_control ? p->import_control(p, &rt, &tmpa, rte_update_pool) : 0;
 1308:   if (v == RIC_PROCESS)
 1309:     v = (f_run(filter, &rt, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) <= F_ACCEPT);
 1310: 
 1311:    /* Discard temporary rte */
 1312:   if (rt != n->routes)
 1313:     rte_free(rt);
 1314: 
 1315:   rte_update_unlock();
 1316: 
 1317:   return v > 0;
 1318: }
 1319: 
 1320: 
 1321: /**
 1322:  * rt_refresh_begin - start a refresh cycle
 1323:  * @t: related routing table
 1324:  * @ah: related announce hook 
 1325:  *
 1326:  * This function starts a refresh cycle for given routing table and announce
 1327:  * hook. The refresh cycle is a sequence where the protocol sends all its valid
 1328:  * routes to the routing table (by rte_update()). After that, all protocol
 1329:  * routes (more precisely routes with @ah as @sender) not sent during the
 1330:  * refresh cycle but still in the table from the past are pruned. This is
 1331:  * implemented by marking all related routes as stale by REF_STALE flag in
 1332:  * rt_refresh_begin(), then marking all related stale routes with REF_DISCARD
 1333:  * flag in rt_refresh_end() and then removing such routes in the prune loop.
 1334:  */
 1335: void
 1336: rt_refresh_begin(rtable *t, struct announce_hook *ah)
 1337: {
 1338:   net *n;
 1339:   rte *e;
 1340: 
 1341:   FIB_WALK(&t->fib, fn)
 1342:     {
 1343:       n = (net *) fn;
 1344:       for (e = n->routes; e; e = e->next)
 1345: 	if (e->sender == ah)
 1346: 	  e->flags |= REF_STALE;
 1347:     }
 1348:   FIB_WALK_END;
 1349: }
 1350: 
 1351: /**
 1352:  * rt_refresh_end - end a refresh cycle
 1353:  * @t: related routing table
 1354:  * @ah: related announce hook 
 1355:  *
 1356:  * This function ends a refresh cycle for the given routing table and announce
 1357:  * hook. See rt_refresh_begin() for a description of refresh cycles.
 1358:  */
 1359: void
 1360: rt_refresh_end(rtable *t, struct announce_hook *ah)
 1361: {
 1362:   int prune = 0;
 1363:   net *n;
 1364:   rte *e;
 1365: 
 1366:   FIB_WALK(&t->fib, fn)
 1367:     {
 1368:       n = (net *) fn;
 1369:       for (e = n->routes; e; e = e->next)
 1370: 	if ((e->sender == ah) && (e->flags & REF_STALE))
 1371: 	  {
 1372: 	    e->flags |= REF_DISCARD;
 1373: 	    prune = 1;
 1374: 	  }
 1375:     }
 1376:   FIB_WALK_END;
 1377: 
 1378:   if (prune)
 1379:     rt_schedule_prune(t);
 1380: }
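
/*
 * The refresh cycle described above is driven by the protocol; a rough sketch
 * of the calling pattern (the actual route re-sending is elided):
 *
 *   rt_refresh_begin(ah->table, ah);
 *   // ... re-send all currently valid routes with rte_update() ...
 *   rt_refresh_end(ah->table, ah);
 */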
 1381: 
 1382: 
 1383: /**
 1384:  * rte_dump - dump a route
 1385:  * @e: &rte to be dumped
 1386:  *
 1387:  * This function dumps the contents of a &rte to the debug output.
 1388:  */
 1389: void
 1390: rte_dump(rte *e)
 1391: {
 1392:   net *n = e->net;
 1393:   debug("%-1I/%2d ", n->n.prefix, n->n.pxlen);
 1394:   debug("KF=%02x PF=%02x pref=%d lm=%d ", n->n.flags, e->pflags, e->pref, now-e->lastmod);
 1395:   rta_dump(e->attrs);
 1396:   if (e->attrs->src->proto->proto->dump_attrs)
 1397:     e->attrs->src->proto->proto->dump_attrs(e);
 1398:   debug("\n");
 1399: }
 1400: 
 1401: /**
 1402:  * rt_dump - dump a routing table
 1403:  * @t: routing table to be dumped
 1404:  *
 1405:  * This function dumps contents of a given routing table to debug output.
 1406:  */
 1407: void
 1408: rt_dump(rtable *t)
 1409: {
 1410:   rte *e;
 1411:   net *n;
 1412:   struct announce_hook *a;
 1413: 
 1414:   debug("Dump of routing table <%s>\n", t->name);
 1415: #ifdef DEBUGGING
 1416:   fib_check(&t->fib);
 1417: #endif
 1418:   FIB_WALK(&t->fib, fn)
 1419:     {
 1420:       n = (net *) fn;
 1421:       for(e=n->routes; e; e=e->next)
 1422: 	rte_dump(e);
 1423:     }
 1424:   FIB_WALK_END;
 1425:   WALK_LIST(a, t->hooks)
 1426:     debug("\tAnnounces routes to protocol %s\n", a->proto->name);
 1427:   debug("\n");
 1428: }
 1429: 
 1430: /**
 1431:  * rt_dump_all - dump all routing tables
 1432:  *
 1433:  * This function dumps contents of all routing tables to debug output.
 1434:  */
 1435: void
 1436: rt_dump_all(void)
 1437: {
 1438:   rtable *t;
 1439: 
 1440:   WALK_LIST(t, routing_tables)
 1441:     rt_dump(t);
 1442: }
 1443: 
 1444: static inline void
 1445: rt_schedule_prune(rtable *tab)
 1446: {
 1447:   rt_mark_for_prune(tab);
 1448:   ev_schedule(tab->rt_event);
 1449: }
 1450: 
 1451: static inline void
 1452: rt_schedule_gc(rtable *tab)
 1453: {
 1454:   if (tab->gc_scheduled)
 1455:     return;
 1456: 
 1457:   tab->gc_scheduled = 1;
 1458:   ev_schedule(tab->rt_event);
 1459: }
 1460: 
 1461: static inline void
 1462: rt_schedule_hcu(rtable *tab)
 1463: {
 1464:   if (tab->hcu_scheduled)
 1465:     return;
 1466: 
 1467:   tab->hcu_scheduled = 1;
 1468:   ev_schedule(tab->rt_event);
 1469: }
 1470: 
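/* Next hop update state machine kept in tab->nhu_state:
   0 = idle, 1 = scheduled, 2 = running, 3 = running with another round requested */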
 1471: static inline void
 1472: rt_schedule_nhu(rtable *tab)
 1473: {
 1474:   if (tab->nhu_state == 0)
 1475:     ev_schedule(tab->rt_event);
 1476: 
 1477:   /* state change 0->1, 2->3 */
 1478:   tab->nhu_state |= 1;
 1479: }
 1480: 
 1481: 
 1482: static void
 1483: rt_prune_nets(rtable *tab)
 1484: {
 1485:   struct fib_iterator fit;
 1486:   int ncnt = 0, ndel = 0;
 1487: 
 1488: #ifdef DEBUGGING
 1489:   fib_check(&tab->fib);
 1490: #endif
 1491: 
 1492:   FIB_ITERATE_INIT(&fit, &tab->fib);
 1493: again:
 1494:   FIB_ITERATE_START(&tab->fib, &fit, f)
 1495:     {
 1496:       net *n = (net *) f;
 1497:       ncnt++;
 1498:       if (!n->routes)		/* Orphaned FIB entry */
 1499: 	{
 1500: 	  FIB_ITERATE_PUT(&fit, f);
 1501: 	  fib_delete(&tab->fib, f);
 1502: 	  ndel++;
 1503: 	  goto again;
 1504: 	}
 1505:     }
 1506:   FIB_ITERATE_END(f);
 1507:   DBG("Pruned %d of %d networks\n", ndel, ncnt);
 1508: 
 1509:   tab->gc_counter = 0;
 1510:   tab->gc_time = now;
 1511:   tab->gc_scheduled = 0;
 1512: }
 1513: 
 1514: static void
 1515: rt_event(void *ptr)
 1516: {
 1517:   rtable *tab = ptr;
 1518: 
 1519:   if (tab->hcu_scheduled)
 1520:     rt_update_hostcache(tab);
 1521: 
 1522:   if (tab->nhu_state)
 1523:     rt_next_hop_update(tab);
 1524: 
 1525:   if (tab->prune_state)
 1526:     if (!rt_prune_table(tab))
 1527:       {
 1528: 	/* Table prune unfinished */
 1529: 	ev_schedule(tab->rt_event);
 1530: 	return;
 1531:       }
 1532: 
 1533:   if (tab->gc_scheduled)
 1534:     {
 1535:       rt_prune_nets(tab);
 1536:       rt_prune_sources(); // FIXME this should be moved to independent event
 1537:     }
 1538: }
 1539: 
 1540: void
 1541: rt_setup(pool *p, rtable *t, char *name, struct rtable_config *cf)
 1542: {
 1543:   bzero(t, sizeof(*t));
 1544:   fib_init(&t->fib, p, sizeof(net), 0, rte_init);
 1545:   t->name = name;
 1546:   t->config = cf;
 1547:   init_list(&t->hooks);
 1548:   if (cf)
 1549:     {
 1550:       t->rt_event = ev_new(p);
 1551:       t->rt_event->hook = rt_event;
 1552:       t->rt_event->data = t;
 1553:       t->gc_time = now;
 1554:     }
 1555: }
 1556: 
 1557: /**
 1558:  * rt_init - initialize routing tables
 1559:  *
 1560:  * This function is called during BIRD startup. It initializes the
 1561:  * routing table module.
 1562:  */
 1563: void
 1564: rt_init(void)
 1565: {
 1566:   rta_init();
 1567:   rt_table_pool = rp_new(&root_pool, "Routing tables");
 1568:   rte_update_pool = lp_new(rt_table_pool, 4080);
 1569:   rte_slab = sl_new(rt_table_pool, sizeof(rte));
 1570:   init_list(&routing_tables);
 1571: }
 1572: 
 1573: 
 1574: static int
 1575: rt_prune_step(rtable *tab, int *limit)
 1576: {
 1577:   struct fib_iterator *fit = &tab->prune_fit;
 1578: 
 1579:   DBG("Pruning route table %s\n", tab->name);
 1580: #ifdef DEBUGGING
 1581:   fib_check(&tab->fib);
 1582: #endif
 1583: 
 1584:   if (tab->prune_state == RPS_NONE)
 1585:     return 1;
 1586: 
 1587:   if (tab->prune_state == RPS_SCHEDULED)
 1588:     {
 1589:       FIB_ITERATE_INIT(fit, &tab->fib);
 1590:       tab->prune_state = RPS_RUNNING;
 1591:     }
 1592: 
 1593: again:
 1594:   FIB_ITERATE_START(&tab->fib, fit, fn)
 1595:     {
 1596:       net *n = (net *) fn;
 1597:       rte *e;
 1598: 
 1599:     rescan:
 1600:       for (e=n->routes; e; e=e->next)
 1601: 	if (e->sender->proto->flushing || (e->flags & REF_DISCARD))
 1602: 	  {
 1603: 	    if (*limit <= 0)
 1604: 	      {
 1605: 		FIB_ITERATE_PUT(fit, fn);
 1606: 		return 0;
 1607: 	      }
 1608: 
 1609: 	    rte_discard(e);
 1610: 	    (*limit)--;
 1611: 
 1612: 	    goto rescan;
 1613: 	  }
 1614:       if (!n->routes)		/* Orphaned FIB entry */
 1615: 	{
 1616: 	  FIB_ITERATE_PUT(fit, fn);
 1617: 	  fib_delete(&tab->fib, fn);
 1618: 	  goto again;
 1619: 	}
 1620:     }
 1621:   FIB_ITERATE_END(fn);
 1622: 
 1623: #ifdef DEBUGGING
 1624:   fib_check(&tab->fib);
 1625: #endif
 1626: 
 1627:   tab->prune_state = RPS_NONE;
 1628:   return 1;
 1629: }
 1630: 
 1631: /**
 1632:  * rt_prune_table - prune a routing table
 1633:  * @tab: a routing table for pruning
 1634:  *
 1635:  * This function scans the routing table @tab and removes routes belonging to
 1636:  * flushing protocols, discarded routes and also stale network entries, in a
 1637:  * similar fashion to rt_prune_loop(). Returns 1 when all such routes are
 1638:  * pruned. Contrary to rt_prune_loop(), this function is not a part of the
 1639:  * protocol flushing loop, but it is called from rt_event() for just one routing
 1640:  * table.
 1641:  *
 1642:  * Note that rt_prune_table() and rt_prune_loop() share (for each table) the
 1643:  * prune state (@prune_state) and also the pruning iterator (@prune_fit).
 1644:  */
 1645: static inline int
 1646: rt_prune_table(rtable *tab)
 1647: {
 1648:   int limit = 512;
 1649:   return rt_prune_step(tab, &limit);
 1650: }
 1651: 
 1652: /**
 1653:  * rt_prune_loop - prune routing tables
 1654:  *
 1655:  * The prune loop scans routing tables and removes routes belonging to flushing
 1656:  * protocols, discarded routes and also stale network entries. Returns 1 when
 1657:  * all such routes are pruned. It is a part of the protocol flushing loop.
 1658:  */
 1659: int
 1660: rt_prune_loop(void)
 1661: {
 1662:   int limit = 512;
 1663:   rtable *t;
 1664: 
 1665:   WALK_LIST(t, routing_tables)
 1666:     if (! rt_prune_step(t, &limit))
 1667:       return 0;
 1668: 
 1669:   return 1;
 1670: }
 1671: 
 1672: void
 1673: rt_preconfig(struct config *c)
 1674: {
 1675:   struct symbol *s = cf_get_symbol("master");
 1676: 
 1677:   init_list(&c->tables);
 1678:   c->master_rtc = rt_new_table(s);
 1679: }
 1680: 
 1681: 
 1682: /* 
 1683:  * Some functions for handling internal next hop updates
 1684:  * triggered by rt_schedule_nhu().
 1685:  */
 1686: 
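/* Check whether the next hop data cached in @a is stale w.r.t. its hostentry */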
 1687: static inline int
 1688: rta_next_hop_outdated(rta *a)
 1689: {
 1690:   struct hostentry *he = a->hostentry;
 1691: 
 1692:   if (!he)
 1693:     return 0;
 1694: 
 1695:   if (!he->src)
 1696:     return a->dest != RTD_UNREACHABLE;
 1697: 
 1698:   return (a->iface != he->src->iface) || !ipa_equal(a->gw, he->gw) ||
 1699:     (a->dest != he->dest) || (a->igp_metric != he->igp_metric) ||
 1700:     !mpnh_same(a->nexthops, he->src->nexthops);
 1701: }
 1702: 
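/* Copy the resolved next hop information from the hostentry into @a */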
 1703: static inline void
 1704: rta_apply_hostentry(rta *a, struct hostentry *he)
 1705: {
 1706:   a->hostentry = he;
 1707:   a->iface = he->src ? he->src->iface : NULL;
 1708:   a->gw = he->gw;
 1709:   a->dest = he->dest;
 1710:   a->igp_metric = he->igp_metric;
 1711:   a->nexthops = he->src ? he->src->nexthops : NULL;
 1712: }
 1713: 
 1714: static inline rte *
 1715: rt_next_hop_update_rte(rtable *tab UNUSED, rte *old)
 1716: {
 1717:   rta a;
 1718:   memcpy(&a, old->attrs, sizeof(rta));
 1719:   rta_apply_hostentry(&a, old->attrs->hostentry);
 1720:   a.aflags = 0;
 1721: 
 1722:   rte *e = sl_alloc(rte_slab);
 1723:   memcpy(e, old, sizeof(rte));
 1724:   e->attrs = rta_lookup(&a);
 1725: 
 1726:   return e;
 1727: }
 1728: 
 1729: static inline int
 1730: rt_next_hop_update_net(rtable *tab, net *n)
 1731: {
 1732:   rte **k, *e, *new, *old_best, **new_best;
 1733:   int count = 0;
 1734:   int free_old_best = 0;
 1735: 
 1736:   old_best = n->routes;
 1737:   if (!old_best)
 1738:     return 0;
 1739: 
 1740:   for (k = &n->routes; e = *k; k = &e->next)
 1741:     if (rta_next_hop_outdated(e->attrs))
 1742:       {
 1743: 	new = rt_next_hop_update_rte(tab, e);
 1744: 	*k = new;
 1745: 
 1746: 	rte_announce_i(tab, RA_ANY, n, new, e, NULL, NULL);
 1747: 	rte_trace_in(D_ROUTES, new->sender->proto, new, "updated");
 1748: 
 1749: 	/* Call a pre-comparison hook */
 1750: 	/* Not really an efficient way to compute this */
 1751: 	if (e->attrs->src->proto->rte_recalculate)
 1752: 	  e->attrs->src->proto->rte_recalculate(tab, n, new, e, NULL);
 1753: 
 1754: 	if (e != old_best)
 1755: 	  rte_free_quick(e);
 1756: 	else /* Freeing of the old best rte is postponed */
 1757: 	  free_old_best = 1;
 1758: 
 1759: 	e = new;
 1760: 	count++;
 1761:       }
 1762: 
 1763:   if (!count)
 1764:     return 0;
 1765: 
 1766:   /* Find the new best route */
 1767:   new_best = NULL;
 1768:   for (k = &n->routes; e = *k; k = &e->next)
 1769:     {
 1770:       if (!new_best || rte_better(e, *new_best))
 1771: 	new_best = k;
 1772:     }
 1773: 
 1774:   /* Relink the new best route to the first position */
 1775:   new = *new_best;
 1776:   if (new != n->routes)
 1777:     {
 1778:       *new_best = new->next;
 1779:       new->next = n->routes;
 1780:       n->routes = new;
 1781:     }
 1782: 
 1783:   /* Announce the new best route */
 1784:   if (new != old_best)
 1785:     {
 1786:       rte_announce_i(tab, RA_OPTIMAL, n, new, old_best, NULL, NULL);
 1787:       rte_trace_in(D_ROUTES, new->sender->proto, new, "updated [best]");
 1788:     }
 1789: 
 1790:   /* FIXME: Better announcement of merged routes */
 1791:   rte_announce_i(tab, RA_MERGED, n, new, old_best, new, old_best);
 1792: 
 1793:   if (free_old_best)
 1794:     rte_free_quick(old_best);
 1795: 
 1796:   return count;
 1797: }
 1798: 
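/*
 * Next hop update state machine, as used below: 0 is idle, 1 means an
 * update is scheduled, 2 means an update is running and 3 means a running
 * update was rescheduled; the final 'nhu_state &= 1' therefore maps a
 * finished run back to idle (2->0) or to a pending rerun (3->1).
 */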
 1799: static void
 1800: rt_next_hop_update(rtable *tab)
 1801: {
 1802:   struct fib_iterator *fit = &tab->nhu_fit;
 1803:   int max_feed = 32;
 1804: 
 1805:   if (tab->nhu_state == 0)
 1806:     return;
 1807: 
 1808:   if (tab->nhu_state == 1)
 1809:     {
 1810:       FIB_ITERATE_INIT(fit, &tab->fib);
 1811:       tab->nhu_state = 2;
 1812:     }
 1813: 
 1814:   FIB_ITERATE_START(&tab->fib, fit, fn)
 1815:     {
 1816:       if (max_feed <= 0)
 1817: 	{
 1818: 	  FIB_ITERATE_PUT(fit, fn);
 1819: 	  ev_schedule(tab->rt_event);
 1820: 	  return;
 1821: 	}
 1822:       max_feed -= rt_next_hop_update_net(tab, (net *) fn);
 1823:     }
 1824:   FIB_ITERATE_END(fn);
 1825: 
 1826:   /* state change 2->0, 3->1 */
 1827:   tab->nhu_state &= 1;
 1828: 
 1829:   if (tab->nhu_state > 0)
 1830:     ev_schedule(tab->rt_event);
 1831: }
 1832: 
 1833: 
 1834: struct rtable_config *
 1835: rt_new_table(struct symbol *s)
 1836: {
 1837:   /* Hack that allows the master table to be 'redefined' */
 1838:   if ((s->class == SYM_TABLE) && (s->def == new_config->master_rtc))
 1839:     return s->def;
 1840: 
 1841:   struct rtable_config *c = cfg_allocz(sizeof(struct rtable_config));
 1842: 
 1843:   cf_define_symbol(s, SYM_TABLE, c);
 1844:   c->name = s->name;
 1845:   add_tail(&new_config->tables, &c->n);
 1846:   c->gc_max_ops = 1000;
 1847:   c->gc_min_time = 5;
 1848:   return c;
 1849: }
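
/*
 * Illustrative note (not part of the original source): a configuration
 * fragment such as
 *
 *   table aux;
 *
 * reaches this function through the config parser and gets the default
 * garbage collector parameters set above.
 */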
 1850: 
 1851: /**
 1852:  * rt_lock_table - lock a routing table
 1853:  * @r: routing table to be locked
 1854:  *
 1855:  * Lock a routing table because it is in use by a protocol,
 1856:  * preventing it from being freed when it becomes undefined in a new
 1857:  * configuration.
 1858:  */
 1859: void
 1860: rt_lock_table(rtable *r)
 1861: {
 1862:   r->use_count++;
 1863: }
 1864: 
 1865: /**
 1866:  * rt_unlock_table - unlock a routing table
 1867:  * @r: routing table to be unlocked
 1868:  *
 1869:  * Unlock a routing table formerly locked by rt_lock_table(),
 1870:  * that is, decrease its use count and delete the table if it is scheduled
 1871:  * for deletion by a configuration change and is no longer in use.
 1872:  */
 1873: void
 1874: rt_unlock_table(rtable *r)
 1875: {
 1876:   if (!--r->use_count && r->deleted)
 1877:     {
 1878:       struct config *conf = r->deleted;
 1879:       DBG("Deleting routing table %s\n", r->name);
 1880:       r->config->table = NULL;
 1881:       if (r->hostcache)
 1882: 	rt_free_hostcache(r);
 1883:       rem_node(&r->n);
 1884:       fib_free(&r->fib);
 1885:       rfree(r->rt_event);
 1886:       mb_free(r);
 1887:       config_del_obstacle(conf);
 1888:     }
 1889: }
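
/*
 * Illustrative sketch (not part of the original source): a protocol is
 * expected to pair these calls around its use of a table, roughly
 *
 *   rt_lock_table(p->table);
 *   ... use the table, announce and withdraw routes ...
 *   rt_unlock_table(p->table);
 *
 * so a table removed from the configuration survives until the last
 * protocol detaches from it.
 */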
 1890: 
 1891: /**
 1892:  * rt_commit - commit new routing table configuration
 1893:  * @new: new configuration
 1894:  * @old: original configuration or %NULL if it's boot time config
 1895:  *
 1896:  * Scan the differences between the @old and @new configuration and modify
 1897:  * the routing tables according to these changes. If @new defines a
 1898:  * previously unknown table, create it; if it omits a table existing
 1899:  * in @old, schedule that table for deletion (it gets deleted when all
 1900:  * protocols disconnect from it by calling rt_unlock_table()); if a table
 1901:  * exists in both configurations, leave it unchanged.
 1902:  */
 1903: void
 1904: rt_commit(struct config *new, struct config *old)
 1905: {
 1906:   struct rtable_config *o, *r;
 1907: 
 1908:   DBG("rt_commit:\n");
 1909:   if (old)
 1910:     {
 1911:       WALK_LIST(o, old->tables)
 1912: 	{
 1913: 	  rtable *ot = o->table;
 1914: 	  if (!ot->deleted)
 1915: 	    {
 1916: 	      struct symbol *sym = cf_find_symbol(new, o->name);
 1917: 	      if (sym && sym->class == SYM_TABLE && !new->shutdown)
 1918: 		{
 1919: 		  DBG("\t%s: same\n", o->name);
 1920: 		  r = sym->def;
 1921: 		  r->table = ot;
 1922: 		  ot->name = r->name;
 1923: 		  ot->config = r;
 1924: 		  if (o->sorted != r->sorted)
 1925: 		    log(L_WARN "Reconfiguration of rtable sorted flag not implemented");
 1926: 		}
 1927: 	      else
 1928: 		{
 1929: 		  DBG("\t%s: deleted\n", o->name);
 1930: 		  ot->deleted = old;
 1931: 		  config_add_obstacle(old);
 1932: 		  rt_lock_table(ot);
 1933: 		  rt_unlock_table(ot);
 1934: 		}
 1935: 	    }
 1936: 	}
 1937:     }
 1938: 
 1939:   WALK_LIST(r, new->tables)
 1940:     if (!r->table)
 1941:       {
 1942: 	rtable *t = mb_alloc(rt_table_pool, sizeof(struct rtable));
 1943: 	DBG("\t%s: created\n", r->name);
 1944: 	rt_setup(rt_table_pool, t, r->name, r);
 1945: 	add_tail(&routing_tables, &t->n);
 1946: 	r->table = t;
 1947:       }
 1948:   DBG("\tdone\n");
 1949: }
 1950: 
 1951: static inline void
 1952: do_feed_baby(struct proto *p, int type, struct announce_hook *h, net *n, rte *e)
 1953: {
 1954:   rte_update_lock();
 1955:   if (type == RA_ACCEPTED)
 1956:     rt_notify_accepted(h, n, e, NULL, NULL, p->refeeding ? 2 : 1);
 1957:   else if (type == RA_MERGED)
 1958:     rt_notify_merged(h, n, NULL, NULL, e, p->refeeding ? e : NULL, p->refeeding);
 1959:   else
 1960:     rt_notify_basic(h, n, e, p->refeeding ? e : NULL, p->refeeding);
 1961:   rte_update_unlock();
 1962: }
 1963: 
 1964: /**
 1965:  * rt_feed_baby - advertise routes to a new protocol
 1966:  * @p: protocol to be fed
 1967:  *
 1968:  * This function performs one pass of advertisement of routes to a newly
 1969:  * initialized protocol. It's called by the protocol code as long as it
 1970:  * has something to do. (We avoid transferring all the routes in single
 1971:  * pass in order not to monopolize CPU time.)
 1972:  */
 1973: int
 1974: rt_feed_baby(struct proto *p)
 1975: {
 1976:   struct announce_hook *h;
 1977:   struct fib_iterator *fit;
 1978:   int max_feed = 256;
 1979: 
 1980:   if (!p->feed_ahook)			/* Need to initialize first */
 1981:     {
 1982:       if (!p->ahooks)
 1983: 	return 1;
 1984:       DBG("Announcing routes to new protocol %s\n", p->name);
 1985:       p->feed_ahook = p->ahooks;
 1986:       fit = p->feed_iterator = mb_alloc(p->pool, sizeof(struct fib_iterator));
 1987:       goto next_hook;
 1988:     }
 1989:   fit = p->feed_iterator;
 1990: 
 1991: again:
 1992:   h = p->feed_ahook;
 1993:   FIB_ITERATE_START(&h->table->fib, fit, fn)
 1994:     {
 1995:       net *n = (net *) fn;
 1996:       rte *e = n->routes;
 1997:       if (max_feed <= 0)
 1998: 	{
 1999: 	  FIB_ITERATE_PUT(fit, fn);
 2000: 	  return 0;
 2001: 	}
 2002: 
 2003:       /* XXXX perhaps we should change feed for RA_ACCEPTED to not use 'new' */
 2004: 
 2005:       if ((p->accept_ra_types == RA_OPTIMAL) ||
 2006: 	  (p->accept_ra_types == RA_ACCEPTED) ||
 2007: 	  (p->accept_ra_types == RA_MERGED))
 2008: 	if (rte_is_valid(e))
 2009: 	  {
 2010: 	    if (p->export_state != ES_FEEDING)
 2011: 	      return 1;  /* In the meantime, the protocol fell down. */
 2012: 
 2013: 	    do_feed_baby(p, p->accept_ra_types, h, n, e);
 2014: 	    max_feed--;
 2015: 	  }
 2016: 
 2017:       if (p->accept_ra_types == RA_ANY)
 2018: 	for(e = n->routes; e; e = e->next)
 2019: 	  {
 2020: 	    if (p->export_state != ES_FEEDING)
 2021: 	      return 1;  /* In the meantime, the protocol fell down. */
 2022: 
 2023: 	    if (!rte_is_valid(e))
 2024: 	      continue;
 2025: 
 2026: 	    do_feed_baby(p, RA_ANY, h, n, e);
 2027: 	    max_feed--;
 2028: 	  }
 2029:     }
 2030:   FIB_ITERATE_END(fn);
 2031:   p->feed_ahook = h->next;
 2032:   if (!p->feed_ahook)
 2033:     {
 2034:       mb_free(p->feed_iterator);
 2035:       p->feed_iterator = NULL;
 2036:       return 1;
 2037:     }
 2038: 
 2039: next_hook:
 2040:   h = p->feed_ahook;
 2041:   FIB_ITERATE_INIT(fit, &h->table->fib);
 2042:   goto again;
 2043: }
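
/*
 * Illustrative sketch (not part of the original source): the protocol code
 * drives the feed in bounded batches until it is told that everything has
 * been advertised, conceptually
 *
 *   while (!rt_feed_baby(p))
 *     ;   (in practice the caller yields and reschedules between batches)
 *
 * rt_feed_baby() returns 0 after feeding at most 256 routes and 1 once all
 * announce hooks have been walked or the protocol has left the feeding state.
 */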
 2044: 
 2045: /**
 2046:  * rt_feed_baby_abort - abort protocol feeding
 2047:  * @p: protocol
 2048:  *
 2049:  * This function is called by the protocol code when the protocol
 2050:  * stops or ceases to exist before the last iteration of rt_feed_baby()
 2051:  * has finished.
 2052:  */
 2053: void
 2054: rt_feed_baby_abort(struct proto *p)
 2055: {
 2056:   if (p->feed_ahook)
 2057:     {
 2058:       /* Unlink the iterator and exit */
 2059:       fit_get(&p->feed_ahook->table->fib, p->feed_iterator);
 2060:       p->feed_ahook = NULL;
 2061:     }
 2062: }
 2063: 
 2064: 
 2065: static inline unsigned
 2066: ptr_hash(void *ptr)
 2067: {
 2068:   uintptr_t p = (uintptr_t) ptr;
 2069:   return p ^ (p << 8) ^ (p >> 16);
 2070: }
 2071: 
 2072: static inline unsigned
 2073: hc_hash(ip_addr a, rtable *dep)
 2074: {
 2075:   return (ipa_hash(a) ^ ptr_hash(dep)) & 0xffff;
 2076: }
 2077: 
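/*
 * hc_hash() yields a 16-bit key; the bucket index is obtained by dropping
 * the low bits with hash_shift, so the stored hash_key stays valid across
 * hc_resize().
 */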
 2078: static inline void
 2079: hc_insert(struct hostcache *hc, struct hostentry *he)
 2080: {
 2081:   uint k = he->hash_key >> hc->hash_shift;
 2082:   he->next = hc->hash_table[k];
 2083:   hc->hash_table[k] = he;
 2084: }
 2085: 
 2086: static inline void
 2087: hc_remove(struct hostcache *hc, struct hostentry *he)
 2088: {
 2089:   struct hostentry **hep;
 2090:   uint k = he->hash_key >> hc->hash_shift;
 2091: 
 2092:   for (hep = &hc->hash_table[k]; *hep != he; hep = &(*hep)->next);
 2093:   *hep = he->next;
 2094: }
 2095: 
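/*
 * Host cache sizing: the hash table starts at 2^HC_DEF_ORDER buckets, grows
 * by HC_HI_STEP orders once the item count exceeds four times the table size
 * (HC_HI_MARK) and shrinks by HC_LO_STEP orders once it drops below one fifth
 * of it (HC_LO_MARK), staying within the HC_LO_ORDER..HC_HI_ORDER bounds.
 */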
 2096: #define HC_DEF_ORDER 10
 2097: #define HC_HI_MARK *4
 2098: #define HC_HI_STEP 2
 2099: #define HC_HI_ORDER 16			/* Must be at most 16 */
 2100: #define HC_LO_MARK /5
 2101: #define HC_LO_STEP 2
 2102: #define HC_LO_ORDER 10
 2103: 
 2104: static void
 2105: hc_alloc_table(struct hostcache *hc, unsigned order)
 2106: {
 2107:   uint hsize = 1 << order;
 2108:   hc->hash_order = order;
 2109:   hc->hash_shift = 16 - order;
 2110:   hc->hash_max = (order >= HC_HI_ORDER) ? ~0U : (hsize HC_HI_MARK);
 2111:   hc->hash_min = (order <= HC_LO_ORDER) ?  0U : (hsize HC_LO_MARK);
 2112: 
 2113:   hc->hash_table = mb_allocz(rt_table_pool, hsize * sizeof(struct hostentry *));
 2114: }
 2115: 
 2116: static void
 2117: hc_resize(struct hostcache *hc, unsigned new_order)
 2118: {
 2119:   struct hostentry **old_table = hc->hash_table;
 2120:   struct hostentry *he, *hen;
 2121:   uint old_size = 1 << hc->hash_order;
 2122:   uint i;
 2123: 
 2124:   hc_alloc_table(hc, new_order);
 2125:   for (i = 0; i < old_size; i++)
 2126:     for (he = old_table[i]; he != NULL; he=hen)
 2127:       {
 2128: 	hen = he->next;
 2129: 	hc_insert(hc, he);
 2130:       }
 2131:   mb_free(old_table);
 2132: }
 2133: 
 2134: static struct hostentry *
 2135: hc_new_hostentry(struct hostcache *hc, ip_addr a, ip_addr ll, rtable *dep, unsigned k)
 2136: {
 2137:   struct hostentry *he = sl_alloc(hc->slab);
 2138: 
 2139:   he->addr = a;
 2140:   he->link = ll;
 2141:   he->tab = dep;
 2142:   he->hash_key = k;
 2143:   he->uc = 0;
 2144:   he->src = NULL;
 2145: 
 2146:   add_tail(&hc->hostentries, &he->ln);
 2147:   hc_insert(hc, he);
 2148: 
 2149:   hc->hash_items++;
 2150:   if (hc->hash_items > hc->hash_max)
 2151:     hc_resize(hc, hc->hash_order + HC_HI_STEP);
 2152: 
 2153:   return he;
 2154: }
 2155: 
 2156: static void
 2157: hc_delete_hostentry(struct hostcache *hc, struct hostentry *he)
 2158: {
 2159:   rta_free(he->src);
 2160: 
 2161:   rem_node(&he->ln);
 2162:   hc_remove(hc, he);
 2163:   sl_free(hc->slab, he);
 2164: 
 2165:   hc->hash_items--;
 2166:   if (hc->hash_items < hc->hash_min)
 2167:     hc_resize(hc, hc->hash_order - HC_LO_STEP);
 2168: }
 2169: 
 2170: static void
 2171: rt_init_hostcache(rtable *tab)
 2172: {
 2173:   struct hostcache *hc = mb_allocz(rt_table_pool, sizeof(struct hostcache));
 2174:   init_list(&hc->hostentries);
 2175: 
 2176:   hc->hash_items = 0;
 2177:   hc_alloc_table(hc, HC_DEF_ORDER);
 2178:   hc->slab = sl_new(rt_table_pool, sizeof(struct hostentry));
 2179: 
 2180:   hc->lp = lp_new(rt_table_pool, 1008);
 2181:   hc->trie = f_new_trie(hc->lp, sizeof(struct f_trie_node));
 2182: 
 2183:   tab->hostcache = hc;
 2184: }
 2185: 
 2186: static void
 2187: rt_free_hostcache(rtable *tab)
 2188: {
 2189:   struct hostcache *hc = tab->hostcache;
 2190: 
 2191:   node *n;
 2192:   WALK_LIST(n, hc->hostentries)
 2193:     {
 2194:       struct hostentry *he = SKIP_BACK(struct hostentry, ln, n);
 2195:       rta_free(he->src);
 2196: 
 2197:       if (he->uc)
 2198: 	log(L_ERR "Hostcache is not empty in table %s", tab->name);
 2199:     }
 2200: 
 2201:   rfree(hc->slab);
 2202:   rfree(hc->lp);
 2203:   mb_free(hc->hash_table);
 2204:   mb_free(hc);
 2205: }
 2206: 
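/*
 * Schedule a hostcache update only when the changed network falls into the
 * trie of prefixes through which the cached next hops were resolved;
 * unrelated route changes are filtered out cheaply here.
 */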
 2207: static void
 2208: rt_notify_hostcache(rtable *tab, net *net)
 2209: {
 2210:   struct hostcache *hc = tab->hostcache;
 2211: 
 2212:   if (tab->hcu_scheduled)
 2213:     return;
 2214: 
 2215:   if (trie_match_prefix(hc->trie, net->n.prefix, net->n.pxlen))
 2216:     rt_schedule_hcu(tab);
 2217: }
 2218: 
 2219: static int
 2220: if_local_addr(ip_addr a, struct iface *i)
 2221: {
 2222:   struct ifa *b;
 2223: 
 2224:   WALK_LIST(b, i->addrs)
 2225:     if (ipa_equal(a, b->ip))
 2226:       return 1;
 2227: 
 2228:   return 0;
 2229: }
 2230: 
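/* Estimate the IGP metric of a route, used for recursive next hop resolution */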
 2231: static u32 
 2232: rt_get_igp_metric(rte *rt)
 2233: {
 2234:   eattr *ea = ea_find(rt->attrs->eattrs, EA_GEN_IGP_METRIC);
 2235: 
 2236:   if (ea)
 2237:     return ea->u.data;
 2238: 
 2239:   rta *a = rt->attrs;
 2240: 
 2241: #ifdef CONFIG_OSPF
 2242:   if ((a->source == RTS_OSPF) ||
 2243:       (a->source == RTS_OSPF_IA) ||
 2244:       (a->source == RTS_OSPF_EXT1))
 2245:     return rt->u.ospf.metric1;
 2246: #endif
 2247: 
 2248: #ifdef CONFIG_RIP
 2249:   if (a->source == RTS_RIP)
 2250:     return rt->u.rip.metric;
 2251: #endif
 2252: 
 2253:   /* Device routes */
 2254:   if ((a->dest != RTD_ROUTER) && (a->dest != RTD_MULTIPATH))
 2255:     return 0;
 2256: 
 2257:   return IGP_METRIC_UNKNOWN;
 2258: }
 2259: 
 2260: static int
 2261: rt_update_hostentry(rtable *tab, struct hostentry *he)
 2262: {
 2263:   rta *old_src = he->src;
 2264:   int pxlen = 0;
 2265: 
 2266:   /* Reset the hostentry */ 
 2267:   he->src = NULL;
 2268:   he->gw = IPA_NONE;
 2269:   he->dest = RTD_UNREACHABLE;
 2270:   he->igp_metric = 0;
 2271: 
 2272:   net *n = net_route(tab, he->addr, MAX_PREFIX_LENGTH);
 2273:   if (n)
 2274:     {
 2275:       rte *e = n->routes;
 2276:       rta *a = e->attrs;
 2277:       pxlen = n->n.pxlen;
 2278: 
 2279:       if (a->hostentry)
 2280: 	{
 2281: 	  /* Recursive route should not depend on another recursive route */
 2282: 	  log(L_WARN "Next hop address %I resolvable through recursive route for %I/%d",
 2283: 	      he->addr, n->n.prefix, pxlen);
 2284: 	  goto done;
 2285: 	}
 2286: 
 2287:       if (a->dest == RTD_DEVICE)
 2288: 	{
 2289: 	  if (if_local_addr(he->addr, a->iface))
 2290: 	    {
 2291: 	      /* The host address is a local address, this is not valid */
 2292: 	      log(L_WARN "Next hop address %I is a local address of iface %s",
 2293: 		  he->addr, a->iface->name);
 2294: 	      goto done;
 2295: 	    }
 2296: 
 2297: 	  /* The host is directly reachable, use link as a gateway */
 2298: 	  he->gw = he->link;
 2299: 	  he->dest = RTD_ROUTER;
 2300: 	}
 2301:       else
 2302: 	{
 2303: 	  /* The host is reachable through some route entry */
 2304: 	  he->gw = a->gw;
 2305: 	  he->dest = a->dest;
 2306: 	}
 2307: 
 2308:       he->src = rta_clone(a);
 2309:       he->igp_metric = rt_get_igp_metric(e);
 2310:     }
 2311: 
 2312:  done:
 2313:   /* Add a prefix range to the trie, so a change of any route within it retriggers the hostcache update */
 2314:   trie_add_prefix(tab->hostcache->trie, he->addr, MAX_PREFIX_LENGTH, pxlen, MAX_PREFIX_LENGTH);
 2315: 
 2316:   rta_free(old_src);
 2317:   return old_src != he->src;
 2318: }
 2319: 
 2320: static void
 2321: rt_update_hostcache(rtable *tab)
 2322: {
 2323:   struct hostcache *hc = tab->hostcache;
 2324:   struct hostentry *he;
 2325:   node *n, *x;
 2326: 
 2327:   /* Reset the trie */
 2328:   lp_flush(hc->lp);
 2329:   hc->trie = f_new_trie(hc->lp, sizeof(struct f_trie_node));
 2330: 
 2331:   WALK_LIST_DELSAFE(n, x, hc->hostentries)
 2332:     {
 2333:       he = SKIP_BACK(struct hostentry, ln, n);
 2334:       if (!he->uc)
 2335: 	{
 2336: 	  hc_delete_hostentry(hc, he);
 2337: 	  continue;
 2338: 	}
 2339: 
 2340:       if (rt_update_hostentry(tab, he))
 2341: 	rt_schedule_nhu(he->tab);
 2342:     }
 2343: 
 2344:   tab->hcu_scheduled = 0;
 2345: }
 2346: 
 2347: static struct hostentry *
 2348: rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep)
 2349: {
 2350:   struct hostentry *he;
 2351: 
 2352:   if (!tab->hostcache)
 2353:     rt_init_hostcache(tab);
 2354: 
 2355:   uint k = hc_hash(a, dep);
 2356:   struct hostcache *hc = tab->hostcache;
 2357:   for (he = hc->hash_table[k >> hc->hash_shift]; he != NULL; he = he->next)
 2358:     if (ipa_equal(he->addr, a) && (he->tab == dep))
 2359:       return he;
 2360: 
 2361:   he = hc_new_hostentry(hc, a, ll, dep, k);
 2362:   rt_update_hostentry(tab, he);
 2363:   return he;
 2364: }
 2365: 
 2366: void
 2367: rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr *gw, ip_addr *ll)
 2368: {
 2369:   rta_apply_hostentry(a, rt_get_hostentry(tab, *gw, *ll, dep));
 2370: }
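
/*
 * Illustrative sketch (not part of the original source): a protocol with
 * recursive next hops (BGP, for instance) is expected to resolve them
 * roughly as
 *
 *   rta_set_recursive_next_hop(p->table, a, igp_table, &nh_addr, &ll_addr);
 *
 * where igp_table, nh_addr and ll_addr are hypothetical names for the table
 * used for resolution, the protocol next hop and its link-local companion.
 * The hostcache then tracks the resolving route and schedules next hop
 * updates in the dependent table when it changes.
 */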
 2371: 
 2372: 
 2373: /*
 2374:  *  CLI commands
 2375:  */
 2376: 
 2377: static byte *
 2378: rt_format_via(rte *e)
 2379: {
 2380:   rta *a = e->attrs;
 2381: 
 2382:   /* Max text length w/o IP addr and interface name is 16 */
 2383:   static byte via[STD_ADDRESS_P_LENGTH+sizeof(a->iface->name)+16];
 2384: 
 2385:   switch (a->dest)
 2386:     {
 2387:     case RTD_ROUTER:	bsprintf(via, "via %I on %s", a->gw, a->iface->name); break;
 2388:     case RTD_DEVICE:	bsprintf(via, "dev %s", a->iface->name); break;
 2389:     case RTD_BLACKHOLE:	bsprintf(via, "blackhole"); break;
 2390:     case RTD_UNREACHABLE:	bsprintf(via, "unreachable"); break;
 2391:     case RTD_PROHIBIT:	bsprintf(via, "prohibited"); break;
 2392:     case RTD_MULTIPATH:	bsprintf(via, "multipath"); break;
 2393:     default:		bsprintf(via, "???");
 2394:     }
 2395:   return via;
 2396: }
 2397: 
 2398: static void
 2399: rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, ea_list *tmpa)
 2400: {
 2401:   byte from[STD_ADDRESS_P_LENGTH+8];
 2402:   byte tm[TM_DATETIME_BUFFER_SIZE], info[256];
 2403:   rta *a = e->attrs;
 2404:   int primary = (e->net->routes == e);
 2405:   int sync_error = (e->net->n.flags & KRF_SYNC_ERROR);
 2406:   void (*get_route_info)(struct rte *, byte *buf, struct ea_list *attrs);
 2407:   struct mpnh *nh;
 2408: 
 2409:   tm_format_datetime(tm, &config->tf_route, e->lastmod);
 2410:   if (ipa_nonzero(a->from) && !ipa_equal(a->from, a->gw))
 2411:     bsprintf(from, " from %I", a->from);
 2412:   else
 2413:     from[0] = 0;
 2414: 
 2415:   get_route_info = a->src->proto->proto->get_route_info;
 2416:   if (get_route_info || d->verbose)
 2417:     {
 2418:       /* Need to normalize the extended attributes */
 2419:       ea_list *t = tmpa;
 2420:       t = ea_append(t, a->eattrs);
 2421:       tmpa = alloca(ea_scan(t));
 2422:       ea_merge(t, tmpa);
 2423:       ea_sort(tmpa);
 2424:     }
 2425:   if (get_route_info)
 2426:     get_route_info(e, info, tmpa);
 2427:   else
 2428:     bsprintf(info, " (%d)", e->pref);
 2429:   cli_printf(c, -1007, "%-18s %s [%s %s%s]%s%s", ia, rt_format_via(e), a->src->proto->name,
 2430: 	     tm, from, primary ? (sync_error ? " !" : " *") : "", info);
 2431:   for (nh = a->nexthops; nh; nh = nh->next)
 2432:     cli_printf(c, -1007, "\tvia %I on %s weight %d", nh->gw, nh->iface->name, nh->weight + 1);
 2433:   if (d->verbose)
 2434:     rta_show(c, a, tmpa);
 2435: }
 2436: 
 2437: static void
 2438: rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
 2439: {
 2440:   rte *e, *ee;
 2441:   byte ia[STD_ADDRESS_P_LENGTH+8];
 2442:   struct ea_list *tmpa;
 2443:   struct announce_hook *a = NULL;
 2444:   int first = 1;
 2445:   int pass = 0;
 2446: 
 2447:   bsprintf(ia, "%I/%d", n->n.prefix, n->n.pxlen);
 2448: 
 2449:   if (d->export_mode)
 2450:     {
 2451:       if (! d->export_protocol->rt_notify)
 2452: 	return;
 2453: 
 2454:       a = proto_find_announce_hook(d->export_protocol, d->table);
 2455:       if (!a)
 2456: 	return;
 2457:     }
 2458: 
 2459:   for (e = n->routes; e; e = e->next)
 2460:     {
 2461:       if (rte_is_filtered(e) != d->filtered)
 2462: 	continue;
 2463: 
 2464:       d->rt_counter++;
 2465:       d->net_counter += first;
 2466:       first = 0;
 2467: 
 2468:       if (pass)
 2469: 	continue;
 2470: 
 2471:       ee = e;
 2472:       rte_update_lock();		/* We use the update buffer for filtering */
 2473:       tmpa = make_tmp_attrs(e, rte_update_pool);
 2474: 
 2475:       /* Special case for merged export */
 2476:       if ((d->export_mode == RSEM_EXPORT) && (d->export_protocol->accept_ra_types == RA_MERGED))
 2477:         {
 2478: 	  rte *rt_free;
 2479: 	  e = rt_export_merged(a, n, &rt_free, &tmpa, rte_update_pool, 1);
 2480: 	  pass = 1;
 2481: 
 2482: 	  if (!e)
 2483: 	  { e = ee; goto skip; }
 2484: 	}
 2485:       else if (d->export_mode)
 2486: 	{
 2487: 	  struct proto *ep = d->export_protocol;
 2488: 	  int ic = ep->import_control ? ep->import_control(ep, &e, &tmpa, rte_update_pool) : 0;
 2489: 
 2490: 	  if (ep->accept_ra_types == RA_OPTIMAL || ep->accept_ra_types == RA_MERGED)
 2491: 	    pass = 1;
 2492: 
 2493: 	  if (ic < 0)
 2494: 	    goto skip;
 2495: 
 2496: 	  if (d->export_mode > RSEM_PREEXPORT)
 2497: 	    {
 2498: 	      /*
 2499: 	       * FIXME - This shows what should be exported according to the current
 2500: 	       * filters, but not what was really exported. The 'configure soft'
 2501: 	       * command may change the export filter without updating the routes.
 2502: 	       */
 2503: 	      int do_export = (ic > 0) ||
 2504: 		(f_run(a->out_filter, &e, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) <= F_ACCEPT);
 2505: 
 2506: 	      if (do_export != (d->export_mode == RSEM_EXPORT))
 2507: 		goto skip;
 2508: 
 2509: 	      if ((d->export_mode == RSEM_EXPORT) && (ep->accept_ra_types == RA_ACCEPTED))
 2510: 		pass = 1;
 2511: 	    }
 2512: 	}
 2513: 
 2514:       if (d->show_protocol && (d->show_protocol != e->attrs->src->proto))
 2515: 	goto skip;
 2516: 
 2517:       if (f_run(d->filter, &e, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) > F_ACCEPT)
 2518: 	goto skip;
 2519: 
 2520:       d->show_counter++;
 2521:       if (d->stats < 2)
 2522: 	rt_show_rte(c, ia, e, d, tmpa);
 2523:       ia[0] = 0;
 2524: 
 2525:     skip:
 2526:       if (e != ee)
 2527:       {
 2528: 	rte_free(e);
 2529: 	e = ee;
 2530:       }
 2531:       rte_update_unlock();
 2532: 
 2533:       if (d->primary_only)
 2534: 	break;
 2535:     }
 2536: }
 2537: 
 2538: static void
 2539: rt_show_cont(struct cli *c)
 2540: {
 2541:   struct rt_show_data *d = c->rover;
 2542: #ifdef DEBUGGING
 2543:   unsigned max = 4;
 2544: #else
 2545:   unsigned max = 64;
 2546: #endif
 2547:   struct fib *fib = &d->table->fib;
 2548:   struct fib_iterator *it = &d->fit;
 2549: 
 2550:   FIB_ITERATE_START(fib, it, f)
 2551:     {
 2552:       net *n = (net *) f;
 2553:       if (d->running_on_config && d->running_on_config != config)
 2554: 	{
 2555: 	  cli_printf(c, 8004, "Stopped due to reconfiguration");
 2556: 	  goto done;
 2557: 	}
 2558:       if (d->export_protocol && (d->export_protocol->export_state == ES_DOWN))
 2559: 	{
 2560: 	  cli_printf(c, 8005, "Protocol is down");
 2561: 	  goto done;
 2562: 	}
 2563:       if (!max--)
 2564: 	{
 2565: 	  FIB_ITERATE_PUT(it, f);
 2566: 	  return;
 2567: 	}
 2568:       rt_show_net(c, n, d);
 2569:     }
 2570:   FIB_ITERATE_END(f);
 2571:   if (d->stats)
 2572:     cli_printf(c, 14, "%d of %d routes for %d networks", d->show_counter, d->rt_counter, d->net_counter);
 2573:   else
 2574:     cli_printf(c, 0, "");
 2575: done:
 2576:   c->cont = c->cleanup = NULL;
 2577: }
 2578: 
 2579: static void
 2580: rt_show_cleanup(struct cli *c)
 2581: {
 2582:   struct rt_show_data *d = c->rover;
 2583: 
 2584:   /* Unlink the iterator */
 2585:   fit_get(&d->table->fib, &d->fit);
 2586: }
 2587: 
 2588: void
 2589: rt_show(struct rt_show_data *d)
 2590: {
 2591:   net *n;
 2592: 
 2593:   /* The default is either the master table or the table related to the respective protocol */
 2594:   if (!d->table && d->export_protocol) d->table = d->export_protocol->table;
 2595:   if (!d->table && d->show_protocol) d->table = d->show_protocol->table;
 2596:   if (!d->table) d->table = config->master_rtc->table;
 2597: 
 2598:   /* Filtered routes are neither exported nor do they have a sensible ordering */
 2599:   if (d->filtered && (d->export_mode || d->primary_only))
 2600:     cli_msg(0, "");
 2601: 
 2602:   if (d->pxlen == 256)
 2603:     {
 2604:       FIB_ITERATE_INIT(&d->fit, &d->table->fib);
 2605:       this_cli->cont = rt_show_cont;
 2606:       this_cli->cleanup = rt_show_cleanup;
 2607:       this_cli->rover = d;
 2608:     }
 2609:   else
 2610:     {
 2611:       if (d->show_for)
 2612: 	n = net_route(d->table, d->prefix, d->pxlen);
 2613:       else
 2614: 	n = net_find(d->table, d->prefix, d->pxlen);
 2615: 
 2616:       if (n)
 2617: 	rt_show_net(this_cli, n, d);
 2618: 
 2619:       if (d->rt_counter)
 2620: 	cli_msg(0, "");
 2621:       else
 2622: 	cli_msg(8001, "Network not in table");
 2623:     }
 2624: }
 2625: 
 2626: /*
 2627:  *  Documentation for functions declared inline in route.h
 2628:  */
 2629: #if 0
 2630: 
 2631: /**
 2632:  * net_find - find a network entry
 2633:  * @tab: a routing table
 2634:  * @addr: address of the network
 2635:  * @len: length of the network prefix
 2636:  *
 2637:  * net_find() looks up the given network in routing table @tab and
 2638:  * returns a pointer to its &net entry or %NULL if no such network
 2639:  * exists.
 2640:  */
 2641: static inline net *net_find(rtable *tab, ip_addr addr, unsigned len)
 2642: { DUMMY; }
 2643: 
 2644: /**
 2645:  * net_get - obtain a network entry
 2646:  * @tab: a routing table
 2647:  * @addr: address of the network
 2648:  * @len: length of the network prefix
 2649:  *
 2650:  * net_get() looks up the given network in routing table @tab and
 2651:  * returns a pointer to its &net entry. If no such entry exists, it's
 2652:  * created.
 2653:  */
 2654: static inline net *net_get(rtable *tab, ip_addr addr, unsigned len)
 2655: { DUMMY; }
 2656: 
 2657: /**
 2658:  * rte_cow - copy a route for writing
 2659:  * @r: a route entry to be copied
 2660:  *
 2661:  * rte_cow() takes a &rte and prepares it for modification. The exact action
 2662:  * taken depends on the flags of the &rte -- if it's a temporary entry, it's
 2663:  * just returned unchanged, else a new temporary entry with the same contents
 2664:  * is created.
 2665:  *
 2666:  * The primary use of this function is inside the filter machinery -- when
 2667:  * a filter wants to modify &rte contents (to change the preference or to
 2668:  * attach another set of attributes), it must ensure that the &rte is not
 2669:  * shared with anyone else (and especially that it isn't stored in any routing
 2670:  * table).
 2671:  *
 2672:  * Result: a pointer to the new writable &rte.
 2673:  */
 2674: static inline rte * rte_cow(rte *r)
 2675: { DUMMY; }
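
/*
 * Illustrative sketch (not part of the original documentation): a filter
 * that wants to raise the preference first detaches the route from shared
 * storage,
 *
 *   rte *w = rte_cow(r);
 *   w->pref = 200;
 *
 * so the copy stored in the routing table remains untouched.
 */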
 2676: 
 2677: #endif
