File: [ELWIX - Embedded LightWeight unIX] / embedaddon / bird / proto / rip / rip.c
Revision 1.1.1.1 (vendor branch)
Tue Aug 22 12:33:54 2017 UTC by misho
Branches: bird, MAIN
CVS tags: v1_6_3p0, v1_6_3, HEAD
bird 1.6.3

    1: /*
    2:  *	BIRD -- Routing Information Protocol (RIP)
    3:  *
    4:  *	(c) 1998--1999 Pavel Machek <pavel@ucw.cz>
    5:  *	(c) 2004--2013 Ondrej Filip <feela@network.cz>
    6:  *	(c) 2009--2015 Ondrej Zajicek <santiago@crfreenet.org>
    7:  *	(c) 2009--2015 CZ.NIC z.s.p.o.
    8:  *
    9:  *	Can be freely distributed and used under the terms of the GNU GPL.
   10:  */
   11: 
   12: /**
   13:  * DOC: Routing Information Protocol (RIP)
   14:  *
   15:  * The RIP protocol is implemented in two files: |rip.c| containing the protocol
   16:  * logic, route management and the protocol glue with BIRD core, and |packets.c|
   17:  * handling RIP packet processing, RX, TX and protocol sockets.
   18:  *
   19:  * Each instance of RIP is described by a structure &rip_proto, which contains
   20:  * an internal RIP routing table, a list of protocol interfaces and the main
   21:  * timer responsible for RIP routing table cleanup.
   22:  *
   23:  * RIP internal routing table contains incoming and outgoing routes. For each
   24:  * network (represented by structure &rip_entry) there is one outgoing route
    25:  * stored directly in &rip_entry and a one-way linked list of incoming routes
   26:  * (structures &rip_rte). The list contains incoming routes from different RIP
   27:  * neighbors, but only routes with the lowest metric are stored (i.e., all
   28:  * stored incoming routes have the same metric).
   29:  *
    30:  * Note that RIP itself does not select the outgoing route; that is done by the core
   31:  * routing table. When a new incoming route is received, it is propagated to the
   32:  * RIP table by rip_update_rte() and possibly stored in the list of incoming
   33:  * routes. Then the change may be propagated to the core by rip_announce_rte().
    34:  * The core selects the best route and propagates it to RIP by rip_rt_notify(),
    35:  * which updates the outgoing route part of &rip_entry and possibly triggers route
   36:  * propagation by rip_trigger_update().
   37:  *
   38:  * RIP interfaces are represented by structures &rip_iface. A RIP interface
   39:  * contains a per-interface socket, a list of associated neighbors, interface
   40:  * configuration, and state information related to scheduled interface events
   41:  * and running update sessions. RIP interfaces are added and removed based on
   42:  * core interface notifications.
   43:  *
   44:  * There are two RIP interface events - regular updates and triggered updates.
   45:  * Both are managed from the RIP interface timer (rip_iface_timer()). Regular
    46:  * updates are sent at a fixed interval and propagate the whole routing table,
   47:  * while triggered updates are scheduled by rip_trigger_update() due to some
   48:  * routing table change and propagate only the routes modified since the time
   49:  * they were scheduled. There are also unicast-destined requested updates, but
    50:  * these are sent directly as a reaction to a received RIP request message. The
   51:  * update session is started by rip_send_table(). There may be at most one
   52:  * active update session per interface, as the associated state (including the
   53:  * fib iterator) is stored directly in &rip_iface structure.
   54:  *
   55:  * RIP neighbors are represented by structures &rip_neighbor. Compared to
   56:  * neighbor handling in other routing protocols, RIP does not have explicit
   57:  * neighbor discovery and adjacency maintenance, which makes the &rip_neighbor
   58:  * related code a bit peculiar. RIP neighbors are interlinked with core neighbor
   59:  * structures (&neighbor) and use core neighbor notifications to ensure that RIP
    60:  * neighbors are promptly removed. RIP neighbors are added based on received route
   61:  * notifications and removed based on core neighbor and RIP interface events.
   62:  *
    63:  * RIP neighbors are linked by RIP routes and use a counter to track the number
    64:  * of associated routes, but when these RIP routes time out, the associated RIP
    65:  * neighbor is still kept alive (with a zero counter). When a RIP neighbor is
    66:  * removed but still has some associated routes, it is not freed, just changed
    67:  * to a detached state (core neighbors and RIP ifaces are unlinked); then,
    68:  * during the main timer cleanup phase, the associated routes are removed and
    69:  * the &rip_neighbor structure is finally freed.
   70:  *
   71:  * Supported standards:
   72:  * - RFC 1058 - RIPv1
   73:  * - RFC 2453 - RIPv2
   74:  * - RFC 2080 - RIPng
   75:  * - RFC 4822 - RIP cryptographic authentication
   76:  */
   77: 
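/*
 * Illustrative only: a minimal sketch of how the packet processing code in
 * |packets.c| is expected to hand a parsed RIP entry to the functions below.
 * The local variables (rte0, prefix, pxlen, metric, tag, next_hop,
 * sender_addr, ifa) are hypothetical names; rip_get_neighbor(),
 * rip_update_rte() and rip_withdraw_rte() are the real entry points defined
 * in this file.
 *
 *   struct rip_rte rte0 = {
 *     .from = rip_get_neighbor(p, &sender_addr, ifa),
 *     .next_hop = next_hop,
 *     .metric = metric,
 *     .tag = tag,
 *     .expires = now + ifa->cf->timeout_time,
 *   };
 *
 *   if (rte0.from)
 *   {
 *     if (metric < p->infinity)
 *       rip_update_rte(p, &prefix, pxlen, &rte0);
 *     else
 *       rip_withdraw_rte(p, &prefix, pxlen, rte0.from);
 *   }
 */
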
   78: #include <stdlib.h>
   79: #include "rip.h"
   80: 
   81: 
   82: static inline void rip_lock_neighbor(struct rip_neighbor *n);
   83: static inline void rip_unlock_neighbor(struct rip_neighbor *n);
   84: static inline int rip_iface_link_up(struct rip_iface *ifa);
   85: static inline void rip_kick_timer(struct rip_proto *p);
   86: static inline void rip_iface_kick_timer(struct rip_iface *ifa);
   87: static void rip_iface_timer(timer *timer);
   88: static void rip_trigger_update(struct rip_proto *p);
   89: 
   90: 
   91: /*
   92:  *	RIP routes
   93:  */
   94: 
   95: static void
   96: rip_init_entry(struct fib_node *fn)
   97: {
    98:   // struct rip_entry *en = (void *) fn;
   99: 
  100:   const uint offset = OFFSETOF(struct rip_entry, routes);
  101:   memset((byte *)fn + offset, 0, sizeof(struct rip_entry) - offset);
  102: }
  103: 
  104: static struct rip_rte *
  105: rip_add_rte(struct rip_proto *p, struct rip_rte **rp, struct rip_rte *src)
  106: {
  107:   struct rip_rte *rt = sl_alloc(p->rte_slab);
  108: 
  109:   memcpy(rt, src, sizeof(struct rip_rte));
  110:   rt->next = *rp;
  111:   *rp = rt;
  112: 
  113:   rip_lock_neighbor(rt->from);
  114: 
  115:   return rt;
  116: }
  117: 
  118: static inline void
  119: rip_remove_rte(struct rip_proto *p, struct rip_rte **rp)
  120: {
  121:   struct rip_rte *rt = *rp;
  122: 
  123:   rip_unlock_neighbor(rt->from);
  124: 
  125:   *rp = rt->next;
  126:   sl_free(p->rte_slab, rt);
  127: }
  128: 
  129: static inline int rip_same_rte(struct rip_rte *a, struct rip_rte *b)
  130: { return a->metric == b->metric && a->tag == b->tag && ipa_equal(a->next_hop, b->next_hop); }
  131: 
  132: static inline int rip_valid_rte(struct rip_rte *rt)
  133: { return rt->from->ifa != NULL; }
  134: 
  135: /**
  136:  * rip_announce_rte - announce route from RIP routing table to the core
  137:  * @p: RIP instance
  138:  * @en: related network
  139:  *
   140:  * The function takes the list of incoming routes from @en, prepares an
   141:  * appropriate &rte for the core and propagates it by rte_update().
  142:  */
  143: static void
  144: rip_announce_rte(struct rip_proto *p, struct rip_entry *en)
  145: {
  146:   struct rip_rte *rt = en->routes;
  147: 
  148:   /* Find first valid rte */
  149:   while (rt && !rip_valid_rte(rt))
  150:     rt = rt->next;
  151: 
  152:   if (rt)
  153:   {
  154:     /* Update */
  155:     net *n = net_get(p->p.table, en->n.prefix, en->n.pxlen);
  156: 
  157:     rta a0 = {
  158:       .src = p->p.main_source,
  159:       .source = RTS_RIP,
  160:       .scope = SCOPE_UNIVERSE,
  161:       .cast = RTC_UNICAST
  162:     };
  163: 
  164:     u8 rt_metric = rt->metric;
  165:     u16 rt_tag = rt->tag;
  166:     struct rip_rte *rt2 = rt->next;
  167: 
  168:     /* Find second valid rte */
  169:     while (rt2 && !rip_valid_rte(rt2))
  170:       rt2 = rt2->next;
  171: 
  172:     if (p->ecmp && rt2)
  173:     {
  174:       /* ECMP route */
  175:       struct mpnh *nhs = NULL;
  176:       int num = 0;
  177: 
  178:       for (rt = en->routes; rt && (num < p->ecmp); rt = rt->next)
  179:       {
  180: 	if (!rip_valid_rte(rt))
  181: 	    continue;
  182: 
  183: 	struct mpnh *nh = alloca(sizeof(struct mpnh));
  184: 	nh->gw = rt->next_hop;
  185: 	nh->iface = rt->from->nbr->iface;
  186: 	nh->weight = rt->from->ifa->cf->ecmp_weight;
  187: 	mpnh_insert(&nhs, nh);
  188: 	num++;
  189: 
  190: 	if (rt->tag != rt_tag)
  191: 	  rt_tag = 0;
  192:       }
  193: 
  194:       a0.dest = RTD_MULTIPATH;
  195:       a0.nexthops = nhs;
  196:     }
  197:     else
  198:     {
  199:       /* Unipath route */
  200:       a0.dest = RTD_ROUTER;
  201:       a0.gw = rt->next_hop;
  202:       a0.iface = rt->from->nbr->iface;
  203:       a0.from = rt->from->nbr->addr;
  204:     }
  205: 
  206:     rta *a = rta_lookup(&a0);
  207:     rte *e = rte_get_temp(a);
  208: 
  209:     e->u.rip.from = a0.iface;
  210:     e->u.rip.metric = rt_metric;
  211:     e->u.rip.tag = rt_tag;
  212: 
  213:     e->net = n;
  214:     e->pflags = 0;
  215: 
  216:     rte_update(&p->p, n, e);
  217:   }
  218:   else
  219:   {
  220:     /* Withdraw */
  221:     net *n = net_find(p->p.table, en->n.prefix, en->n.pxlen);
  222:     rte_update(&p->p, n, NULL);
  223:   }
  224: }
  225: 
  226: /**
   227:  * rip_update_rte - enter a route update to the RIP routing table
  228:  * @p: RIP instance
  229:  * @prefix: network prefix
  230:  * @pxlen: network prefix length
  231:  * @new: a &rip_rte representing the new route
  232:  *
  233:  * The function is called by the RIP packet processing code whenever it receives
  234:  * a reachable route. The appropriate routing table entry is found and the list
  235:  * of incoming routes is updated. Eventually, the change is also propagated to
  236:  * the core by rip_announce_rte(). Note that for unreachable routes,
  237:  * rip_withdraw_rte() should be called instead of rip_update_rte().
  238:  */
  239: void
  240: rip_update_rte(struct rip_proto *p, ip_addr *prefix, int pxlen, struct rip_rte *new)
  241: {
  242:   struct rip_entry *en = fib_get(&p->rtable, prefix, pxlen);
  243:   struct rip_rte *rt, **rp;
  244:   int changed = 0;
  245: 
  246:   /* If the new route is better, remove all current routes */
  247:   if (en->routes && new->metric < en->routes->metric)
  248:     while (en->routes)
  249:       rip_remove_rte(p, &en->routes);
  250: 
  251:   /* Find the old route (also set rp for later) */
  252:   for (rp = &en->routes; rt = *rp; rp = &rt->next)
  253:     if (rt->from == new->from)
  254:     {
  255:       if (rip_same_rte(rt, new))
  256:       {
  257: 	rt->expires = new->expires;
  258: 	return;
  259:       }
  260: 
  261:       /* Remove the old route */
  262:       rip_remove_rte(p, rp);
  263:       changed = 1;
  264:       break;
  265:     }
  266: 
  267:   /* If the new route is optimal, add it to the list */
  268:   if (!en->routes || new->metric == en->routes->metric)
  269:   {
  270:     rt = rip_add_rte(p, rp, new);
  271:     changed = 1;
  272:   }
  273: 
   274:   /* Announce change if in a relevant position (the first or any for ECMP) */
  275:   if (changed && (rp == &en->routes || p->ecmp))
  276:     rip_announce_rte(p, en);
  277: }
  278: 
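/*
 * Example walk-through (illustrative): an entry holds metric-3 routes from
 * neighbors A and B. A metric-2 route from C arrives: the first branch above
 * flushes both metric-3 routes, the final branch adds C's route at the head
 * of the (now empty) list, and rip_announce_rte() is called. If instead
 * another metric-3 route from B arrives with the same next hop and tag, only
 * its expiration time is refreshed and the function returns early.
 */
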
  279: /**
   280:  * rip_withdraw_rte - enter a route withdrawal to the RIP routing table
  281:  * @p: RIP instance
  282:  * @prefix: network prefix
  283:  * @pxlen: network prefix length
  284:  * @from: a &rip_neighbor propagating the withdraw
  285:  *
  286:  * The function is called by the RIP packet processing code whenever it receives
   287:  * an unreachable route. The incoming route for the given network from neighbor
   288:  * @from is removed. The change may then also be propagated by rip_announce_rte().
  289:  */
  290: void
  291: rip_withdraw_rte(struct rip_proto *p, ip_addr *prefix, int pxlen, struct rip_neighbor *from)
  292: {
  293:   struct rip_entry *en = fib_find(&p->rtable, prefix, pxlen);
  294:   struct rip_rte *rt, **rp;
  295: 
  296:   if (!en)
  297:     return;
  298: 
  299:   /* Find the old route */
  300:   for (rp = &en->routes; rt = *rp; rp = &rt->next)
  301:     if (rt->from == from)
  302:       break;
  303: 
  304:   if (!rt)
  305:     return;
  306: 
  307:   /* Remove the old route */
  308:   rip_remove_rte(p, rp);
  309: 
   310:   /* Announce change if in a relevant position */
  311:   if (rp == &en->routes || p->ecmp)
  312:     rip_announce_rte(p, en);
  313: }
  314: 
  315: /*
   316:  * rip_rt_notify - the core tells us about a new route, so store
  317:  * it into our data structures.
  318:  */
  319: static void
  320: rip_rt_notify(struct proto *P, struct rtable *table UNUSED, struct network *net, struct rte *new,
  321: 	      struct rte *old UNUSED, struct ea_list *attrs)
  322: {
  323:   struct rip_proto *p = (struct rip_proto *) P;
  324:   struct rip_entry *en;
  325:   int old_metric;
  326: 
  327:   if (new)
  328:   {
  329:     /* Update */
  330:     u32 rt_metric = ea_get_int(attrs, EA_RIP_METRIC, 1);
  331:     u32 rt_tag = ea_get_int(attrs, EA_RIP_TAG, 0);
  332: 
  333:     if (rt_metric > p->infinity)
  334:     {
  335:       log(L_WARN "%s: Invalid rip_metric value %u for route %I/%d",
  336: 	  p->p.name, rt_metric, net->n.prefix, net->n.pxlen);
  337:       rt_metric = p->infinity;
  338:     }
  339: 
  340:     if (rt_tag > 0xffff)
  341:     {
  342:       log(L_WARN "%s: Invalid rip_tag value %u for route %I/%d",
  343: 	  p->p.name, rt_tag, net->n.prefix, net->n.pxlen);
  344:       rt_metric = p->infinity;
  345:       rt_tag = 0;
  346:     }
  347: 
  348:     /*
  349:      * Note that we accept exported routes with infinity metric (this could
   350:      * happen if rip_metric is modified in filters). Such an entry has an infinity
  351:      * metric but is RIP_ENTRY_VALID and therefore is not subject to garbage
  352:      * collection.
  353:      */
  354: 
  355:     en = fib_get(&p->rtable, &net->n.prefix, net->n.pxlen);
  356: 
  357:     old_metric = en->valid ? en->metric : -1;
  358: 
  359:     en->valid = RIP_ENTRY_VALID;
  360:     en->metric = rt_metric;
  361:     en->tag = rt_tag;
  362:     en->from = (new->attrs->src->proto == P) ? new->u.rip.from : NULL;
  363:     en->iface = new->attrs->iface;
  364:     en->next_hop = new->attrs->gw;
  365:   }
  366:   else
  367:   {
  368:     /* Withdraw */
  369:     en = fib_find(&p->rtable, &net->n.prefix, net->n.pxlen);
  370: 
  371:     if (!en || en->valid != RIP_ENTRY_VALID)
  372:       return;
  373: 
  374:     old_metric = en->metric;
  375: 
  376:     en->valid = RIP_ENTRY_STALE;
  377:     en->metric = p->infinity;
  378:     en->tag = 0;
  379:     en->from = NULL;
  380:     en->iface = NULL;
  381:     en->next_hop = IPA_NONE;
  382:   }
  383: 
  384:   /* Activate triggered updates */
  385:   if (en->metric != old_metric)
  386:   {
  387:     en->changed = now;
  388:     rip_trigger_update(p);
  389:   }
  390: }
  391: 
  392: 
  393: /*
  394:  *	RIP neighbors
  395:  */
  396: 
  397: struct rip_neighbor *
  398: rip_get_neighbor(struct rip_proto *p, ip_addr *a, struct rip_iface *ifa)
  399: {
  400:   neighbor *nbr = neigh_find2(&p->p, a, ifa->iface, 0);
  401: 
  402:   if (!nbr || (nbr->scope == SCOPE_HOST) || !rip_iface_link_up(ifa))
  403:     return NULL;
  404: 
  405:   if (nbr->data)
  406:     return nbr->data;
  407: 
  408:   TRACE(D_EVENTS, "New neighbor %I on %s", *a, ifa->iface->name);
  409: 
  410:   struct rip_neighbor *n = mb_allocz(p->p.pool, sizeof(struct rip_neighbor));
  411:   n->ifa = ifa;
  412:   n->nbr = nbr;
  413:   nbr->data = n;
  414:   n->csn = nbr->aux;
  415: 
  416:   add_tail(&ifa->neigh_list, NODE n);
  417: 
  418:   return n;
  419: }
  420: 
  421: static void
  422: rip_remove_neighbor(struct rip_proto *p, struct rip_neighbor *n)
  423: {
  424:   neighbor *nbr = n->nbr;
  425: 
  426:   TRACE(D_EVENTS, "Removing neighbor %I on %s", nbr->addr, nbr->iface->name);
  427: 
  428:   rem_node(NODE n);
  429:   n->ifa = NULL;
  430:   n->nbr = NULL;
  431:   nbr->data = NULL;
  432:   nbr->aux = n->csn;
  433: 
  434:   rfree(n->bfd_req);
  435:   n->bfd_req = NULL;
  436:   n->last_seen = 0;
  437: 
  438:   if (!n->uc)
  439:     mb_free(n);
  440: 
  441:   /* Related routes are removed in rip_timer() */
  442:   rip_kick_timer(p);
  443: }
  444: 
  445: static inline void
  446: rip_lock_neighbor(struct rip_neighbor *n)
  447: {
  448:   n->uc++;
  449: }
  450: 
  451: static inline void
  452: rip_unlock_neighbor(struct rip_neighbor *n)
  453: {
  454:   n->uc--;
  455: 
  456:   if (!n->nbr && !n->uc)
  457:     mb_free(n);
  458: }
  459: 
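/*
 * Lifetime sketch (illustrative, following the code above): a neighbor with
 * one associated route has uc == 1. If the core neighbor goes away first,
 * rip_remove_neighbor() only detaches the structure (n->nbr == NULL, uc > 0)
 * and the final rip_unlock_neighbor(), called from rip_remove_rte() during
 * the main timer cleanup, releases it:
 *
 *   rip_lock_neighbor(n);        // route added:   uc 0 -> 1
 *   rip_remove_neighbor(p, n);   // detached, uc still 1, not freed yet
 *   rip_unlock_neighbor(n);      // route removed: uc 1 -> 0, mb_free(n)
 */
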
  460: static void
  461: rip_neigh_notify(struct neighbor *nbr)
  462: {
  463:   struct rip_proto *p = (struct rip_proto *) nbr->proto;
  464:   struct rip_neighbor *n = nbr->data;
  465: 
  466:   if (!n)
  467:     return;
  468: 
  469:   /*
  470:    * We assume that rip_neigh_notify() is called before rip_if_notify() for
  471:    * IF_CHANGE_DOWN and therefore n->ifa is still valid. We have no such
  472:    * ordering assumption for IF_CHANGE_LINK, so we test link state of the
  473:    * underlying iface instead of just rip_iface state.
  474:    */
  475:   if ((nbr->scope <= 0) || !rip_iface_link_up(n->ifa))
  476:     rip_remove_neighbor(p, n);
  477: }
  478: 
  479: static void
  480: rip_bfd_notify(struct bfd_request *req)
  481: {
  482:   struct rip_neighbor *n = req->data;
  483:   struct rip_proto *p = n->ifa->rip;
  484: 
  485:   if (req->down)
  486:   {
  487:     TRACE(D_EVENTS, "BFD session down for nbr %I on %s",
  488: 	  n->nbr->addr, n->ifa->iface->name);
  489:     rip_remove_neighbor(p, n);
  490:   }
  491: }
  492: 
  493: void
  494: rip_update_bfd(struct rip_proto *p, struct rip_neighbor *n)
  495: {
  496:   int use_bfd = n->ifa->cf->bfd && n->last_seen;
  497: 
  498:   if (use_bfd && !n->bfd_req)
  499:   {
  500:     /*
   501:      * For RIPv2, use the same address as rip_open_socket(). For RIPng, the
   502:      * neighbor address should be from the same prefix, thus also link-local. This
  503:      * may cause problems if two link-local addresses are assigned to one iface.
  504:      */
  505:     ip_addr saddr = rip_is_v2(p) ? n->ifa->sk->saddr : n->nbr->ifa->ip;
  506:     n->bfd_req = bfd_request_session(p->p.pool, n->nbr->addr, saddr,
  507: 				     n->nbr->iface, rip_bfd_notify, n);
  508:   }
  509: 
  510:   if (!use_bfd && n->bfd_req)
  511:   {
  512:     rfree(n->bfd_req);
  513:     n->bfd_req = NULL;
  514:   }
  515: }
  516: 
  517: 
  518: /*
  519:  *	RIP interfaces
  520:  */
  521: 
  522: static void
  523: rip_iface_start(struct rip_iface *ifa)
  524: {
  525:   struct rip_proto *p = ifa->rip;
  526: 
  527:   TRACE(D_EVENTS, "Starting interface %s", ifa->iface->name);
  528: 
  529:   ifa->next_regular = now + (random() % ifa->cf->update_time) + 1;
  530:   ifa->next_triggered = now;	/* Available immediately */
  531:   ifa->want_triggered = 1;	/* All routes in triggered update */
  532:   tm_start(ifa->timer, 1);	/* Or 100 ms */
  533:   ifa->up = 1;
  534: 
  535:   if (!ifa->cf->passive)
  536:     rip_send_request(ifa->rip, ifa);
  537: }
  538: 
  539: static void
  540: rip_iface_stop(struct rip_iface *ifa)
  541: {
  542:   struct rip_proto *p = ifa->rip;
  543:   struct rip_neighbor *n;
  544: 
  545:   TRACE(D_EVENTS, "Stopping interface %s", ifa->iface->name);
  546: 
  547:   rip_reset_tx_session(p, ifa);
  548: 
  549:   WALK_LIST_FIRST(n, ifa->neigh_list)
  550:     rip_remove_neighbor(p, n);
  551: 
  552:   tm_stop(ifa->timer);
  553:   ifa->up = 0;
  554: }
  555: 
  556: static inline int
  557: rip_iface_link_up(struct rip_iface *ifa)
  558: {
  559:   return !ifa->cf->check_link || (ifa->iface->flags & IF_LINK_UP);
  560: }
  561: 
  562: static void
  563: rip_iface_update_state(struct rip_iface *ifa)
  564: {
  565:   int up = ifa->sk && rip_iface_link_up(ifa);
  566: 
  567:   if (up == ifa->up)
  568:     return;
  569: 
  570:   if (up)
  571:     rip_iface_start(ifa);
  572:   else
  573:     rip_iface_stop(ifa);
  574: }
  575: 
  576: static void
  577: rip_iface_update_buffers(struct rip_iface *ifa)
  578: {
  579:   if (!ifa->sk)
  580:     return;
  581: 
  582:   uint rbsize = ifa->cf->rx_buffer ?: ifa->iface->mtu;
  583:   uint tbsize = ifa->cf->tx_length ?: ifa->iface->mtu;
  584:   rbsize = MAX(rbsize, tbsize);
  585: 
  586:   sk_set_rbsize(ifa->sk, rbsize);
  587:   sk_set_tbsize(ifa->sk, tbsize);
  588: 
  589:   uint headers = (rip_is_v2(ifa->rip) ? IP4_HEADER_LENGTH : IP6_HEADER_LENGTH) + UDP_HEADER_LENGTH;
  590:   ifa->tx_plen = tbsize - headers;
  591: 
  592:   if (ifa->cf->auth_type == RIP_AUTH_CRYPTO)
  593:     ifa->tx_plen -= RIP_AUTH_TAIL_LENGTH + max_mac_length(ifa->cf->passwords);
  594: }
  595: 
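/*
 * Worked example (illustrative; assumes the usual 20-byte IPv4 and 8-byte UDP
 * header sizes): for RIPv2 on an interface with MTU 1500 and no explicit
 * rx_buffer / tx_length configured, rbsize = tbsize = 1500 and
 * tx_plen = 1500 - (20 + 8) = 1472 bytes of RIP payload. With cryptographic
 * authentication the payload further shrinks by RIP_AUTH_TAIL_LENGTH plus the
 * length of the longest configured MAC.
 */
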
  596: static inline void
  597: rip_iface_update_bfd(struct rip_iface *ifa)
  598: {
  599:   struct rip_proto *p = ifa->rip;
  600:   struct rip_neighbor *n;
  601: 
  602:   WALK_LIST(n, ifa->neigh_list)
  603:     rip_update_bfd(p, n);
  604: }
  605: 
  606: 
  607: static void
  608: rip_iface_locked(struct object_lock *lock)
  609: {
  610:   struct rip_iface *ifa = lock->data;
  611:   struct rip_proto *p = ifa->rip;
  612: 
  613:   if (!rip_open_socket(ifa))
  614:   {
  615:     log(L_ERR "%s: Cannot open socket for %s", p->p.name, ifa->iface->name);
  616:     return;
  617:   }
  618: 
  619:   rip_iface_update_buffers(ifa);
  620:   rip_iface_update_state(ifa);
  621: }
  622: 
  623: 
  624: static struct rip_iface *
  625: rip_find_iface(struct rip_proto *p, struct iface *what)
  626: {
  627:   struct rip_iface *ifa;
  628: 
  629:   WALK_LIST(ifa, p->iface_list)
  630:     if (ifa->iface == what)
  631:       return ifa;
  632: 
  633:   return NULL;
  634: }
  635: 
  636: static void
  637: rip_add_iface(struct rip_proto *p, struct iface *iface, struct rip_iface_config *ic)
  638: {
  639:   struct rip_iface *ifa;
  640: 
  641:   TRACE(D_EVENTS, "Adding interface %s", iface->name);
  642: 
  643:   ifa = mb_allocz(p->p.pool, sizeof(struct rip_iface));
  644:   ifa->rip = p;
  645:   ifa->iface = iface;
  646:   ifa->cf = ic;
  647: 
  648:   if (ipa_nonzero(ic->address))
  649:     ifa->addr = ic->address;
  650:   else if (ic->mode == RIP_IM_MULTICAST)
  651:     ifa->addr = rip_is_v2(p) ? IP4_RIP_ROUTERS : IP6_RIP_ROUTERS;
  652:   else /* Broadcast */
  653:     ifa->addr = iface->addr->brd;
  654: 
  655:   init_list(&ifa->neigh_list);
  656: 
  657:   add_tail(&p->iface_list, NODE ifa);
  658: 
  659:   ifa->timer = tm_new_set(p->p.pool, rip_iface_timer, ifa, 0, 0);
  660: 
  661:   struct object_lock *lock = olock_new(p->p.pool);
  662:   lock->type = OBJLOCK_UDP;
  663:   lock->port = ic->port;
  664:   lock->iface = iface;
  665:   lock->data = ifa;
  666:   lock->hook = rip_iface_locked;
  667:   ifa->lock = lock;
  668: 
  669:   olock_acquire(lock);
  670: }
  671: 
  672: static void
  673: rip_remove_iface(struct rip_proto *p, struct rip_iface *ifa)
  674: {
  675:   rip_iface_stop(ifa);
  676: 
  677:   TRACE(D_EVENTS, "Removing interface %s", ifa->iface->name);
  678: 
  679:   rem_node(NODE ifa);
  680: 
  681:   rfree(ifa->sk);
  682:   rfree(ifa->lock);
  683:   rfree(ifa->timer);
  684: 
  685:   mb_free(ifa);
  686: }
  687: 
  688: static int
  689: rip_reconfigure_iface(struct rip_proto *p, struct rip_iface *ifa, struct rip_iface_config *new)
  690: {
  691:   struct rip_iface_config *old = ifa->cf;
  692: 
   693:   /* Changing any of these options would require resetting the iface socket */
  694:   if ((new->mode != old->mode) ||
  695:       (new->port != old->port) ||
  696:       (new->tx_tos != old->tx_tos) ||
  697:       (new->tx_priority != old->tx_priority) ||
  698:       (new->ttl_security != old->ttl_security))
  699:     return 0;
  700: 
  701:   TRACE(D_EVENTS, "Reconfiguring interface %s", ifa->iface->name);
  702: 
  703:   ifa->cf = new;
  704: 
  705:   rip_iface_update_buffers(ifa);
  706: 
  707:   if (ifa->next_regular > (now + new->update_time))
  708:     ifa->next_regular = now + (random() % new->update_time) + 1;
  709: 
  710:   if (new->check_link != old->check_link)
  711:     rip_iface_update_state(ifa);
  712: 
  713:   if (new->bfd != old->bfd)
  714:     rip_iface_update_bfd(ifa);
  715: 
  716:   if (ifa->up)
  717:     rip_iface_kick_timer(ifa);
  718: 
  719:   return 1;
  720: }
  721: 
  722: static void
  723: rip_reconfigure_ifaces(struct rip_proto *p, struct rip_config *cf)
  724: {
  725:   struct iface *iface;
  726: 
  727:   WALK_LIST(iface, iface_list)
  728:   {
  729:     if (! (iface->flags & IF_UP))
  730:       continue;
  731: 
  732:     struct rip_iface *ifa = rip_find_iface(p, iface);
  733:     struct rip_iface_config *ic = (void *) iface_patt_find(&cf->patt_list, iface, NULL);
  734: 
  735:     if (ifa && ic)
  736:     {
  737:       if (rip_reconfigure_iface(p, ifa, ic))
  738: 	continue;
  739: 
  740:       /* Hard restart */
  741:       log(L_INFO "%s: Restarting interface %s", p->p.name, ifa->iface->name);
  742:       rip_remove_iface(p, ifa);
  743:       rip_add_iface(p, iface, ic);
  744:     }
  745: 
  746:     if (ifa && !ic)
  747:       rip_remove_iface(p, ifa);
  748: 
  749:     if (!ifa && ic)
  750:       rip_add_iface(p, iface, ic);
  751:   }
  752: }
  753: 
  754: static void
  755: rip_if_notify(struct proto *P, unsigned flags, struct iface *iface)
  756: {
  757:   struct rip_proto *p = (void *) P;
  758:   struct rip_config *cf = (void *) P->cf;
  759: 
  760:   if (iface->flags & IF_IGNORE)
  761:     return;
  762: 
  763:   if (flags & IF_CHANGE_UP)
  764:   {
  765:     struct rip_iface_config *ic = (void *) iface_patt_find(&cf->patt_list, iface, NULL);
  766: 
  767:     if (ic)
  768:       rip_add_iface(p, iface, ic);
  769: 
  770:     return;
  771:   }
  772: 
  773:   struct rip_iface *ifa = rip_find_iface(p, iface);
  774: 
  775:   if (!ifa)
  776:     return;
  777: 
  778:   if (flags & IF_CHANGE_DOWN)
  779:   {
  780:     rip_remove_iface(p, ifa);
  781:     return;
  782:   }
  783: 
  784:   if (flags & IF_CHANGE_MTU)
  785:     rip_iface_update_buffers(ifa);
  786: 
  787:   if (flags & IF_CHANGE_LINK)
  788:     rip_iface_update_state(ifa);
  789: }
  790: 
  791: 
  792: /*
  793:  *	RIP timer events
  794:  */
  795: 
  796: /**
  797:  * rip_timer - RIP main timer hook
  798:  * @t: timer
  799:  *
  800:  * The RIP main timer is responsible for routing table maintenance. Invalid or
  801:  * expired routes (&rip_rte) are removed and garbage collection of stale routing
   802:  * table entries (&rip_entry) is done. Changes are propagated to core tables;
  803:  * route reload is also done here. Note that garbage collection uses a maximal
  804:  * GC time, while interfaces maintain an illusion of per-interface GC times in
  805:  * rip_send_response().
  806:  *
   807:  * Keeping incoming routes and the selected outgoing route are two independent
   808:  * functions; therefore, after garbage collection some entries now considered
   809:  * invalid (RIP_ENTRY_DUMMY) may still have a non-empty list of incoming routes,
  810:  * while some valid entries (representing an outgoing route) may have that list
  811:  * empty.
  812:  *
  813:  * The main timer is not scheduled periodically but it uses the time of the
  814:  * current next event and the minimal interval of any possible event to compute
  815:  * the time of the next run.
  816:  */
  817: static void
  818: rip_timer(timer *t)
  819: {
  820:   struct rip_proto *p = t->data;
  821:   struct rip_config *cf = (void *) (p->p.cf);
  822:   struct rip_iface *ifa;
  823:   struct rip_neighbor *n, *nn;
  824:   struct fib_iterator fit;
  825:   bird_clock_t next = now + MIN(cf->min_timeout_time, cf->max_garbage_time);
  826:   bird_clock_t expires = 0;
  827: 
  828:   TRACE(D_EVENTS, "Main timer fired");
  829: 
  830:   FIB_ITERATE_INIT(&fit, &p->rtable);
  831: 
  832:   loop:
  833:   FIB_ITERATE_START(&p->rtable, &fit, node)
  834:   {
  835:     struct rip_entry *en = (struct rip_entry *) node;
  836:     struct rip_rte *rt, **rp;
  837:     int changed = 0;
  838: 
  839:     /* Checking received routes for timeout and for dead neighbors */
  840:     for (rp = &en->routes; rt = *rp; /* rp = &rt->next */)
  841:     {
  842:       if (!rip_valid_rte(rt) || (rt->expires <= now))
  843:       {
  844: 	rip_remove_rte(p, rp);
  845: 	changed = 1;
  846: 	continue;
  847:       }
  848: 
  849:       next = MIN(next, rt->expires);
  850:       rp = &rt->next;
  851:     }
  852: 
   853:     /* Propagate the change, if any */
  854:     if (changed || p->rt_reload)
  855:     {
  856:       /*
  857:        * We have to restart the iteration because there may be a cascade of
  858:        * synchronous events rip_announce_rte() -> nest table change ->
  859:        * rip_rt_notify() -> p->rtable change, invalidating hidden variables.
  860:        */
  861: 
  862:       FIB_ITERATE_PUT_NEXT(&fit, &p->rtable, node);
  863:       rip_announce_rte(p, en);
  864:       goto loop;
  865:     }
  866: 
  867:     /* Checking stale entries for garbage collection timeout */
  868:     if (en->valid == RIP_ENTRY_STALE)
  869:     {
  870:       expires = en->changed + cf->max_garbage_time;
  871: 
  872:       if (expires <= now)
  873:       {
  874: 	// TRACE(D_EVENTS, "entry is too old: %I/%d", en->n.prefix, en->n.pxlen);
  875: 	en->valid = 0;
  876:       }
  877:       else
  878: 	next = MIN(next, expires);
  879:     }
  880: 
  881:     /* Remove empty nodes */
  882:     if (!en->valid && !en->routes)
  883:     {
  884:       FIB_ITERATE_PUT(&fit, node);
  885:       fib_delete(&p->rtable, node);
  886:       goto loop;
  887:     }
  888:   }
  889:   FIB_ITERATE_END(node);
  890: 
  891:   p->rt_reload = 0;
  892: 
  893:   /* Handling neighbor expiration */
  894:   WALK_LIST(ifa, p->iface_list)
  895:     WALK_LIST_DELSAFE(n, nn, ifa->neigh_list)
  896:       if (n->last_seen)
  897:       {
  898: 	expires = n->last_seen + n->ifa->cf->timeout_time;
  899: 
  900: 	if (expires <= now)
  901: 	  rip_remove_neighbor(p, n);
  902: 	else
  903: 	  next = MIN(next, expires);
  904:       }
  905: 
  906:   tm_start(p->timer, MAX(next - now, 1));
  907: }
  908: 
  909: static inline void
  910: rip_kick_timer(struct rip_proto *p)
  911: {
  912:   if (p->timer->expires > (now + 1))
  913:     tm_start(p->timer, 1);	/* Or 100 ms */
  914: }
  915: 
  916: /**
  917:  * rip_iface_timer - RIP interface timer hook
  918:  * @t: timer
  919:  *
  920:  * RIP interface timers are responsible for scheduling both regular and
   921:  * triggered updates. A fixed, delay-independent period is used for regular
   922:  * updates, while a minimal separating interval is enforced for triggered updates.
  923:  * The function also ensures that a new update is not started when the old one
  924:  * is still running.
  925:  */
  926: static void
  927: rip_iface_timer(timer *t)
  928: {
  929:   struct rip_iface *ifa = t->data;
  930:   struct rip_proto *p = ifa->rip;
  931:   bird_clock_t period = ifa->cf->update_time;
  932: 
  933:   if (ifa->cf->passive)
  934:     return;
  935: 
  936:   TRACE(D_EVENTS, "Interface timer fired for %s", ifa->iface->name);
  937: 
  938:   if (ifa->tx_active)
  939:   {
  940:     if (now < (ifa->next_regular + period))
  941:       { tm_start(ifa->timer, 1); return; }
  942: 
  943:     /* We are too late, reset is done by rip_send_table() */
  944:     log(L_WARN "%s: Too slow update on %s, resetting", p->p.name, ifa->iface->name);
  945:   }
  946: 
  947:   if (now >= ifa->next_regular)
  948:   {
   949:     /* Send regular update, set timer for next period (or the following one if necessary) */
  950:     TRACE(D_EVENTS, "Sending regular updates for %s", ifa->iface->name);
  951:     rip_send_table(p, ifa, ifa->addr, 0);
  952:     ifa->next_regular += period * (1 + ((now - ifa->next_regular) / period));
  953:     ifa->want_triggered = 0;
  954:     p->triggered = 0;
  955:   }
  956:   else if (ifa->want_triggered && (now >= ifa->next_triggered))
  957:   {
  958:     /* Send triggered update, enforce interval between triggered updates */
  959:     TRACE(D_EVENTS, "Sending triggered updates for %s", ifa->iface->name);
  960:     rip_send_table(p, ifa, ifa->addr, ifa->want_triggered);
  961:     ifa->next_triggered = now + MIN(5, period / 2 + 1);
  962:     ifa->want_triggered = 0;
  963:     p->triggered = 0;
  964:   }
  965: 
  966:   tm_start(ifa->timer, ifa->want_triggered ? 1 : (ifa->next_regular - now));
  967: }
  968: 
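/*
 * Worked example for the regular update catch-up above (illustrative): with
 * update_time 30 s and next_regular already 70 s in the past,
 * (now - next_regular) / period == 2, so next_regular advances by
 * 3 * period == 90 s, i.e. to the first period boundary in the future
 * (20 s from now). If the timer fires less than one period late, it simply
 * advances by one period.
 */
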
  969: static inline void
  970: rip_iface_kick_timer(struct rip_iface *ifa)
  971: {
  972:   if (ifa->timer->expires > (now + 1))
  973:     tm_start(ifa->timer, 1);	/* Or 100 ms */
  974: }
  975: 
  976: static void
  977: rip_trigger_update(struct rip_proto *p)
  978: {
  979:   if (p->triggered)
  980:     return;
  981: 
  982:   struct rip_iface *ifa;
  983:   WALK_LIST(ifa, p->iface_list)
  984:   {
  985:     /* Interface not active */
  986:     if (! ifa->up)
  987:       continue;
  988: 
  989:     /* Already scheduled */
  990:     if (ifa->want_triggered)
  991:       continue;
  992: 
  993:     TRACE(D_EVENTS, "Scheduling triggered updates for %s", ifa->iface->name);
  994:     ifa->want_triggered = now;
  995:     rip_iface_kick_timer(ifa);
  996:   }
  997: 
  998:   p->triggered = 1;
  999: }
 1000: 
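/*
 * Flow sketch (illustrative): rip_rt_notify() stamps en->changed and calls
 * rip_trigger_update(); this marks want_triggered = now on each active iface
 * and kicks its timer; rip_iface_timer() then calls
 * rip_send_table(p, ifa, ifa->addr, ifa->want_triggered), so only entries
 * changed since that timestamp are advertised (the comparison itself lives
 * in the update session code in |packets.c|).
 */
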
 1001: 
 1002: /*
 1003:  *	RIP protocol glue
 1004:  */
 1005: 
 1006: static struct ea_list *
 1007: rip_prepare_attrs(struct linpool *pool, ea_list *next, u8 metric, u16 tag)
 1008: {
 1009:   struct ea_list *l = lp_alloc(pool, sizeof(struct ea_list) + 2 * sizeof(eattr));
 1010: 
 1011:   l->next = next;
 1012:   l->flags = EALF_SORTED;
 1013:   l->count = 2;
 1014: 
 1015:   l->attrs[0].id = EA_RIP_METRIC;
 1016:   l->attrs[0].flags = 0;
 1017:   l->attrs[0].type = EAF_TYPE_INT | EAF_TEMP;
 1018:   l->attrs[0].u.data = metric;
 1019: 
 1020:   l->attrs[1].id = EA_RIP_TAG;
 1021:   l->attrs[1].flags = 0;
 1022:   l->attrs[1].type = EAF_TYPE_INT | EAF_TEMP;
 1023:   l->attrs[1].u.data = tag;
 1024: 
 1025:   return l;
 1026: }
 1027: 
 1028: static int
 1029: rip_import_control(struct proto *P UNUSED, struct rte **rt, struct ea_list **attrs, struct linpool *pool)
 1030: {
 1031:   /* Prepare attributes with initial values */
 1032:   if ((*rt)->attrs->source != RTS_RIP)
 1033:     *attrs = rip_prepare_attrs(pool, *attrs, 1, 0);
 1034: 
 1035:   return 0;
 1036: }
 1037: 
 1038: static int
 1039: rip_reload_routes(struct proto *P)
 1040: {
 1041:   struct rip_proto *p = (struct rip_proto *) P;
 1042: 
 1043:   if (p->rt_reload)
 1044:     return 1;
 1045: 
 1046:   TRACE(D_EVENTS, "Scheduling route reload");
 1047:   p->rt_reload = 1;
 1048:   rip_kick_timer(p);
 1049: 
 1050:   return 1;
 1051: }
 1052: 
 1053: static struct ea_list *
 1054: rip_make_tmp_attrs(struct rte *rt, struct linpool *pool)
 1055: {
 1056:   return rip_prepare_attrs(pool, NULL, rt->u.rip.metric, rt->u.rip.tag);
 1057: }
 1058: 
 1059: static void
 1060: rip_store_tmp_attrs(struct rte *rt, struct ea_list *attrs)
 1061: {
 1062:   rt->u.rip.metric = ea_get_int(attrs, EA_RIP_METRIC, 1);
 1063:   rt->u.rip.tag = ea_get_int(attrs, EA_RIP_TAG, 0);
 1064: }
 1065: 
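/*
 * These extended attributes are what user filters see as rip_metric and
 * rip_tag. Illustrative snippet in the BIRD 1.6 filter language (not C;
 * consult the user's guide for the authoritative syntax):
 *
 *   filter rip_out {
 *     rip_metric = 2;
 *     rip_tag = 42;
 *     accept;
 *   }
 */
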
 1066: static int
 1067: rip_rte_better(struct rte *new, struct rte *old)
 1068: {
 1069:   return new->u.rip.metric < old->u.rip.metric;
 1070: }
 1071: 
 1072: static int
 1073: rip_rte_same(struct rte *new, struct rte *old)
 1074: {
 1075:   return ((new->u.rip.metric == old->u.rip.metric) &&
 1076: 	  (new->u.rip.tag == old->u.rip.tag) &&
 1077: 	  (new->u.rip.from == old->u.rip.from));
 1078: }
 1079: 
 1080: 
 1081: static struct proto *
 1082: rip_init(struct proto_config *cfg)
 1083: {
 1084:   struct proto *P = proto_new(cfg, sizeof(struct rip_proto));
 1085: 
 1086:   P->accept_ra_types = RA_OPTIMAL;
 1087:   P->if_notify = rip_if_notify;
 1088:   P->rt_notify = rip_rt_notify;
 1089:   P->neigh_notify = rip_neigh_notify;
 1090:   P->import_control = rip_import_control;
 1091:   P->reload_routes = rip_reload_routes;
 1092:   P->make_tmp_attrs = rip_make_tmp_attrs;
 1093:   P->store_tmp_attrs = rip_store_tmp_attrs;
 1094:   P->rte_better = rip_rte_better;
 1095:   P->rte_same = rip_rte_same;
 1096: 
 1097:   return P;
 1098: }
 1099: 
 1100: static int
 1101: rip_start(struct proto *P)
 1102: {
 1103:   struct rip_proto *p = (void *) P;
 1104:   struct rip_config *cf = (void *) (P->cf);
 1105: 
 1106:   init_list(&p->iface_list);
 1107:   fib_init(&p->rtable, P->pool, sizeof(struct rip_entry), 0, rip_init_entry);
 1108:   p->rte_slab = sl_new(P->pool, sizeof(struct rip_rte));
 1109:   p->timer = tm_new_set(P->pool, rip_timer, p, 0, 0);
 1110: 
 1111:   p->ecmp = cf->ecmp;
 1112:   p->infinity = cf->infinity;
 1113:   p->triggered = 0;
 1114: 
 1115:   p->log_pkt_tbf = (struct tbf){ .rate = 1, .burst = 5 };
 1116:   p->log_rte_tbf = (struct tbf){ .rate = 4, .burst = 20 };
 1117: 
 1118:   tm_start(p->timer, MIN(cf->min_timeout_time, cf->max_garbage_time));
 1119: 
 1120:   return PS_UP;
 1121: }
 1122: 
 1123: static int
 1124: rip_reconfigure(struct proto *P, struct proto_config *c)
 1125: {
 1126:   struct rip_proto *p = (void *) P;
 1127:   struct rip_config *new = (void *) c;
 1128:   // struct rip_config *old = (void *) (P->cf);
 1129: 
 1130:   if (new->infinity != p->infinity)
 1131:     return 0;
 1132: 
 1133:   TRACE(D_EVENTS, "Reconfiguring");
 1134: 
 1135:   p->p.cf = c;
 1136:   p->ecmp = new->ecmp;
 1137:   rip_reconfigure_ifaces(p, new);
 1138: 
 1139:   p->rt_reload = 1;
 1140:   rip_kick_timer(p);
 1141: 
 1142:   return 1;
 1143: }
 1144: 
 1145: static void
 1146: rip_get_route_info(rte *rte, byte *buf, ea_list *attrs UNUSED)
 1147: {
 1148:   buf += bsprintf(buf, " (%d/%d)", rte->pref, rte->u.rip.metric);
 1149: 
 1150:   if (rte->u.rip.tag)
 1151:     bsprintf(buf, " [%04x]", rte->u.rip.tag);
 1152: }
 1153: 
 1154: static int
 1155: rip_get_attr(eattr *a, byte *buf, int buflen UNUSED)
 1156: {
 1157:   switch (a->id)
 1158:   {
 1159:   case EA_RIP_METRIC:
 1160:     bsprintf(buf, "metric: %d", a->u.data);
 1161:     return GA_FULL;
 1162: 
 1163:   case EA_RIP_TAG:
 1164:     bsprintf(buf, "tag: %04x", a->u.data);
 1165:     return GA_FULL;
 1166: 
 1167:   default:
 1168:     return GA_UNKNOWN;
 1169:   }
 1170: }
 1171: 
 1172: void
 1173: rip_show_interfaces(struct proto *P, char *iff)
 1174: {
 1175:   struct rip_proto *p = (void *) P;
 1176:   struct rip_iface *ifa = NULL;
 1177:   struct rip_neighbor *n = NULL;
 1178: 
 1179:   if (p->p.proto_state != PS_UP)
 1180:   {
 1181:     cli_msg(-1021, "%s: is not up", p->p.name);
 1182:     cli_msg(0, "");
 1183:     return;
 1184:   }
 1185: 
 1186:   cli_msg(-1021, "%s:", p->p.name);
 1187:   cli_msg(-1021, "%-10s %-6s %6s %6s %6s",
 1188: 	  "Interface", "State", "Metric", "Nbrs", "Timer");
 1189: 
 1190:   WALK_LIST(ifa, p->iface_list)
 1191:   {
 1192:     if (iff && !patmatch(iff, ifa->iface->name))
 1193:       continue;
 1194: 
 1195:     int nbrs = 0;
 1196:     WALK_LIST(n, ifa->neigh_list)
 1197:       if (n->last_seen)
 1198: 	nbrs++;
 1199: 
 1200:     int timer = MAX(ifa->next_regular - now, 0);
 1201:     cli_msg(-1021, "%-10s %-6s %6u %6u %6u",
 1202: 	    ifa->iface->name, (ifa->up ? "Up" : "Down"), ifa->cf->metric, nbrs, timer);
 1203:   }
 1204: 
 1205:   cli_msg(0, "");
 1206: }
 1207: 
 1208: void
 1209: rip_show_neighbors(struct proto *P, char *iff)
 1210: {
 1211:   struct rip_proto *p = (void *) P;
 1212:   struct rip_iface *ifa = NULL;
 1213:   struct rip_neighbor *n = NULL;
 1214: 
 1215:   if (p->p.proto_state != PS_UP)
 1216:   {
 1217:     cli_msg(-1022, "%s: is not up", p->p.name);
 1218:     cli_msg(0, "");
 1219:     return;
 1220:   }
 1221: 
 1222:   cli_msg(-1022, "%s:", p->p.name);
 1223:   cli_msg(-1022, "%-25s %-10s %6s %6s %6s",
 1224: 	  "IP address", "Interface", "Metric", "Routes", "Seen");
 1225: 
 1226:   WALK_LIST(ifa, p->iface_list)
 1227:   {
 1228:     if (iff && !patmatch(iff, ifa->iface->name))
 1229:       continue;
 1230: 
 1231:     WALK_LIST(n, ifa->neigh_list)
 1232:     {
 1233:       if (!n->last_seen)
 1234: 	continue;
 1235: 
 1236:       int timer = now - n->last_seen;
 1237:       cli_msg(-1022, "%-25I %-10s %6u %6u %6u",
 1238: 	      n->nbr->addr, ifa->iface->name, ifa->cf->metric, n->uc, timer);
 1239:     }
 1240:   }
 1241: 
 1242:   cli_msg(0, "");
 1243: }
 1244: 
 1245: static void
 1246: rip_dump(struct proto *P)
 1247: {
 1248:   struct rip_proto *p = (struct rip_proto *) P;
 1249:   struct rip_iface *ifa;
 1250:   int i;
 1251: 
 1252:   i = 0;
 1253:   FIB_WALK(&p->rtable, e)
 1254:   {
 1255:     struct rip_entry *en = (struct rip_entry *) e;
 1256:     debug("RIP: entry #%d: %I/%d via %I dev %s valid %d metric %d age %d s\n",
 1257: 	  i++, en->n.prefix, en->n.pxlen, en->next_hop, en->iface->name,
 1258: 	  en->valid, en->metric, now - en->changed);
 1259:   }
 1260:   FIB_WALK_END;
 1261: 
 1262:   i = 0;
 1263:   WALK_LIST(ifa, p->iface_list)
 1264:   {
 1265:     debug("RIP: interface #%d: %s, %I, up = %d, busy = %d\n",
 1266: 	  i++, ifa->iface->name, ifa->sk ? ifa->sk->daddr : IPA_NONE,
 1267: 	  ifa->up, ifa->tx_active);
 1268:   }
 1269: }
 1270: 
 1271: 
 1272: struct protocol proto_rip = {
 1273:   .name =		"RIP",
 1274:   .template =		"rip%d",
 1275:   .attr_class =		EAP_RIP,
 1276:   .preference =		DEF_PREF_RIP,
 1277:   .config_size =	sizeof(struct rip_config),
 1278:   .init =		rip_init,
 1279:   .dump =		rip_dump,
 1280:   .start =		rip_start,
 1281:   .reconfigure =	rip_reconfigure,
 1282:   .get_route_info =	rip_get_route_info,
 1283:   .get_attr =		rip_get_attr
 1284: };
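
/*
 * Illustrative configuration (BIRD 1.6 configuration language, not C). Option
 * names are given as commonly documented for the RIP protocol; interface
 * patterns and timer values are arbitrary examples, see the user's guide for
 * the authoritative syntax:
 *
 *   protocol rip {
 *     ecmp yes;
 *     interface "eth*" {
 *       metric 1;
 *       mode multicast;
 *       update time 30;
 *       timeout time 180;
 *     };
 *   }
 */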
