Annotation of embedaddon/bird2/proto/rip/rip.c, revision 1.1.1.1
1.1 misho 1: /*
2: * BIRD -- Routing Information Protocol (RIP)
3: *
4: * (c) 1998--1999 Pavel Machek <pavel@ucw.cz>
5: * (c) 2004--2013 Ondrej Filip <feela@network.cz>
6: * (c) 2009--2015 Ondrej Zajicek <santiago@crfreenet.org>
7: * (c) 2009--2015 CZ.NIC z.s.p.o.
8: *
9: * Can be freely distributed and used under the terms of the GNU GPL.
10: */
11:
12: /**
13: * DOC: Routing Information Protocol (RIP)
14: *
15: * The RIP protocol is implemented in two files: |rip.c| containing the protocol
16: * logic, route management and the protocol glue with BIRD core, and |packets.c|
17: * handling RIP packet processing, RX, TX and protocol sockets.
18: *
19: * Each instance of RIP is described by a structure &rip_proto, which contains
20: * an internal RIP routing table, a list of protocol interfaces and the main
21: * timer responsible for RIP routing table cleanup.
22: *
23: * The RIP internal routing table contains incoming and outgoing routes. For each
24: * network (represented by structure &rip_entry) there is one outgoing route
25: * stored directly in &rip_entry and a one-way linked list of incoming routes
26: * (structures &rip_rte). The list contains incoming routes from different RIP
27: * neighbors, but only routes with the lowest metric are stored (i.e., all
28: * stored incoming routes have the same metric).
29: *
30: * Note that RIP itself does not select the outgoing route; that is done by the core
31: * routing table. When a new incoming route is received, it is propagated to the
32: * RIP table by rip_update_rte() and possibly stored in the list of incoming
33: * routes. Then the change may be propagated to the core by rip_announce_rte().
34: * The core selects the best route and propagates it to RIP by rip_rt_notify(),
35: * which updates the outgoing route part of &rip_entry and possibly triggers route
36: * propagation by rip_trigger_update().
37: *
38: * RIP interfaces are represented by structures &rip_iface. A RIP interface
39: * contains a per-interface socket, a list of associated neighbors, interface
40: * configuration, and state information related to scheduled interface events
41: * and running update sessions. RIP interfaces are added and removed based on
42: * core interface notifications.
43: *
44: * There are two RIP interface events - regular updates and triggered updates.
45: * Both are managed from the RIP interface timer (rip_iface_timer()). Regular
46: * updates are sent at a fixed interval and propagate the whole routing table,
47: * while triggered updates are scheduled by rip_trigger_update() due to some
48: * routing table change and propagate only the routes modified since the time
49: * they were scheduled. There are also unicast-destined requested updates, but
50: * these are sent directly as a reaction to a received RIP request message. The
51: * update session is started by rip_send_table(). There may be at most one
52: * active update session per interface, as the associated state (including the
53: * fib iterator) is stored directly in &rip_iface structure.
54: *
55: * RIP neighbors are represented by structures &rip_neighbor. Compared to
56: * neighbor handling in other routing protocols, RIP does not have explicit
57: * neighbor discovery and adjacency maintenance, which makes the &rip_neighbor
58: * related code a bit peculiar. RIP neighbors are interlinked with core neighbor
59: * structures (&neighbor) and use core neighbor notifications to ensure that RIP
60: * neighbors are timely removed. RIP neighbors are added based on received route
61: * notifications and removed based on core neighbor and RIP interface events.
62: *
63: * RIP neighbors are linked by RIP routes and use a counter to track the number of
64: * associated routes, but when these RIP routes time out, the associated RIP neighbor
65: * is still kept alive (with a zero counter). When a RIP neighbor is removed but still
66: * has some associated routes, it is not freed, just changed to a detached state
67: * (core neighbors and RIP ifaces are unlinked), then during the main timer
68: * cleanup phase the associated routes are removed and the &rip_neighbor
69: * structure is finally freed.
70: *
71: * Supported standards:
72: * - RFC 1058 - RIPv1
73: * - RFC 2453 - RIPv2
74: * - RFC 2080 - RIPng
75: * - RFC 4822 - RIP cryptographic authentication
76: */
77:
78: #include <stdlib.h>
79: #include "rip.h"
80:
81:
82: static inline void rip_lock_neighbor(struct rip_neighbor *n);
83: static inline void rip_unlock_neighbor(struct rip_neighbor *n);
84: static inline int rip_iface_link_up(struct rip_iface *ifa);
85: static inline void rip_kick_timer(struct rip_proto *p);
86: static inline void rip_iface_kick_timer(struct rip_iface *ifa);
87: static void rip_iface_timer(timer *timer);
88: static void rip_trigger_update(struct rip_proto *p);
89:
90:
91: /*
92: * RIP routes
93: */
94:
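/*
 * rip_add_rte - allocate a new &rip_rte from the route slab, copy @src into it,
 * prepend it to the list at @rp and take a reference to the originating neighbor.
 */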
95: static struct rip_rte *
96: rip_add_rte(struct rip_proto *p, struct rip_rte **rp, struct rip_rte *src)
97: {
98: struct rip_rte *rt = sl_alloc(p->rte_slab);
99:
100: memcpy(rt, src, sizeof(struct rip_rte));
101: rt->next = *rp;
102: *rp = rt;
103:
104: rip_lock_neighbor(rt->from);
105:
106: return rt;
107: }
108:
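/*
 * rip_remove_rte - unlink the route at @rp from its list, release the reference
 * to the originating neighbor and return the &rip_rte to the slab.
 */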
109: static inline void
110: rip_remove_rte(struct rip_proto *p, struct rip_rte **rp)
111: {
112: struct rip_rte *rt = *rp;
113:
114: rip_unlock_neighbor(rt->from);
115:
116: *rp = rt->next;
117: sl_free(p->rte_slab, rt);
118: }
119:
120: static inline int rip_same_rte(struct rip_rte *a, struct rip_rte *b)
121: { return a->metric == b->metric && a->tag == b->tag && ipa_equal(a->next_hop, b->next_hop); }
122:
123: static inline int rip_valid_rte(struct rip_rte *rt)
124: { return rt->from->ifa != NULL; }
125:
126: /**
127: * rip_announce_rte - announce route from RIP routing table to the core
128: * @p: RIP instance
129: * @en: related network
130: *
131: * The function takes the list of incoming routes from @en, prepares an appropriate
132: * &rte for the core and propagates it by rte_update().
133: */
134: static void
135: rip_announce_rte(struct rip_proto *p, struct rip_entry *en)
136: {
137: struct rip_rte *rt = en->routes;
138:
139: /* Find first valid rte */
140: while (rt && !rip_valid_rte(rt))
141: rt = rt->next;
142:
143: if (rt)
144: {
145: /* Update */
146: rta a0 = {
147: .src = p->p.main_source,
148: .source = RTS_RIP,
149: .scope = SCOPE_UNIVERSE,
150: .dest = RTD_UNICAST,
151: };
152:
153: u8 rt_metric = rt->metric;
154: u16 rt_tag = rt->tag;
155:
156: if (p->ecmp)
157: {
158: /* ECMP route */
159: struct nexthop *nhs = NULL;
160: int num = 0;
161:
162: for (rt = en->routes; rt && (num < p->ecmp); rt = rt->next)
163: {
164: if (!rip_valid_rte(rt))
165: continue;
166:
167: struct nexthop *nh = allocz(sizeof(struct nexthop));
168:
169: nh->gw = rt->next_hop;
170: nh->iface = rt->from->nbr->iface;
171: nh->weight = rt->from->ifa->cf->ecmp_weight;
172:
173: nexthop_insert(&nhs, nh);
174: num++;
175:
176: if (rt->tag != rt_tag)
177: rt_tag = 0;
178: }
179:
180: a0.nh = *nhs;
181: }
182: else
183: {
184: /* Unipath route */
185: a0.from = rt->from->nbr->addr;
186: a0.nh.gw = rt->next_hop;
187: a0.nh.iface = rt->from->nbr->iface;
188: }
189:
190: rta *a = rta_lookup(&a0);
191: rte *e = rte_get_temp(a);
192:
193: e->u.rip.from = a0.nh.iface;
194: e->u.rip.metric = rt_metric;
195: e->u.rip.tag = rt_tag;
196: e->pflags = EA_ID_FLAG(EA_RIP_METRIC) | EA_ID_FLAG(EA_RIP_TAG);
197:
198: rte_update(&p->p, en->n.addr, e);
199: }
200: else
201: {
202: /* Withdraw */
203: rte_update(&p->p, en->n.addr, NULL);
204: }
205: }
206:
207: /**
208: * rip_update_rte - enter a route update to the RIP routing table
209: * @p: RIP instance
210: * @n: network address
211: * @new: a &rip_rte representing the new route
212: *
213: * The function is called by the RIP packet processing code whenever it receives
214: * a reachable route. The appropriate routing table entry is found and the list
215: * of incoming routes is updated. Eventually, the change is also propagated to
216: * the core by rip_announce_rte(). Note that for unreachable routes,
217: * rip_withdraw_rte() should be called instead of rip_update_rte().
218: */
219: void
220: rip_update_rte(struct rip_proto *p, net_addr *n, struct rip_rte *new)
221: {
222: struct rip_entry *en = fib_get(&p->rtable, n);
223: struct rip_rte *rt, **rp;
224: int changed = 0;
225:
226: /* If the new route is better, remove all current routes */
227: if (en->routes && new->metric < en->routes->metric)
228: while (en->routes)
229: rip_remove_rte(p, &en->routes);
230:
231: /* Find the old route (also set rp for later) */
232: for (rp = &en->routes; rt = *rp; rp = &rt->next)
233: if (rt->from == new->from)
234: {
235: if (rip_same_rte(rt, new))
236: {
237: rt->expires = new->expires;
238: return;
239: }
240:
241: /* Remove the old route */
242: rip_remove_rte(p, rp);
243: changed = 1;
244: break;
245: }
246:
247: /* If the new route is optimal, add it to the list */
248: if (!en->routes || new->metric == en->routes->metric)
249: {
250: rt = rip_add_rte(p, rp, new);
251: changed = 1;
252: }
253:
254: /* Announce change if on relevant position (the first or any for ECMP) */
255: if (changed && (rp == &en->routes || p->ecmp))
256: rip_announce_rte(p, en);
257: }
258:
259: /**
260: * rip_withdraw_rte - enter a route withdrawal to the RIP routing table
261: * @p: RIP instance
262: * @n: network address
263: * @from: a &rip_neighbor propagating the withdrawal
264: *
265: * The function is called by the RIP packet processing code whenever it receives
266: * an unreachable route. The incoming route for the given network from neighbor @from is
267: * removed. Eventually, the change is also propagated by rip_announce_rte().
268: */
269: void
270: rip_withdraw_rte(struct rip_proto *p, net_addr *n, struct rip_neighbor *from)
271: {
272: struct rip_entry *en = fib_find(&p->rtable, n);
273: struct rip_rte *rt, **rp;
274:
275: if (!en)
276: return;
277:
278: /* Find the old route */
279: for (rp = &en->routes; rt = *rp; rp = &rt->next)
280: if (rt->from == from)
281: break;
282:
283: if (!rt)
284: return;
285:
286: /* Remove the old route */
287: rip_remove_rte(p, rp);
288:
289: /* Announce change if on relevant position */
290: if (rp == &en->routes || p->ecmp)
291: rip_announce_rte(p, en);
292: }
293:
294: /*
295: * rip_rt_notify - the core tells us about a new route, so store
296: * it in our data structures.
297: */
298: static void
299: rip_rt_notify(struct proto *P, struct channel *ch UNUSED, struct network *net, struct rte *new,
300: struct rte *old UNUSED)
301: {
302: struct rip_proto *p = (struct rip_proto *) P;
303: struct rip_entry *en;
304: int old_metric;
305:
306: if (new)
307: {
308: /* Update */
309: u32 rt_metric = ea_get_int(new->attrs->eattrs, EA_RIP_METRIC, 1);
310: u32 rt_tag = ea_get_int(new->attrs->eattrs, EA_RIP_TAG, 0);
311:
312: if (rt_metric > p->infinity)
313: {
314: log(L_WARN "%s: Invalid rip_metric value %u for route %N",
315: p->p.name, rt_metric, net->n.addr);
316: rt_metric = p->infinity;
317: }
318:
319: if (rt_tag > 0xffff)
320: {
321: log(L_WARN "%s: Invalid rip_tag value %u for route %N",
322: p->p.name, rt_tag, net->n.addr);
323: rt_metric = p->infinity;
324: rt_tag = 0;
325: }
326:
327: /*
328: * Note that we accept exported routes with infinity metric (this could
329: * happen if rip_metric is modified in filters). Such entry has infinity
330: * metric but is RIP_ENTRY_VALID and therefore is not subject to garbage
331: * collection.
332: */
333:
334: en = fib_get(&p->rtable, net->n.addr);
335:
336: old_metric = en->valid ? en->metric : -1;
337:
338: en->valid = RIP_ENTRY_VALID;
339: en->metric = rt_metric;
340: en->tag = rt_tag;
341: en->from = (new->attrs->src->proto == P) ? new->u.rip.from : NULL;
342: en->iface = new->attrs->nh.iface;
343: en->next_hop = new->attrs->nh.gw;
344: }
345: else
346: {
347: /* Withdraw */
348: en = fib_find(&p->rtable, net->n.addr);
349:
350: if (!en || en->valid != RIP_ENTRY_VALID)
351: return;
352:
353: old_metric = en->metric;
354:
355: en->valid = RIP_ENTRY_STALE;
356: en->metric = p->infinity;
357: en->tag = 0;
358: en->from = NULL;
359: en->iface = NULL;
360: en->next_hop = IPA_NONE;
361: }
362:
363: /* Activate triggered updates */
364: if (en->metric != old_metric)
365: {
366: en->changed = current_time();
367: rip_trigger_update(p);
368: }
369: }
370:
371:
372: /*
373: * RIP neighbors
374: */
375:
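/*
 * rip_get_neighbor - find or create the &rip_neighbor for source address @a on
 * @ifa. Returns NULL if there is no usable core neighbor (e.g. our own address)
 * or the interface link is down.
 */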
376: struct rip_neighbor *
377: rip_get_neighbor(struct rip_proto *p, ip_addr *a, struct rip_iface *ifa)
378: {
379: neighbor *nbr = neigh_find(&p->p, *a, ifa->iface, 0);
380:
381: if (!nbr || (nbr->scope == SCOPE_HOST) || !rip_iface_link_up(ifa))
382: return NULL;
383:
384: if (nbr->data)
385: return nbr->data;
386:
387: TRACE(D_EVENTS, "New neighbor %I on %s", *a, ifa->iface->name);
388:
389: struct rip_neighbor *n = mb_allocz(p->p.pool, sizeof(struct rip_neighbor));
390: n->ifa = ifa;
391: n->nbr = nbr;
392: nbr->data = n;
393: n->csn = nbr->aux;
394:
395: add_tail(&ifa->neigh_list, NODE n);
396:
397: return n;
398: }
399:
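/*
 * rip_remove_neighbor - unlink neighbor @n from its iface and core neighbor.
 * The structure is freed immediately if no routes reference it, otherwise it is
 * kept in a detached state until rip_timer() drops the remaining routes.
 */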
400: static void
401: rip_remove_neighbor(struct rip_proto *p, struct rip_neighbor *n)
402: {
403: neighbor *nbr = n->nbr;
404:
405: TRACE(D_EVENTS, "Removing neighbor %I on %s", nbr->addr, nbr->iface->name);
406:
407: rem_node(NODE n);
408: n->ifa = NULL;
409: n->nbr = NULL;
410: nbr->data = NULL;
411: nbr->aux = n->csn;
412:
413: rfree(n->bfd_req);
414: n->bfd_req = NULL;
415: n->last_seen = 0;
416:
417: if (!n->uc)
418: mb_free(n);
419:
420: /* Related routes are removed in rip_timer() */
421: rip_kick_timer(p);
422: }
423:
424: static inline void
425: rip_lock_neighbor(struct rip_neighbor *n)
426: {
427: n->uc++;
428: }
429:
430: static inline void
431: rip_unlock_neighbor(struct rip_neighbor *n)
432: {
433: n->uc--;
434:
435: if (!n->nbr && !n->uc)
436: mb_free(n);
437: }
438:
439: static void
440: rip_neigh_notify(struct neighbor *nbr)
441: {
442: struct rip_proto *p = (struct rip_proto *) nbr->proto;
443: struct rip_neighbor *n = nbr->data;
444:
445: if (!n)
446: return;
447:
448: /*
449: * We assume that rip_neigh_notify() is called before rip_if_notify() for
450: * IF_CHANGE_DOWN and therefore n->ifa is still valid. We have no such
451: * ordering assumption for IF_CHANGE_LINK, so we test link state of the
452: * underlying iface instead of just rip_iface state.
453: */
454: if ((nbr->scope <= 0) || !rip_iface_link_up(n->ifa))
455: rip_remove_neighbor(p, n);
456: }
457:
458: static void
459: rip_bfd_notify(struct bfd_request *req)
460: {
461: struct rip_neighbor *n = req->data;
462: struct rip_proto *p = n->ifa->rip;
463:
464: if (req->down)
465: {
466: TRACE(D_EVENTS, "BFD session down for nbr %I on %s",
467: n->nbr->addr, n->ifa->iface->name);
468: rip_remove_neighbor(p, n);
469: }
470: }
471:
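/*
 * rip_update_bfd - create or remove the BFD session request for neighbor @n,
 * according to the interface 'bfd' option and whether the neighbor is live.
 */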
472: void
473: rip_update_bfd(struct rip_proto *p, struct rip_neighbor *n)
474: {
475: int use_bfd = n->ifa->cf->bfd && n->last_seen;
476:
477: if (use_bfd && !n->bfd_req)
478: {
479: /*
480: * For RIPv2, use the same address as rip_open_socket(). For RIPng, the neighbor
481: * should contain an address from the same prefix, thus also link-local. This
482: * may cause problems if two link-local addresses are assigned to one iface.
483: */
484: ip_addr saddr = rip_is_v2(p) ? n->ifa->sk->saddr : n->nbr->ifa->ip;
485: n->bfd_req = bfd_request_session(p->p.pool, n->nbr->addr, saddr,
486: n->nbr->iface, p->p.vrf,
487: rip_bfd_notify, n);
488: }
489:
490: if (!use_bfd && n->bfd_req)
491: {
492: rfree(n->bfd_req);
493: n->bfd_req = NULL;
494: }
495: }
496:
497:
498: /*
499: * RIP interfaces
500: */
501:
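/*
 * rip_iface_start - bring up interface @ifa: schedule the first regular update
 * at a random offset, request an immediate full (triggered) update and, unless
 * the iface is passive, send a RIP request.
 */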
502: static void
503: rip_iface_start(struct rip_iface *ifa)
504: {
505: struct rip_proto *p = ifa->rip;
506:
507: TRACE(D_EVENTS, "Starting interface %s", ifa->iface->name);
508:
509: ifa->next_regular = current_time() + (random() % ifa->cf->update_time) + 100 MS;
510: ifa->next_triggered = current_time(); /* Available immediately */
511: ifa->want_triggered = 1; /* All routes in triggered update */
512: tm_start(ifa->timer, 100 MS);
513: ifa->up = 1;
514:
515: if (!ifa->cf->passive)
516: rip_send_request(ifa->rip, ifa);
517: }
518:
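/*
 * rip_iface_stop - shut down interface @ifa: abort a running update session,
 * remove all its neighbors and stop the interface timer.
 */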
519: static void
520: rip_iface_stop(struct rip_iface *ifa)
521: {
522: struct rip_proto *p = ifa->rip;
523: struct rip_neighbor *n;
524:
525: TRACE(D_EVENTS, "Stopping interface %s", ifa->iface->name);
526:
527: rip_reset_tx_session(p, ifa);
528:
529: WALK_LIST_FIRST(n, ifa->neigh_list)
530: rip_remove_neighbor(p, n);
531:
532: tm_stop(ifa->timer);
533: ifa->up = 0;
534: }
535:
536: static inline int
537: rip_iface_link_up(struct rip_iface *ifa)
538: {
539: return !ifa->cf->check_link || (ifa->iface->flags & IF_LINK_UP);
540: }
541:
542: static void
543: rip_iface_update_state(struct rip_iface *ifa)
544: {
545: int up = ifa->sk && rip_iface_link_up(ifa);
546:
547: if (up == ifa->up)
548: return;
549:
550: if (up)
551: rip_iface_start(ifa);
552: else
553: rip_iface_stop(ifa);
554: }
555:
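/*
 * rip_iface_update_buffers - recompute socket buffer sizes from the configured
 * values (or the iface MTU) and derive the usable TX payload length, reserving
 * space for IP/UDP headers and an eventual authentication trailer.
 */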
556: static void
557: rip_iface_update_buffers(struct rip_iface *ifa)
558: {
559: if (!ifa->sk)
560: return;
561:
562: uint rbsize = ifa->cf->rx_buffer ?: ifa->iface->mtu;
563: uint tbsize = ifa->cf->tx_length ?: ifa->iface->mtu;
564: rbsize = MAX(rbsize, tbsize);
565:
566: sk_set_rbsize(ifa->sk, rbsize);
567: sk_set_tbsize(ifa->sk, tbsize);
568:
569: uint headers = (rip_is_v2(ifa->rip) ? IP4_HEADER_LENGTH : IP6_HEADER_LENGTH) + UDP_HEADER_LENGTH;
570: ifa->tx_plen = tbsize - headers;
571:
572: if (ifa->cf->auth_type == RIP_AUTH_CRYPTO)
573: ifa->tx_plen -= RIP_AUTH_TAIL_LENGTH + max_mac_length(ifa->cf->passwords);
574: }
575:
576: static inline void
577: rip_iface_update_bfd(struct rip_iface *ifa)
578: {
579: struct rip_proto *p = ifa->rip;
580: struct rip_neighbor *n;
581:
582: WALK_LIST(n, ifa->neigh_list)
583: rip_update_bfd(p, n);
584: }
585:
586:
587: static void
588: rip_iface_locked(struct object_lock *lock)
589: {
590: struct rip_iface *ifa = lock->data;
591: struct rip_proto *p = ifa->rip;
592:
593: if (!rip_open_socket(ifa))
594: {
595: log(L_ERR "%s: Cannot open socket for %s", p->p.name, ifa->iface->name);
596: return;
597: }
598:
599: rip_iface_update_buffers(ifa);
600: rip_iface_update_state(ifa);
601: }
602:
603:
604: static struct rip_iface *
605: rip_find_iface(struct rip_proto *p, struct iface *what)
606: {
607: struct rip_iface *ifa;
608:
609: WALK_LIST(ifa, p->iface_list)
610: if (ifa->iface == what)
611: return ifa;
612:
613: return NULL;
614: }
615:
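/*
 * rip_add_iface - create a new &rip_iface for @iface with config @ic, pick the
 * destination address (configured, RIP multicast group, or iface broadcast) and
 * acquire a UDP object lock; the socket itself is opened from rip_iface_locked().
 */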
616: static void
617: rip_add_iface(struct rip_proto *p, struct iface *iface, struct rip_iface_config *ic)
618: {
619: struct rip_iface *ifa;
620:
621: TRACE(D_EVENTS, "Adding interface %s", iface->name);
622:
623: ifa = mb_allocz(p->p.pool, sizeof(struct rip_iface));
624: ifa->rip = p;
625: ifa->iface = iface;
626: ifa->cf = ic;
627:
628: if (ipa_nonzero(ic->address))
629: ifa->addr = ic->address;
630: else if (ic->mode == RIP_IM_MULTICAST)
631: ifa->addr = rip_is_v2(p) ? IP4_RIP_ROUTERS : IP6_RIP_ROUTERS;
632: else /* Broadcast */
633: ifa->addr = iface->addr4->brd;
634: /*
635: * The above is just a workaround for BSD as it can't send broadcasts
636: * to 255.255.255.255. BSD systems need the network broadcast address instead.
637: *
638: * TODO: move this to sysdep code
639: */
640:
641: init_list(&ifa->neigh_list);
642:
643: add_tail(&p->iface_list, NODE ifa);
644:
645: ifa->timer = tm_new_init(p->p.pool, rip_iface_timer, ifa, 0, 0);
646:
647: struct object_lock *lock = olock_new(p->p.pool);
648: lock->type = OBJLOCK_UDP;
649: lock->port = ic->port;
650: lock->iface = iface;
651: lock->data = ifa;
652: lock->hook = rip_iface_locked;
653: ifa->lock = lock;
654:
655: olock_acquire(lock);
656: }
657:
658: static void
659: rip_remove_iface(struct rip_proto *p, struct rip_iface *ifa)
660: {
661: rip_iface_stop(ifa);
662:
663: TRACE(D_EVENTS, "Removing interface %s", ifa->iface->name);
664:
665: rem_node(NODE ifa);
666:
667: rfree(ifa->sk);
668: rfree(ifa->lock);
669: rfree(ifa->timer);
670:
671: mb_free(ifa);
672: }
673:
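/*
 * rip_reconfigure_iface - apply new per-interface configuration in place.
 * Returns 0 if an option affecting the socket changed, in which case the caller
 * has to restart the interface.
 */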
674: static int
675: rip_reconfigure_iface(struct rip_proto *p, struct rip_iface *ifa, struct rip_iface_config *new)
676: {
677: struct rip_iface_config *old = ifa->cf;
678:
679: /* Change of these options would require to reset the iface socket */
680: if ((new->mode != old->mode) ||
681: (new->port != old->port) ||
682: (new->tx_tos != old->tx_tos) ||
683: (new->tx_priority != old->tx_priority) ||
684: (new->ttl_security != old->ttl_security))
685: return 0;
686:
687: TRACE(D_EVENTS, "Reconfiguring interface %s", ifa->iface->name);
688:
689: ifa->cf = new;
690:
691: rip_iface_update_buffers(ifa);
692:
693: if (ifa->next_regular > (current_time() + new->update_time))
694: ifa->next_regular = current_time() + (random() % new->update_time) + 100 MS;
695:
696: if (new->check_link != old->check_link)
697: rip_iface_update_state(ifa);
698:
699: if (new->bfd != old->bfd)
700: rip_iface_update_bfd(ifa);
701:
702: if (ifa->up)
703: rip_iface_kick_timer(ifa);
704:
705: return 1;
706: }
707:
708: static void
709: rip_reconfigure_ifaces(struct rip_proto *p, struct rip_config *cf)
710: {
711: struct iface *iface;
712:
713: WALK_LIST(iface, iface_list)
714: {
715: if (!(iface->flags & IF_UP))
716: continue;
717:
718: /* Ignore ifaces without appropriate address */
719: if (rip_is_v2(p) ? !iface->addr4 : !iface->llv6)
720: continue;
721:
722: struct rip_iface *ifa = rip_find_iface(p, iface);
723: struct rip_iface_config *ic = (void *) iface_patt_find(&cf->patt_list, iface, NULL);
724:
725: if (ifa && ic)
726: {
727: if (rip_reconfigure_iface(p, ifa, ic))
728: continue;
729:
730: /* Hard restart */
731: log(L_INFO "%s: Restarting interface %s", p->p.name, ifa->iface->name);
732: rip_remove_iface(p, ifa);
733: rip_add_iface(p, iface, ic);
734: }
735:
736: if (ifa && !ic)
737: rip_remove_iface(p, ifa);
738:
739: if (!ifa && ic)
740: rip_add_iface(p, iface, ic);
741: }
742: }
743:
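/*
 * rip_if_notify - handle core interface events: add, remove or restart the
 * corresponding &rip_iface, and update its buffers or link state on MTU and
 * link changes.
 */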
744: static void
745: rip_if_notify(struct proto *P, unsigned flags, struct iface *iface)
746: {
747: struct rip_proto *p = (void *) P;
748: struct rip_config *cf = (void *) P->cf;
749: struct rip_iface *ifa = rip_find_iface(p, iface);
750:
751: if (iface->flags & IF_IGNORE)
752: return;
753:
754: /* Add, remove or restart interface */
755: if (flags & (IF_CHANGE_UPDOWN | (rip_is_v2(p) ? IF_CHANGE_ADDR4 : IF_CHANGE_LLV6)))
756: {
757: if (ifa)
758: rip_remove_iface(p, ifa);
759:
760: if (!(iface->flags & IF_UP))
761: return;
762:
763: /* Ignore ifaces without appropriate address */
764: if (rip_is_v2(p) ? !iface->addr4 : !iface->llv6)
765: return;
766:
767: struct rip_iface_config *ic = (void *) iface_patt_find(&cf->patt_list, iface, NULL);
768: if (ic)
769: rip_add_iface(p, iface, ic);
770:
771: return;
772: }
773:
774: if (!ifa)
775: return;
776:
777: if (flags & IF_CHANGE_MTU)
778: rip_iface_update_buffers(ifa);
779:
780: if (flags & IF_CHANGE_LINK)
781: rip_iface_update_state(ifa);
782: }
783:
784:
785: /*
786: * RIP timer events
787: */
788:
789: /**
790: * rip_timer - RIP main timer hook
791: * @t: timer
792: *
793: * The RIP main timer is responsible for routing table maintenance. Invalid or
794: * expired routes (&rip_rte) are removed and garbage collection of stale routing
795: * table entries (&rip_entry) is done. Changes are propagated to core tables,
796: * route reload is also done here. Note that garbage collection uses a maximal
797: * GC time, while interfaces maintain an illusion of per-interface GC times in
798: * rip_send_response().
799: *
800: * Keeping incoming routes and the selected outgoing route are two independent
801: * functions, therefore after garbage collection some entries now considered
802: * invalid (RIP_ENTRY_DUMMY) still may have non-empty list of incoming routes,
803: * while some valid entries (representing an outgoing route) may have that list
804: * empty.
805: *
806: * The main timer is not scheduled periodically but it uses the time of the
807: * current next event and the minimal interval of any possible event to compute
808: * the time of the next run.
809: */
810: static void
811: rip_timer(timer *t)
812: {
813: struct rip_proto *p = t->data;
814: struct rip_config *cf = (void *) (p->p.cf);
815: struct rip_iface *ifa;
816: struct rip_neighbor *n, *nn;
817: struct fib_iterator fit;
818: btime now_ = current_time();
819: btime next = now_ + MIN(cf->min_timeout_time, cf->max_garbage_time);
820: btime expires = 0;
821:
822: TRACE(D_EVENTS, "Main timer fired");
823:
824: FIB_ITERATE_INIT(&fit, &p->rtable);
825:
826: loop:
827: FIB_ITERATE_START(&p->rtable, &fit, struct rip_entry, en)
828: {
829: struct rip_rte *rt, **rp;
830: int changed = 0;
831:
832: /* Checking received routes for timeout and for dead neighbors */
833: for (rp = &en->routes; rt = *rp; /* rp = &rt->next */)
834: {
835: if (!rip_valid_rte(rt) || (rt->expires <= now_))
836: {
837: rip_remove_rte(p, rp);
838: changed = 1;
839: continue;
840: }
841:
842: next = MIN(next, rt->expires);
843: rp = &rt->next;
844: }
845:
846: /* Propagating eventual change */
847: if (changed || p->rt_reload)
848: {
849: /*
850: * We have to restart the iteration because there may be a cascade of
851: * synchronous events rip_announce_rte() -> nest table change ->
852: * rip_rt_notify() -> p->rtable change, invalidating hidden variables.
853: */
854:
855: FIB_ITERATE_PUT_NEXT(&fit, &p->rtable);
856: rip_announce_rte(p, en);
857: goto loop;
858: }
859:
860: /* Checking stale entries for garbage collection timeout */
861: if (en->valid == RIP_ENTRY_STALE)
862: {
863: expires = en->changed + cf->max_garbage_time;
864:
865: if (expires <= now_)
866: {
867: // TRACE(D_EVENTS, "entry is too old: %N", en->n.addr);
868: en->valid = 0;
869: }
870: else
871: next = MIN(next, expires);
872: }
873:
874: /* Remove empty nodes */
875: if (!en->valid && !en->routes)
876: {
877: FIB_ITERATE_PUT(&fit);
878: fib_delete(&p->rtable, en);
879: goto loop;
880: }
881: }
882: FIB_ITERATE_END;
883:
884: p->rt_reload = 0;
885:
886: /* Handling neighbor expiration */
887: WALK_LIST(ifa, p->iface_list)
888: WALK_LIST_DELSAFE(n, nn, ifa->neigh_list)
889: if (n->last_seen)
890: {
891: expires = n->last_seen + n->ifa->cf->timeout_time;
892:
893: if (expires <= now_)
894: rip_remove_neighbor(p, n);
895: else
896: next = MIN(next, expires);
897: }
898:
899: tm_start(p->timer, MAX(next - now_, 100 MS));
900: }
901:
902: static inline void
903: rip_kick_timer(struct rip_proto *p)
904: {
905: if (p->timer->expires > (current_time() + 100 MS))
906: tm_start(p->timer, 100 MS);
907: }
908:
909: /**
910: * rip_iface_timer - RIP interface timer hook
911: * @t: timer
912: *
913: * RIP interface timers are responsible for scheduling both regular and
914: * triggered updates. Fixed, delay-independent period is used for regular
915: * updates, while minimal separating interval is enforced for triggered updates.
916: * The function also ensures that a new update is not started when the old one
917: * is still running.
918: */
919: static void
920: rip_iface_timer(timer *t)
921: {
922: struct rip_iface *ifa = t->data;
923: struct rip_proto *p = ifa->rip;
924: btime now_ = current_time();
925: btime period = ifa->cf->update_time;
926:
927: if (ifa->cf->passive)
928: return;
929:
930: TRACE(D_EVENTS, "Interface timer fired for %s", ifa->iface->name);
931:
932: if (ifa->tx_active)
933: {
934: if (now_ < (ifa->next_regular + period))
935: { tm_start(ifa->timer, 100 MS); return; }
936:
937: /* We are too late, reset is done by rip_send_table() */
938: log(L_WARN "%s: Too slow update on %s, resetting", p->p.name, ifa->iface->name);
939: }
940:
941: if (now_ >= ifa->next_regular)
942: {
943: /* Send regular update, set timer for next period (or following one if necessary) */
944: TRACE(D_EVENTS, "Sending regular updates for %s", ifa->iface->name);
945: rip_send_table(p, ifa, ifa->addr, 0);
946: ifa->next_regular += period * (1 + ((now_ - ifa->next_regular) / period));
947: ifa->want_triggered = 0;
948: p->triggered = 0;
949: }
950: else if (ifa->want_triggered && (now_ >= ifa->next_triggered))
951: {
952: /* Send triggered update, enforce interval between triggered updates */
953: TRACE(D_EVENTS, "Sending triggered updates for %s", ifa->iface->name);
954: rip_send_table(p, ifa, ifa->addr, ifa->want_triggered);
955: ifa->next_triggered = now_ + MIN(5 S, period / 2);
956: ifa->want_triggered = 0;
957: p->triggered = 0;
958: }
959:
960: tm_start(ifa->timer, ifa->want_triggered ? (1 S) : (ifa->next_regular - now_));
961: }
962:
963: static inline void
964: rip_iface_kick_timer(struct rip_iface *ifa)
965: {
966: if (ifa->timer->expires > (current_time() + 100 MS))
967: tm_start(ifa->timer, 100 MS);
968: }
969:
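/*
 * rip_trigger_update - schedule a triggered update on all active interfaces
 * (unless one is already pending); only routes changed since the recorded
 * want_triggered time are included in the update sent from rip_iface_timer().
 */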
970: static void
971: rip_trigger_update(struct rip_proto *p)
972: {
973: if (p->triggered)
974: return;
975:
976: struct rip_iface *ifa;
977: WALK_LIST(ifa, p->iface_list)
978: {
979: /* Interface not active */
980: if (! ifa->up)
981: continue;
982:
983: /* Already scheduled */
984: if (ifa->want_triggered)
985: continue;
986:
987: TRACE(D_EVENTS, "Scheduling triggered updates for %s", ifa->iface->name);
988: ifa->want_triggered = current_time();
989: rip_iface_kick_timer(ifa);
990: }
991:
992: p->triggered = 1;
993: }
994:
995:
996: /*
997: * RIP protocol glue
998: */
999:
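/*
 * rip_reload_routes - request a reload of routes from the RIP routing table to
 * the core; the actual re-announcement of all entries is done from rip_timer().
 */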
1000: static void
1001: rip_reload_routes(struct channel *C)
1002: {
1003: struct rip_proto *p = (struct rip_proto *) C->proto;
1004:
1005: if (p->rt_reload)
1006: return;
1007:
1008: TRACE(D_EVENTS, "Scheduling route reload");
1009: p->rt_reload = 1;
1010: rip_kick_timer(p);
1011: }
1012:
1013: static void
1014: rip_make_tmp_attrs(struct rte *rt, struct linpool *pool)
1015: {
1016: rte_init_tmp_attrs(rt, pool, 2);
1017: rte_make_tmp_attr(rt, EA_RIP_METRIC, EAF_TYPE_INT, rt->u.rip.metric);
1018: rte_make_tmp_attr(rt, EA_RIP_TAG, EAF_TYPE_INT, rt->u.rip.tag);
1019: }
1020:
1021: static void
1022: rip_store_tmp_attrs(struct rte *rt, struct linpool *pool)
1023: {
1024: rte_init_tmp_attrs(rt, pool, 2);
1025: rt->u.rip.metric = rte_store_tmp_attr(rt, EA_RIP_METRIC);
1026: rt->u.rip.tag = rte_store_tmp_attr(rt, EA_RIP_TAG);
1027: }
1028:
1029: static int
1030: rip_rte_better(struct rte *new, struct rte *old)
1031: {
1032: return new->u.rip.metric < old->u.rip.metric;
1033: }
1034:
1035: static int
1036: rip_rte_same(struct rte *new, struct rte *old)
1037: {
1038: return ((new->u.rip.metric == old->u.rip.metric) &&
1039: (new->u.rip.tag == old->u.rip.tag) &&
1040: (new->u.rip.from == old->u.rip.from));
1041: }
1042:
1043:
1044: static void
1045: rip_postconfig(struct proto_config *CF)
1046: {
1047: // struct rip_config *cf = (void *) CF;
1048:
1049: /* Define default channel */
1050: if (EMPTY_LIST(CF->channels))
1051: channel_config_new(NULL, net_label[CF->net_type], CF->net_type, CF);
1052: }
1053:
1054: static struct proto *
1055: rip_init(struct proto_config *CF)
1056: {
1057: struct proto *P = proto_new(CF);
1058:
1059: P->main_channel = proto_add_channel(P, proto_cf_main_channel(CF));
1060:
1061: P->if_notify = rip_if_notify;
1062: P->rt_notify = rip_rt_notify;
1063: P->neigh_notify = rip_neigh_notify;
1064: P->reload_routes = rip_reload_routes;
1065: P->make_tmp_attrs = rip_make_tmp_attrs;
1066: P->store_tmp_attrs = rip_store_tmp_attrs;
1067: P->rte_better = rip_rte_better;
1068: P->rte_same = rip_rte_same;
1069:
1070: return P;
1071: }
1072:
1073: static int
1074: rip_start(struct proto *P)
1075: {
1076: struct rip_proto *p = (void *) P;
1077: struct rip_config *cf = (void *) (P->cf);
1078:
1079: init_list(&p->iface_list);
1080: fib_init(&p->rtable, P->pool, cf->rip2 ? NET_IP4 : NET_IP6,
1081: sizeof(struct rip_entry), OFFSETOF(struct rip_entry, n), 0, NULL);
1082: p->rte_slab = sl_new(P->pool, sizeof(struct rip_rte));
1083: p->timer = tm_new_init(P->pool, rip_timer, p, 0, 0);
1084:
1085: p->rip2 = cf->rip2;
1086: p->ecmp = cf->ecmp;
1087: p->infinity = cf->infinity;
1088: p->triggered = 0;
1089:
1090: p->log_pkt_tbf = (struct tbf){ .rate = 1, .burst = 5 };
1091: p->log_rte_tbf = (struct tbf){ .rate = 4, .burst = 20 };
1092:
1093: tm_start(p->timer, MIN(cf->min_timeout_time, cf->max_garbage_time));
1094:
1095: return PS_UP;
1096: }
1097:
1098: static int
1099: rip_reconfigure(struct proto *P, struct proto_config *CF)
1100: {
1101: struct rip_proto *p = (void *) P;
1102: struct rip_config *new = (void *) CF;
1103: // struct rip_config *old = (void *) (P->cf);
1104:
1105: if (new->rip2 != p->rip2)
1106: return 0;
1107:
1108: if (new->infinity != p->infinity)
1109: return 0;
1110:
1111: if (!proto_configure_channel(P, &P->main_channel, proto_cf_main_channel(CF)))
1112: return 0;
1113:
1114: TRACE(D_EVENTS, "Reconfiguring");
1115:
1116: p->p.cf = CF;
1117: p->ecmp = new->ecmp;
1118: rip_reconfigure_ifaces(p, new);
1119:
1120: p->rt_reload = 1;
1121: rip_kick_timer(p);
1122:
1123: return 1;
1124: }
1125:
1126: static void
1127: rip_get_route_info(rte *rte, byte *buf)
1128: {
1129: buf += bsprintf(buf, " (%d/%d)", rte->pref, rte->u.rip.metric);
1130:
1131: if (rte->u.rip.tag)
1132: bsprintf(buf, " [%04x]", rte->u.rip.tag);
1133: }
1134:
1135: static int
1136: rip_get_attr(eattr *a, byte *buf, int buflen UNUSED)
1137: {
1138: switch (a->id)
1139: {
1140: case EA_RIP_METRIC:
1141: bsprintf(buf, "metric: %d", a->u.data);
1142: return GA_FULL;
1143:
1144: case EA_RIP_TAG:
1145: bsprintf(buf, "tag: %04x", a->u.data);
1146: return GA_FULL;
1147:
1148: default:
1149: return GA_UNKNOWN;
1150: }
1151: }
1152:
1153: void
1154: rip_show_interfaces(struct proto *P, char *iff)
1155: {
1156: struct rip_proto *p = (void *) P;
1157: struct rip_iface *ifa = NULL;
1158: struct rip_neighbor *n = NULL;
1159:
1160: if (p->p.proto_state != PS_UP)
1161: {
1162: cli_msg(-1021, "%s: is not up", p->p.name);
1163: cli_msg(0, "");
1164: return;
1165: }
1166:
1167: cli_msg(-1021, "%s:", p->p.name);
1168: cli_msg(-1021, "%-10s %-6s %6s %6s %7s",
1169: "Interface", "State", "Metric", "Nbrs", "Timer");
1170:
1171: WALK_LIST(ifa, p->iface_list)
1172: {
1173: if (iff && !patmatch(iff, ifa->iface->name))
1174: continue;
1175:
1176: int nbrs = 0;
1177: WALK_LIST(n, ifa->neigh_list)
1178: if (n->last_seen)
1179: nbrs++;
1180:
1181: btime now_ = current_time();
1182: btime timer = (ifa->next_regular > now_) ? (ifa->next_regular - now_) : 0;
1183: cli_msg(-1021, "%-10s %-6s %6u %6u %7t",
1184: ifa->iface->name, (ifa->up ? "Up" : "Down"), ifa->cf->metric, nbrs, timer);
1185: }
1186:
1187: cli_msg(0, "");
1188: }
1189:
1190: void
1191: rip_show_neighbors(struct proto *P, char *iff)
1192: {
1193: struct rip_proto *p = (void *) P;
1194: struct rip_iface *ifa = NULL;
1195: struct rip_neighbor *n = NULL;
1196:
1197: if (p->p.proto_state != PS_UP)
1198: {
1199: cli_msg(-1022, "%s: is not up", p->p.name);
1200: cli_msg(0, "");
1201: return;
1202: }
1203:
1204: cli_msg(-1022, "%s:", p->p.name);
1205: cli_msg(-1022, "%-25s %-10s %6s %6s %7s",
1206: "IP address", "Interface", "Metric", "Routes", "Seen");
1207:
1208: WALK_LIST(ifa, p->iface_list)
1209: {
1210: if (iff && !patmatch(iff, ifa->iface->name))
1211: continue;
1212:
1213: WALK_LIST(n, ifa->neigh_list)
1214: {
1215: if (!n->last_seen)
1216: continue;
1217:
1218: btime timer = current_time() - n->last_seen;
1219: cli_msg(-1022, "%-25I %-10s %6u %6u %7t",
1220: n->nbr->addr, ifa->iface->name, ifa->cf->metric, n->uc, timer);
1221: }
1222: }
1223:
1224: cli_msg(0, "");
1225: }
1226:
1227: static void
1228: rip_dump(struct proto *P)
1229: {
1230: struct rip_proto *p = (struct rip_proto *) P;
1231: struct rip_iface *ifa;
1232: int i;
1233:
1234: i = 0;
1235: FIB_WALK(&p->rtable, struct rip_entry, en)
1236: {
1237: debug("RIP: entry #%d: %N via %I dev %s valid %d metric %d age %t\n",
1238: i++, en->n.addr, en->next_hop, en->iface->name,
1239: en->valid, en->metric, current_time() - en->changed);
1240: }
1241: FIB_WALK_END;
1242:
1243: i = 0;
1244: WALK_LIST(ifa, p->iface_list)
1245: {
1246: debug("RIP: interface #%d: %s, %I, up = %d, busy = %d\n",
1247: i++, ifa->iface->name, ifa->sk ? ifa->sk->daddr : IPA_NONE,
1248: ifa->up, ifa->tx_active);
1249: }
1250: }
1251:
1252:
1253: struct protocol proto_rip = {
1254: .name = "RIP",
1255: .template = "rip%d",
1256: .class = PROTOCOL_RIP,
1257: .preference = DEF_PREF_RIP,
1258: .channel_mask = NB_IP,
1259: .proto_size = sizeof(struct rip_proto),
1260: .config_size = sizeof(struct rip_config),
1261: .postconfig = rip_postconfig,
1262: .init = rip_init,
1263: .dump = rip_dump,
1264: .start = rip_start,
1265: .reconfigure = rip_reconfigure,
1266: .get_route_info = rip_get_route_info,
1267: .get_attr = rip_get_attr
1268: };