/*
 * BIRD -- Route Attribute Cache
 *
 * (c) 1998--2000 Martin Mares <mj@ucw.cz>
 *
 * Can be freely distributed and used under the terms of the GNU GPL.
 */

/**
 * DOC: Route attribute cache
 *
 * Each route entry carries a set of route attributes. Several of them
 * vary from route to route, but most attributes are usually common
 * for a large number of routes. To conserve memory, we've decided to
 * store only the varying ones directly in the &rte and hold the rest
 * in a special structure called &rta which is shared among all the
 * &rte's with these attributes.
 *
 * Each &rta contains all the static attributes of the route (i.e.,
 * those which are always present) as structure members and a list of
 * dynamic attributes represented by a linked list of &ea_list
 * structures, each of them consisting of an array of &eattr's containing
 * the individual attributes. An attribute can be specified more than once
 * in the &ea_list chain and in such a case the first occurrence overrides
 * the others. This semantics is used especially when someone (for example
 * a filter) wishes to alter values of several dynamic attributes, but
 * wants to preserve the original attribute lists maintained by
 * another module.
 *
 * Each &eattr contains an attribute identifier (split into a protocol ID and
 * a per-protocol attribute ID), protocol-dependent flags, a type code (consisting
 * of several bit fields describing attribute characteristics) and either an
 * embedded 32-bit value or a pointer to an &adata structure holding the
 * attribute contents.
 *
 * There exist two variants of &rta's -- cached and un-cached ones. Un-cached
 * &rta's can have an arbitrarily complex structure of &ea_list's and they
 * can be modified by any module in the route processing chain. Cached
 * &rta's have their attribute lists normalized (that means at most one
 * &ea_list is present and its values are sorted in order to speed up
 * searching), they are stored in a hash table to make fast lookup possible
 * and they are provided with a use count to allow sharing.
 *
 * Routing tables always contain only cached &rta's.
 */

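/*
 * A minimal sketch (not part of the original file) of the override
 * semantics described above: a filter may prepend its own &ea_list
 * segment so that its value of an attribute shadows the one supplied
 * by the protocol, without touching the protocol's list. All values
 * below are illustrative.
 *
 *   ea_list *mine = lp_alloc(lp, sizeof(ea_list) + sizeof(eattr));
 *   mine->flags = EALF_SORTED;
 *   mine->count = 1;
 *   mine->attrs[0] = (eattr) {
 *     .id = EA_GEN_IGP_METRIC,
 *     .type = EAF_TYPE_INT,
 *     .u.data = 10,
 *   };
 *   mine->next = a->eattrs;   /* first occurrence wins in ea_find() */
 *   a->eattrs = mine;
 */
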
#include "nest/bird.h"
#include "nest/route.h"
#include "nest/protocol.h"
#include "nest/iface.h"
#include "nest/cli.h"
#include "nest/attrs.h"
#include "lib/alloca.h"
#include "lib/hash.h"
#include "lib/idm.h"
#include "lib/resource.h"
#include "lib/string.h"

#include <stddef.h>

const adata null_adata;		/* adata of length 0 */

const char * const rta_src_names[RTS_MAX] = {
  [RTS_DUMMY] = "",
  [RTS_STATIC] = "static",
  [RTS_INHERIT] = "inherit",
  [RTS_DEVICE] = "device",
  [RTS_STATIC_DEVICE] = "static-device",
  [RTS_REDIRECT] = "redirect",
  [RTS_RIP] = "RIP",
  [RTS_OSPF] = "OSPF",
  [RTS_OSPF_IA] = "OSPF-IA",
  [RTS_OSPF_EXT1] = "OSPF-E1",
  [RTS_OSPF_EXT2] = "OSPF-E2",
  [RTS_BGP] = "BGP",
  [RTS_PIPE] = "pipe",
  [RTS_BABEL] = "Babel",
  [RTS_RPKI] = "RPKI",
};

const char * rta_dest_names[RTD_MAX] = {
  [RTD_NONE] = "",
  [RTD_UNICAST] = "unicast",
  [RTD_BLACKHOLE] = "blackhole",
  [RTD_UNREACHABLE] = "unreachable",
  [RTD_PROHIBIT] = "prohibited",
};

pool *rta_pool;

static slab *rta_slab_[4];
static slab *nexthop_slab_[4];
static slab *rte_src_slab;

static struct idm src_ids;
#define SRC_ID_INIT_SIZE 4

/* rte source hash */

#define RSH_KEY(n) n->proto, n->private_id
#define RSH_NEXT(n) n->next
#define RSH_EQ(p1,n1,p2,n2) p1 == p2 && n1 == n2
#define RSH_FN(p,n) p->hash_key ^ u32_hash(n)

#define RSH_REHASH rte_src_rehash
#define RSH_PARAMS /2, *2, 1, 1, 8, 20
#define RSH_INIT_ORDER 6

static HASH(struct rte_src) src_hash;

static void
rte_src_init(void)
{
  rte_src_slab = sl_new(rta_pool, sizeof(struct rte_src));

  idm_init(&src_ids, rta_pool, SRC_ID_INIT_SIZE);

  HASH_INIT(src_hash, rta_pool, RSH_INIT_ORDER);
}


HASH_DEFINE_REHASH_FN(RSH, struct rte_src)

struct rte_src *
rt_find_source(struct proto *p, u32 id)
{
  return HASH_FIND(src_hash, RSH, p, id);
}

struct rte_src *
rt_get_source(struct proto *p, u32 id)
{
  struct rte_src *src = rt_find_source(p, id);

  if (src)
    return src;

  src = sl_alloc(rte_src_slab);
  src->proto = p;
  src->private_id = id;
  src->global_id = idm_alloc(&src_ids);
  src->uc = 0;

  HASH_INSERT2(src_hash, RSH, rta_pool, src);

  return src;
}

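/*
 * Usage sketch (added, not in the original): a protocol that originates
 * several routes per destination keys them by a private ID. The caller
 * is expected to hold a reference on the returned source (see
 * rt_lock_source()/rt_unlock_source() as used by rta_lookup() and
 * rta__free() below), otherwise rt_prune_sources() may reclaim it.
 *
 *   struct rte_src *s = rt_get_source(p, private_id);
 *   rt_lock_source(s);     // bump s->uc while routes reference it
 *   ...
 *   rt_unlock_source(s);   // drop; freed by a later rt_prune_sources()
 */
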
void
rt_prune_sources(void)
{
  HASH_WALK_FILTER(src_hash, next, src, sp)
  {
    if (src->uc == 0)
    {
      HASH_DO_REMOVE(src_hash, RSH, sp);
      idm_free(&src_ids, src->global_id);
      sl_free(rte_src_slab, src);
    }
  }
  HASH_WALK_FILTER_END;

  HASH_MAY_RESIZE_DOWN(src_hash, RSH, rta_pool);
}


/*
 * Multipath Next Hop
 */

static inline u32
nexthop_hash(struct nexthop *x)
{
  u32 h = 0;
  for (; x; x = x->next)
  {
    h ^= ipa_hash(x->gw) ^ (h << 5) ^ (h >> 9);

    for (int i = 0; i < x->labels; i++)
      h ^= x->label[i] ^ (h << 6) ^ (h >> 7);
  }

  return h;
}

int
nexthop__same(struct nexthop *x, struct nexthop *y)
{
  for (; x && y; x = x->next, y = y->next)
  {
    if (!ipa_equal(x->gw, y->gw) || (x->iface != y->iface) ||
        (x->flags != y->flags) || (x->weight != y->weight) ||
        (x->labels_orig != y->labels_orig) || (x->labels != y->labels))
      return 0;

    for (int i = 0; i < x->labels; i++)
      if (x->label[i] != y->label[i])
        return 0;
  }

  return x == y;
}

static int
nexthop_compare_node(const struct nexthop *x, const struct nexthop *y)
{
  int r;

  if (!x)
    return 1;

  if (!y)
    return -1;

  /* Should we also compare flags? */

  r = ((int) y->weight) - ((int) x->weight);
  if (r)
    return r;

  r = ipa_compare(x->gw, y->gw);
  if (r)
    return r;

  r = ((int) y->labels) - ((int) x->labels);
  if (r)
    return r;

  for (int i = 0; i < y->labels; i++)
  {
    r = ((int) y->label[i]) - ((int) x->label[i]);
    if (r)
      return r;
  }

  return ((int) x->iface->index) - ((int) y->iface->index);
}

static inline struct nexthop *
nexthop_copy_node(const struct nexthop *src, linpool *lp)
{
  struct nexthop *n = lp_alloc(lp, nexthop_size(src));

  memcpy(n, src, nexthop_size(src));
  n->next = NULL;

  return n;
}

/**
 * nexthop_merge - merge nexthop lists
 * @x: list 1
 * @y: list 2
 * @rx: reusability of list @x
 * @ry: reusability of list @y
 * @max: max number of nexthops
 * @lp: linpool for allocating nexthops
 *
 * The nexthop_merge() function takes two nexthop lists @x and @y and merges them,
 * eliminating possible duplicates. The input lists must be sorted and the
 * result is sorted too. The number of nexthops in the result is limited by @max.
 * New nodes are allocated from linpool @lp.
 *
 * The arguments @rx and @ry specify whether the corresponding input lists may be
 * consumed by the function (i.e. their nodes reused in the resulting list); in
 * that case the caller should not access these lists after that. To eliminate
 * issues with deallocation of these lists, the caller should use some form of
 * bulk deallocation (e.g. stack or linpool) to free these nodes when the
 * resulting list is no longer needed. When reusability is not set, the
 * corresponding lists are neither modified nor linked from the resulting list.
 */
struct nexthop *
nexthop_merge(struct nexthop *x, struct nexthop *y, int rx, int ry, int max, linpool *lp)
{
  struct nexthop *root = NULL;
  struct nexthop **n = &root;

  while ((x || y) && max--)
  {
    int cmp = nexthop_compare_node(x, y);
    if (cmp < 0)
    {
      *n = rx ? x : nexthop_copy_node(x, lp);
      x = x->next;
    }
    else if (cmp > 0)
    {
      *n = ry ? y : nexthop_copy_node(y, lp);
      y = y->next;
    }
    else
    {
      *n = rx ? x : (ry ? y : nexthop_copy_node(x, lp));
      x = x->next;
      y = y->next;
    }
    n = &((*n)->next);
  }
  *n = NULL;

  return root;
}

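/*
 * A hypothetical call (sketch): merging a route's own nexthop list with
 * an extra one, consuming the second while keeping the first intact.
 * The names and the limit of 16 are illustrative.
 *
 *   struct nexthop *res = nexthop_merge(orig, extra, 0, 1, 16, lp);
 *   // orig is untouched (rx == 0); extra's nodes may now be linked
 *   // into res (ry == 1) and must not be used on their own afterwards.
 */
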
void
nexthop_insert(struct nexthop **n, struct nexthop *x)
{
  for (; *n; n = &((*n)->next))
  {
    int cmp = nexthop_compare_node(*n, x);

    if (cmp < 0)
      continue;
    else if (cmp > 0)
      break;
    else
      return;
  }

  x->next = *n;
  *n = x;
}

struct nexthop *
nexthop_sort(struct nexthop *x)
{
  struct nexthop *s = NULL;

  /* Simple insert-sort */
  while (x)
  {
    struct nexthop *n = x;
    x = n->next;
    n->next = NULL;

    nexthop_insert(&s, n);
  }

  return s;
}

int
nexthop_is_sorted(struct nexthop *x)
{
  for (; x && x->next; x = x->next)
    if (nexthop_compare_node(x, x->next) >= 0)
      return 0;

  return 1;
}

static inline slab *
nexthop_slab(struct nexthop *nh)
{
  return nexthop_slab_[MIN(nh->labels, 3)];
}

static struct nexthop *
nexthop_copy(struct nexthop *o)
{
  struct nexthop *first = NULL;
  struct nexthop **last = &first;

  for (; o; o = o->next)
  {
    struct nexthop *n = sl_alloc(nexthop_slab(o));
    n->gw = o->gw;
    n->iface = o->iface;
    n->next = NULL;
    n->flags = o->flags;
    n->weight = o->weight;
    n->labels_orig = o->labels_orig;
    n->labels = o->labels;
    for (int i = 0; i < o->labels; i++)
      n->label[i] = o->label[i];

    *last = n;
    last = &(n->next);
  }

  return first;
}

static void
nexthop_free(struct nexthop *o)
{
  struct nexthop *n;

  while (o)
  {
    n = o->next;
    sl_free(nexthop_slab(o), o);
    o = n;
  }
}


/*
 * Extended Attributes
 */

static inline eattr *
ea__find(ea_list *e, unsigned id)
{
  eattr *a;
  int l, r, m;

  while (e)
  {
    if (e->flags & EALF_BISECT)
    {
      l = 0;
      r = e->count - 1;
      while (l <= r)
      {
        m = (l+r) / 2;
        a = &e->attrs[m];
        if (a->id == id)
          return a;
        else if (a->id < id)
          l = m+1;
        else
          r = m-1;
      }
    }
    else
      for(m=0; m<e->count; m++)
        if (e->attrs[m].id == id)
          return &e->attrs[m];
    e = e->next;
  }
  return NULL;
}

/**
 * ea_find - find an extended attribute
 * @e: attribute list to search in
 * @id: attribute ID to search for
 *
 * Given an extended attribute list, ea_find() searches for the first
 * occurrence of an attribute with the specified ID, returning either a pointer
 * to its &eattr structure or %NULL if no such attribute exists.
 */
eattr *
ea_find(ea_list *e, unsigned id)
{
  eattr *a = ea__find(e, id & EA_CODE_MASK);

  if (a && (a->type & EAF_TYPE_MASK) == EAF_TYPE_UNDEF &&
      !(id & EA_ALLOW_UNDEF))
    return NULL;
  return a;
}

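/*
 * Usage sketch (identifiers assumed from the BGP module): look up the
 * local preference attribute; OR the ID with EA_ALLOW_UNDEF if an
 * explicitly undefined attribute should be returned instead of %NULL.
 *
 *   eattr *a = ea_find(rta->eattrs, EA_CODE(PROTOCOL_BGP, BA_LOCAL_PREF));
 *   u32 pref = a ? a->u.data : 100;
 */
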
/**
 * ea_walk - walk through extended attributes
 * @s: walk state structure
 * @id: start of attribute ID interval
 * @max: length of attribute ID interval
 *
 * Given an extended attribute list, ea_walk() walks through the list looking
 * for the first occurrences of attributes with IDs in the specified interval
 * from @id to (@id + @max - 1), returning pointers to the found &eattr
 * structures and storing its walk state in @s for subsequent calls.
 *
 * The function ea_walk() is supposed to be called in a loop, with an initially
 * zeroed walk state structure @s whose eattrs field is set to the initial
 * extended attribute list. It returns one found attribute per call, or %NULL
 * when no further attribute exists. The extended attribute list and the
 * arguments should not be modified between calls. The maximum value of @max
 * is 128.
 */
eattr *
ea_walk(struct ea_walk_state *s, uint id, uint max)
{
  ea_list *e = s->eattrs;
  eattr *a = s->ea;
  eattr *a_max;

  max = id + max;

  if (a)
    goto step;

  for (; e; e = e->next)
  {
    if (e->flags & EALF_BISECT)
    {
      int l, r, m;

      l = 0;
      r = e->count - 1;
      while (l < r)
      {
        m = (l+r) / 2;
        if (e->attrs[m].id < id)
          l = m + 1;
        else
          r = m;
      }
      a = e->attrs + l;
    }
    else
      a = e->attrs;

  step:
    a_max = e->attrs + e->count;
    for (; a < a_max; a++)
      if ((a->id >= id) && (a->id < max))
      {
        int n = a->id - id;

        if (BIT32_TEST(s->visited, n))
          continue;

        BIT32_SET(s->visited, n);

        if ((a->type & EAF_TYPE_MASK) == EAF_TYPE_UNDEF)
          continue;

        s->eattrs = e;
        s->ea = a;
        return a;
      }
      else if (e->flags & EALF_BISECT)
        break;
  }

  return NULL;
}

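/*
 * Loop sketch, following the calling convention described above (the ID
 * base and interval length are illustrative): zero the state, point it
 * at the list, then iterate until %NULL.
 *
 *   struct ea_walk_state st = { .eattrs = rta->eattrs };
 *   eattr *a;
 *
 *   while (a = ea_walk(&st, id_base, 32))
 *     process(a);
 */
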
/**
 * ea_get_int - fetch an integer attribute
 * @e: attribute list
 * @id: attribute ID
 * @def: default value
 *
 * This function is a shortcut for retrieving a value of an integer attribute
 * by calling ea_find() to find the attribute, extracting its value or returning
 * a provided default if no such attribute is present.
 */
int
ea_get_int(ea_list *e, unsigned id, int def)
{
  eattr *a = ea_find(e, id);
  if (!a)
    return def;
  return a->u.data;
}

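/*
 * Usage sketch (not in the original): read the generic IGP metric with
 * a fallback when the attribute is absent.
 *
 *   u32 metric = ea_get_int(rta->eattrs, EA_GEN_IGP_METRIC, IGP_METRIC_UNKNOWN);
 */
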
static inline void
ea_do_sort(ea_list *e)
{
  unsigned n = e->count;
  eattr *a = e->attrs;
  eattr *b = alloca(n * sizeof(eattr));
  unsigned s, ss;

  /* We need to use a stable sorting algorithm, hence mergesort */
  do
  {
    s = ss = 0;
    while (s < n)
    {
      eattr *p, *q, *lo, *hi;
      p = b;
      ss = s;
      *p++ = a[s++];
      while (s < n && p[-1].id <= a[s].id)
        *p++ = a[s++];
      if (s < n)
      {
        q = p;
        *p++ = a[s++];
        while (s < n && p[-1].id <= a[s].id)
          *p++ = a[s++];
        lo = b;
        hi = q;
        s = ss;
        while (lo < q && hi < p)
          if (lo->id <= hi->id)
            a[s++] = *lo++;
          else
            a[s++] = *hi++;
        while (lo < q)
          a[s++] = *lo++;
        while (hi < p)
          a[s++] = *hi++;
      }
    }
  }
  while (ss);
}

/*
 * Discard duplicates and undefined attributes in a sorted ea_list, in place.
 * The sort above is stable precisely so that the first (most recent)
 * occurrence of each attribute is the one that survives.
 */
static inline void
ea_do_prune(ea_list *e)
{
  eattr *s, *d, *l, *s0;
  int i = 0;

  s = d = e->attrs;         /* Beginning of the list. @s is source, @d is destination. */
  l = e->attrs + e->count;  /* End of the list */

  /* Walk from begin to end. */
  while (s < l)
  {
    s0 = s++;
    /* Find a consecutive block of the same attribute */
    while (s < l && s->id == s[-1].id)
      s++;

    /* Now s0 is the most recent version, s[-1] the oldest one */
    /* Drop undefs */
    if ((s0->type & EAF_TYPE_MASK) == EAF_TYPE_UNDEF)
      continue;

    /* Copy the newest version to destination */
    *d = *s0;

    /* Preserve info whether it originated locally */
    d->type = (d->type & ~(EAF_ORIGINATED|EAF_FRESH)) | (s[-1].type & EAF_ORIGINATED);

    /* Next destination */
    d++;
    i++;
  }

  e->count = i;
}

/**
 * ea_sort - sort an attribute list
 * @e: list to be sorted
 *
 * This function takes a &ea_list chain and sorts the attributes
 * within each of its entries.
 *
 * If an attribute occurs multiple times in a single &ea_list,
 * ea_sort() leaves only the first (the only significant) occurrence.
 */
void
ea_sort(ea_list *e)
{
  while (e)
  {
    if (!(e->flags & EALF_SORTED))
    {
      ea_do_sort(e);
      ea_do_prune(e);
      e->flags |= EALF_SORTED;
    }
    if (e->count > 5)
      e->flags |= EALF_BISECT;
    e = e->next;
  }
}

/**
 * ea_scan - estimate attribute list size
 * @e: attribute list
 *
 * This function calculates an upper bound of the size of
 * a given &ea_list after merging with ea_merge().
 */
unsigned
ea_scan(ea_list *e)
{
  unsigned cnt = 0;

  while (e)
  {
    cnt += e->count;
    e = e->next;
  }
  return sizeof(ea_list) + sizeof(eattr)*cnt;
}

/**
 * ea_merge - merge segments of an attribute list
 * @e: attribute list
 * @t: buffer to store the result to
 *
 * This function takes a possibly multi-segment attribute list
 * and merges all of its segments into one.
 *
 * The primary use of this function is for &ea_list normalization:
 * first call ea_scan() to determine how much memory the result
 * will take, then allocate a buffer (usually using alloca()), merge the
 * segments with ea_merge() and finally sort and prune the result
 * by calling ea_sort().
 */
void
ea_merge(ea_list *e, ea_list *t)
{
  eattr *d = t->attrs;

  t->flags = 0;
  t->count = 0;
  t->next = NULL;
  while (e)
  {
    memcpy(d, e->attrs, sizeof(eattr)*e->count);
    t->count += e->count;
    d += e->count;
    e = e->next;
  }
}

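/*
 * The normalization recipe from the ea_merge() documentation, spelled
 * out as a sketch (@e is an arbitrary un-cached chain; this is
 * presumably what ea_normalize(), called from rta_lookup() below, does
 * internally):
 *
 *   ea_list *t = alloca(ea_scan(e));
 *   ea_merge(e, t);
 *   ea_sort(t);
 *   // t is now a single sorted, pruned segment suitable for ea_same()
 */
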
/**
 * ea_same - compare two &ea_list's
 * @x: attribute list
 * @y: attribute list
 *
 * ea_same() compares two normalized attribute lists @x and @y and returns
 * 1 if they contain the same attributes, 0 otherwise.
 */
int
ea_same(ea_list *x, ea_list *y)
{
  int c;

  if (!x || !y)
    return x == y;
  ASSERT(!x->next && !y->next);
  if (x->count != y->count)
    return 0;
  for(c=0; c<x->count; c++)
  {
    eattr *a = &x->attrs[c];
    eattr *b = &y->attrs[c];

    if (a->id != b->id ||
        a->flags != b->flags ||
        a->type != b->type ||
        ((a->type & EAF_EMBEDDED) ? a->u.data != b->u.data : !adata_same(a->u.ptr, b->u.ptr)))
      return 0;
  }
  return 1;
}

static inline ea_list *
ea_list_copy(ea_list *o)
{
  ea_list *n;
  unsigned i, len;

  if (!o)
    return NULL;
  ASSERT(!o->next);
  len = sizeof(ea_list) + sizeof(eattr) * o->count;
  n = mb_alloc(rta_pool, len);
  memcpy(n, o, len);
  n->flags |= EALF_CACHED;
  for(i=0; i<o->count; i++)
  {
    eattr *a = &n->attrs[i];
    if (!(a->type & EAF_EMBEDDED))
    {
      unsigned size = sizeof(struct adata) + a->u.ptr->length;
      struct adata *d = mb_alloc(rta_pool, size);
      memcpy(d, a->u.ptr, size);
      a->u.ptr = d;
    }
  }
  return n;
}

static inline void
ea_free(ea_list *o)
{
  int i;

  if (o)
  {
    ASSERT(!o->next);
    for(i=0; i<o->count; i++)
    {
      eattr *a = &o->attrs[i];
      if (!(a->type & EAF_EMBEDDED))
        mb_free((void *) a->u.ptr);
    }
    mb_free(o);
  }
}

static int
get_generic_attr(eattr *a, byte **buf, int buflen UNUSED)
{
  if (a->id == EA_GEN_IGP_METRIC)
  {
    *buf += bsprintf(*buf, "igp_metric");
    return GA_NAME;
  }

  return GA_UNKNOWN;
}

void
ea_format_bitfield(struct eattr *a, byte *buf, int bufsize, const char **names, int min, int max)
{
  byte *bound = buf + bufsize - 32;
  u32 data = a->u.data;
  int i;

  for (i = min; i < max; i++)
    if ((data & (1u << i)) && names[i])
    {
      if (buf > bound)
      {
        strcpy(buf, " ...");
        return;
      }

      buf += bsprintf(buf, " %s", names[i]);
      data &= ~(1u << i);
    }

  if (data)
    bsprintf(buf, " %08x", data);

  return;
}

static inline void
opaque_format(const struct adata *ad, byte *buf, uint size)
{
  byte *bound = buf + size - 10;
  uint i;

  for(i = 0; i < ad->length; i++)
  {
    if (buf > bound)
    {
      strcpy(buf, " ...");
      return;
    }
    if (i)
      *buf++ = ' ';

    buf += bsprintf(buf, "%02x", ad->data[i]);
  }

  *buf = 0;
  return;
}

static inline void
ea_show_int_set(struct cli *c, const struct adata *ad, int way, byte *pos, byte *buf, byte *end)
{
  int i = int_set_format(ad, way, 0, pos, end - pos);
  cli_printf(c, -1012, "\t%s", buf);
  while (i)
  {
    i = int_set_format(ad, way, i, buf, end - buf - 1);
    cli_printf(c, -1012, "\t\t%s", buf);
  }
}

static inline void
ea_show_ec_set(struct cli *c, const struct adata *ad, byte *pos, byte *buf, byte *end)
{
  int i = ec_set_format(ad, 0, pos, end - pos);
  cli_printf(c, -1012, "\t%s", buf);
  while (i)
  {
    i = ec_set_format(ad, i, buf, end - buf - 1);
    cli_printf(c, -1012, "\t\t%s", buf);
  }
}

static inline void
ea_show_lc_set(struct cli *c, const struct adata *ad, byte *pos, byte *buf, byte *end)
{
  int i = lc_set_format(ad, 0, pos, end - pos);
  cli_printf(c, -1012, "\t%s", buf);
  while (i)
  {
    i = lc_set_format(ad, i, buf, end - buf - 1);
    cli_printf(c, -1012, "\t\t%s", buf);
  }
}

/**
 * ea_show - print an &eattr to CLI
 * @c: destination CLI
 * @e: attribute to be printed
 *
 * This function takes an extended attribute represented by its &eattr
 * structure and prints it to the CLI according to the type information.
 *
 * If the protocol defining the attribute provides its own
 * get_attr() hook, it's consulted first.
 */
void
ea_show(struct cli *c, eattr *e)
{
  struct protocol *p;
  int status = GA_UNKNOWN;
  const struct adata *ad = (e->type & EAF_EMBEDDED) ? NULL : e->u.ptr;
  byte buf[CLI_MSG_SIZE];
  byte *pos = buf, *end = buf + sizeof(buf);

  if (EA_IS_CUSTOM(e->id))
  {
    const char *name = ea_custom_name(e->id);
    if (name)
    {
      pos += bsprintf(pos, "%s", name);
      status = GA_NAME;
    }
    else
      pos += bsprintf(pos, "%02x.", EA_PROTO(e->id));
  }
  else if (p = class_to_protocol[EA_PROTO(e->id)])
  {
    pos += bsprintf(pos, "%s.", p->name);
    if (p->get_attr)
      status = p->get_attr(e, pos, end - pos);
    pos += strlen(pos);
  }
  else if (EA_PROTO(e->id))
    pos += bsprintf(pos, "%02x.", EA_PROTO(e->id));
  else
    status = get_generic_attr(e, &pos, end - pos);

  if (status < GA_NAME)
    pos += bsprintf(pos, "%02x", EA_ID(e->id));
  if (status < GA_FULL)
  {
    *pos++ = ':';
    *pos++ = ' ';
    switch (e->type & EAF_TYPE_MASK)
    {
    case EAF_TYPE_INT:
      bsprintf(pos, "%u", e->u.data);
      break;
    case EAF_TYPE_OPAQUE:
      opaque_format(ad, pos, end - pos);
      break;
    case EAF_TYPE_IP_ADDRESS:
      bsprintf(pos, "%I", *(ip_addr *) ad->data);
      break;
    case EAF_TYPE_ROUTER_ID:
      bsprintf(pos, "%R", e->u.data);
      break;
    case EAF_TYPE_AS_PATH:
      as_path_format(ad, pos, end - pos);
      break;
    case EAF_TYPE_BITFIELD:
      bsprintf(pos, "%08x", e->u.data);
      break;
    case EAF_TYPE_INT_SET:
      ea_show_int_set(c, ad, 1, pos, buf, end);
      return;
    case EAF_TYPE_EC_SET:
      ea_show_ec_set(c, ad, pos, buf, end);
      return;
    case EAF_TYPE_LC_SET:
      ea_show_lc_set(c, ad, pos, buf, end);
      return;
    case EAF_TYPE_UNDEF:
    default:
      bsprintf(pos, "<type %02x>", e->type);
    }
  }
  cli_printf(c, -1012, "\t%s", buf);
}

/**
 * ea_dump - dump an extended attribute
 * @e: attribute to be dumped
 *
 * ea_dump() dumps the contents of the given extended attribute to
 * the debug output.
 */
void
ea_dump(ea_list *e)
{
  int i;

  if (!e)
  {
    debug("NONE");
    return;
  }
  while (e)
  {
    debug("[%c%c%c]",
          (e->flags & EALF_SORTED) ? 'S' : 's',
          (e->flags & EALF_BISECT) ? 'B' : 'b',
          (e->flags & EALF_CACHED) ? 'C' : 'c');
    for(i=0; i<e->count; i++)
    {
      eattr *a = &e->attrs[i];
      debug(" %02x:%02x.%02x", EA_PROTO(a->id), EA_ID(a->id), a->flags);
      debug("=%c", "?iO?I?P???S?????" [a->type & EAF_TYPE_MASK]);
      if (a->type & EAF_ORIGINATED)
        debug("o");
      if (a->type & EAF_EMBEDDED)
        debug(":%08x", a->u.data);
      else
      {
        int j, len = a->u.ptr->length;
        debug("[%d]:", len);
        for(j=0; j<len; j++)
          debug("%02x", a->u.ptr->data[j]);
      }
    }
    if (e = e->next)
      debug(" | ");
  }
}

/**
 * ea_hash - calculate an &ea_list hash key
 * @e: attribute list
 *
 * ea_hash() takes an extended attribute list and calculates a hopefully
 * uniformly distributed hash value from its contents.
 */
inline uint
ea_hash(ea_list *e)
{
  const u64 mul = 0x68576150f3d6847;
  u64 h = 0xafcef24eda8b29;
  int i;

  if (e)			/* Assuming chain of length 1 */
  {
    for(i=0; i<e->count; i++)
    {
      struct eattr *a = &e->attrs[i];
      h ^= a->id; h *= mul;
      if (a->type & EAF_EMBEDDED)
        h ^= a->u.data;
      else
      {
        const struct adata *d = a->u.ptr;
        h ^= mem_hash(d->data, d->length);
      }
      h *= mul;
    }
  }
  return (h >> 32) ^ (h & 0xffffffff);
}

/**
 * ea_append - concatenate &ea_list's
 * @to: destination list (can be %NULL)
 * @what: list to be appended (can be %NULL)
 *
 * This function appends the &ea_list @what at the end of
 * &ea_list @to and returns a pointer to the resulting list.
 */
ea_list *
ea_append(ea_list *to, ea_list *what)
{
  ea_list *res;

  if (!to)
    return what;
  res = to;
  while (to->next)
    to = to->next;
  to->next = what;
  return res;
}

/*
 * rta's
 */

static uint rta_cache_count;
static uint rta_cache_size = 32;
static uint rta_cache_limit;
static uint rta_cache_mask;
static rta **rta_hash_table;

static void
rta_alloc_hash(void)
{
  rta_hash_table = mb_allocz(rta_pool, sizeof(rta *) * rta_cache_size);
  if (rta_cache_size < 32768)
    rta_cache_limit = rta_cache_size * 2;
  else
    rta_cache_limit = ~0;
  rta_cache_mask = rta_cache_size - 1;
}

static inline uint
rta_hash(rta *a)
{
  u64 h;
  mem_hash_init(&h);
#define MIX(f) mem_hash_mix(&h, &(a->f), sizeof(a->f));
  MIX(src);
  MIX(hostentry);
  MIX(from);
  MIX(igp_metric);
  MIX(source);
  MIX(scope);
  MIX(dest);
#undef MIX

  return mem_hash_value(&h) ^ nexthop_hash(&(a->nh)) ^ ea_hash(a->eattrs);
}

static inline int
rta_same(rta *x, rta *y)
{
  return (x->src == y->src &&
          x->source == y->source &&
          x->scope == y->scope &&
          x->dest == y->dest &&
          x->igp_metric == y->igp_metric &&
          ipa_equal(x->from, y->from) &&
          x->hostentry == y->hostentry &&
          nexthop_same(&(x->nh), &(y->nh)) &&
          ea_same(x->eattrs, y->eattrs));
}

static inline slab *
rta_slab(rta *a)
{
  return rta_slab_[a->nh.labels > 2 ? 3 : a->nh.labels];
}

static rta *
rta_copy(rta *o)
{
  rta *r = sl_alloc(rta_slab(o));

  memcpy(r, o, rta_size(o));
  r->uc = 1;
  r->nh.next = nexthop_copy(o->nh.next);
  r->eattrs = ea_list_copy(o->eattrs);
  return r;
}

static inline void
rta_insert(rta *r)
{
  uint h = r->hash_key & rta_cache_mask;
  r->next = rta_hash_table[h];
  if (r->next)
    r->next->pprev = &r->next;
  r->pprev = &rta_hash_table[h];
  rta_hash_table[h] = r;
}

static void
rta_rehash(void)
{
  uint ohs = rta_cache_size;
  uint h;
  rta *r, *n;
  rta **oht = rta_hash_table;

  rta_cache_size = 2*rta_cache_size;
  DBG("Rehashing rta cache from %d to %d entries.\n", ohs, rta_cache_size);
  rta_alloc_hash();
  for(h=0; h<ohs; h++)
    for(r=oht[h]; r; r=n)
    {
      n = r->next;
      rta_insert(r);
    }
  mb_free(oht);
}

/**
 * rta_lookup - look up a &rta in the attribute cache
 * @o: an un-cached &rta
 *
 * rta_lookup() gets an un-cached &rta structure and returns its cached
 * counterpart. It starts with examining the attribute cache to see whether
 * there exists a matching entry. If such an entry exists, it's returned and
 * its use count is incremented, else a new entry is created with use count
 * set to 1.
 *
 * The extended attribute lists attached to the &rta are automatically
 * converted to the normalized form.
 */
rta *
rta_lookup(rta *o)
{
  rta *r;
  uint h;

  ASSERT(!(o->aflags & RTAF_CACHED));
  if (o->eattrs)
    ea_normalize(o->eattrs);

  h = rta_hash(o);
  for(r=rta_hash_table[h & rta_cache_mask]; r; r=r->next)
    if (r->hash_key == h && rta_same(r, o))
      return rta_clone(r);

  r = rta_copy(o);
  r->hash_key = h;
  r->aflags = RTAF_CACHED;
  rt_lock_source(r->src);
  rt_lock_hostentry(r->hostentry);
  rta_insert(r);

  if (++rta_cache_count > rta_cache_limit)
    rta_rehash();

  return r;
}

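/*
 * Typical use (sketch; the field values are illustrative and a real
 * unicast route would also fill in the embedded nexthop): build a
 * temporary &rta, then exchange it for the shared cached copy.
 *
 *   rta a0 = {
 *     .src = p->main_source,
 *     .source = RTS_STATIC,
 *     .scope = SCOPE_UNIVERSE,
 *     .dest = RTD_UNICAST,
 *   };
 *   rta *a = rta_lookup(&a0);   // cached copy, use count held
 */
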
void
rta__free(rta *a)
{
  ASSERT(rta_cache_count && (a->aflags & RTAF_CACHED));
  rta_cache_count--;
  *a->pprev = a->next;
  if (a->next)
    a->next->pprev = a->pprev;
  rt_unlock_hostentry(a->hostentry);
  rt_unlock_source(a->src);
  if (a->nh.next)
    nexthop_free(a->nh.next);
  ea_free(a->eattrs);
  a->aflags = 0;		/* Poison the entry */
  sl_free(rta_slab(a), a);
}

rta *
rta_do_cow(rta *o, linpool *lp)
{
  rta *r = lp_alloc(lp, rta_size(o));
  memcpy(r, o, rta_size(o));
  for (struct nexthop **nhn = &(r->nh.next), *nho = o->nh.next; nho; nho = nho->next)
  {
    *nhn = lp_alloc(lp, nexthop_size(nho));
    memcpy(*nhn, nho, nexthop_size(nho));
    nhn = &((*nhn)->next);
  }
  r->aflags = 0;
  r->uc = 0;
  return r;
}

/**
 * rta_dump - dump route attributes
 * @a: attribute structure to dump
 *
 * This function takes a &rta and dumps its contents to the debug output.
 */
void
rta_dump(rta *a)
{
  static char *rts[] = { "RTS_DUMMY", "RTS_STATIC", "RTS_INHERIT", "RTS_DEVICE",
                         "RTS_STAT_DEV", "RTS_REDIR", "RTS_RIP",
                         "RTS_OSPF", "RTS_OSPF_IA", "RTS_OSPF_EXT1",
                         "RTS_OSPF_EXT2", "RTS_BGP", "RTS_PIPE", "RTS_BABEL" };
  static char *rtd[] = { "", " DEV", " HOLE", " UNREACH", " PROHIBIT" };

  debug("p=%s uc=%d %s %s%s h=%04x",
        a->src->proto->name, a->uc, rts[a->source], ip_scope_text(a->scope),
        rtd[a->dest], a->hash_key);
  if (!(a->aflags & RTAF_CACHED))
    debug(" !CACHED");
  debug(" <-%I", a->from);
  if (a->dest == RTD_UNICAST)
    for (struct nexthop *nh = &(a->nh); nh; nh = nh->next)
    {
      if (ipa_nonzero(nh->gw)) debug(" ->%I", nh->gw);
      if (nh->labels) debug(" L %d", nh->label[0]);
      for (int i=1; i<nh->labels; i++)
        debug("/%d", nh->label[i]);
      debug(" [%s]", nh->iface ? nh->iface->name : "???");
    }
  if (a->eattrs)
  {
    debug(" EA: ");
    ea_dump(a->eattrs);
  }
}

/**
 * rta_dump_all - dump attribute cache
 *
 * This function dumps the whole contents of the route attribute cache
 * to the debug output.
 */
void
rta_dump_all(void)
{
  rta *a;
  uint h;

  debug("Route attribute cache (%d entries, rehash at %d):\n", rta_cache_count, rta_cache_limit);
  for(h=0; h<rta_cache_size; h++)
    for(a=rta_hash_table[h]; a; a=a->next)
    {
      debug("%p ", a);
      rta_dump(a);
      debug("\n");
    }
  debug("\n");
}

void
rta_show(struct cli *c, rta *a)
{
  cli_printf(c, -1008, "\tType: %s %s", rta_src_names[a->source], ip_scope_text(a->scope));

  for(ea_list *eal = a->eattrs; eal; eal=eal->next)
    for(int i=0; i<eal->count; i++)
      ea_show(c, &eal->attrs[i]);
}

/**
 * rta_init - initialize route attribute cache
 *
 * This function is called during initialization of the routing
 * table module to set up the internals of the attribute cache.
 */
void
rta_init(void)
{
  rta_pool = rp_new(&root_pool, "Attributes");

  rta_slab_[0] = sl_new(rta_pool, sizeof(rta));
  rta_slab_[1] = sl_new(rta_pool, sizeof(rta) + sizeof(u32));
  rta_slab_[2] = sl_new(rta_pool, sizeof(rta) + sizeof(u32)*2);
  rta_slab_[3] = sl_new(rta_pool, sizeof(rta) + sizeof(u32)*MPLS_MAX_LABEL_STACK);

  nexthop_slab_[0] = sl_new(rta_pool, sizeof(struct nexthop));
  nexthop_slab_[1] = sl_new(rta_pool, sizeof(struct nexthop) + sizeof(u32));
  nexthop_slab_[2] = sl_new(rta_pool, sizeof(struct nexthop) + sizeof(u32)*2);
  nexthop_slab_[3] = sl_new(rta_pool, sizeof(struct nexthop) + sizeof(u32)*MPLS_MAX_LABEL_STACK);

  rta_alloc_hash();
  rte_src_init();
}

/*
 * Documentation for functions declared inline in route.h
 */
#if 0

/**
 * rta_clone - clone route attributes
 * @r: a &rta to be cloned
 *
 * rta_clone() takes a cached &rta and returns its identical cached
 * copy. Currently it works by just returning the original &rta with
 * its use count incremented.
 */
static inline rta *rta_clone(rta *r)
{ DUMMY; }

/**
 * rta_free - free route attributes
 * @r: a &rta to be freed
 *
 * If you stop using a &rta (for example when deleting a route which uses
 * it), you need to call rta_free() to notify the attribute cache that the
 * attribute is no longer in use, so that it can be freed if you were its
 * last user (which rta_free() tests by inspecting the use count).
 */
static inline void rta_free(rta *r)
{ DUMMY; }

#endif