1: /*
2: * BIRD Internet Routing Daemon -- Routing Table
3: *
4: * (c) 1998--2000 Martin Mares <mj@ucw.cz>
5: *
6: * Can be freely distributed and used under the terms of the GNU GPL.
7: */
8:
9: #ifndef _BIRD_ROUTE_H_
10: #define _BIRD_ROUTE_H_
11:
12: #include "lib/lists.h"
13: #include "lib/resource.h"
14: #include "lib/net.h"
15:
16: struct ea_list;
17: struct protocol;
18: struct proto;
19: struct rte_src;
20: struct symbol;
21: struct filter;
22: struct cli;
23:
24: /*
25: * Generic data structure for storing network prefixes. Also used
26: * for the master routing table. Currently implemented as a hash
27: * table.
28: *
29: * Available operations:
30: * - insertion of new entry
31: * - deletion of entry
32: * - searching for entry by network prefix
33: * - asynchronous retrieval of fib contents
34: */
35:
struct fib_node {
  struct fib_node *next;		/* Next in hash chain */
  struct fib_iterator *readers;		/* List of readers of this node */
  byte flags;				/* User-defined, will be removed */
  net_addr addr[0];			/* Variable-length key; node is embedded inside user data
					   (see fib_node_to_user() / fib.node_offset) */
};

struct fib_iterator {			/* See lib/slists.h for an explanation */
  struct fib_iterator *prev, *next;	/* Must be synced with struct fib_node! */
  byte efef;				/* 0xff to distinguish between iterator and node */
  byte pad[3];
  struct fib_node *node;		/* Or NULL if freshly merged */
  uint hash;				/* Index of the hash chain being walked (see FIB_ITERATE_START) */
};
50:
/* Constructor callback invoked on each newly created fib entry */
typedef void (*fib_init_fn)(void *);

struct fib {
  pool *fib_pool;			/* Pool holding all our data */
  slab *fib_slab;			/* Slab holding all fib nodes */
  struct fib_node **hash_table;		/* Node hash table */
  uint hash_size;			/* Number of hash table entries (a power of two) */
  uint hash_order;			/* Binary logarithm of hash_size */
  uint hash_shift;			/* 32 - hash_order */
  uint addr_type;			/* Type of address data stored in fib (NET_*) */
  uint node_size;			/* FIB node size, 0 for nonuniform */
  uint node_offset;			/* Offset of fib_node struct inside of user data */
  uint entries;				/* Number of entries */
  uint entries_min, entries_max;	/* Entry count limits (else start rehashing) */
  fib_init_fn init;			/* Constructor */
};
67:
68: static inline void * fib_node_to_user(struct fib *f, struct fib_node *e)
69: { return e ? (void *) ((char *) e - f->node_offset) : NULL; }
70:
71: static inline struct fib_node * fib_user_to_node(struct fib *f, void *e)
72: { return e ? (void *) ((char *) e + f->node_offset) : NULL; }
73:
/* FIB management functions (implemented in nest/rt-fib.c style core code) */
void fib_init(struct fib *f, pool *p, uint addr_type, uint node_size, uint node_offset, uint hash_order, fib_init_fn init);
void *fib_find(struct fib *, const net_addr *);	/* Find or return NULL if doesn't exist */
void *fib_get_chain(struct fib *f, const net_addr *a); /* Find first node in linked list from hash table */
void *fib_get(struct fib *, const net_addr *); 	/* Find or create new if nonexistent */
void *fib_route(struct fib *, const net_addr *); /* Longest-match routing lookup */
void fib_delete(struct fib *, void *);	/* Remove fib entry */
void fib_free(struct fib *);		/* Destroy the fib */
void fib_check(struct fib *);		/* Consistency check for debugging */

void fit_init(struct fib_iterator *, struct fib *); /* Internal functions, don't call */
struct fib_node *fit_get(struct fib *, struct fib_iterator *);
void fit_put(struct fib_iterator *, struct fib_node *);
void fit_put_next(struct fib *f, struct fib_iterator *i, struct fib_node *n, uint hpos);
87:
88:
/*
 * FIB_WALK(fib, type, z) iterates over all entries of the fib, making each
 * available as the user-data pointer z of the given type; close the loop
 * with FIB_WALK_END. NOTE(review): the walk reads hash chains directly,
 * so the fib presumably must not be modified while walking -- use the
 * FIB_ITERATE_* family (with a fib_iterator) for that.
 */
#define FIB_WALK(fib, type, z) do {				\
	struct fib_node *fn_, **ff_ = (fib)->hash_table;	\
	uint count_ = (fib)->hash_size;				\
	type *z;						\
	while (count_--)					\
	  for (fn_ = *ff_++; z = fib_node_to_user(fib, fn_); fn_=fn_->next)

#define FIB_WALK_END } while (0)

/* Initialize a restartable iteration over a fib */
#define FIB_ITERATE_INIT(it, fib) fit_init(it, fib)

/*
 * FIB_ITERATE_START() resumes an iteration through iterator it (previously
 * set up by FIB_ITERATE_INIT() or suspended by FIB_ITERATE_PUT*()); the
 * current entry is available as z. Registered iterators survive fib
 * modifications (nodes keep a list of readers, see struct fib_node).
 */
#define FIB_ITERATE_START(fib, it, type, z) do {		\
	struct fib_node *fn_ = fit_get(fib, it);		\
	uint count_ = (fib)->hash_size;				\
	uint hpos_ = (it)->hash;				\
	type *z;						\
	for(;;) {						\
	  if (!fn_)						\
	    {							\
	      if (++hpos_ >= count_)				\
		break;						\
	      fn_ = (fib)->hash_table[hpos_];			\
	      continue;						\
	    }							\
	  z = fib_node_to_user(fib, fn_);

#define FIB_ITERATE_END fn_ = fn_->next; } } while(0)

/* Suspend the iteration at the current entry */
#define FIB_ITERATE_PUT(it) fit_put(it, fn_)

/* Suspend the iteration just past the current entry (see fit_put_next()) */
#define FIB_ITERATE_PUT_NEXT(it, fib) fit_put_next(fib, it, fn_, hpos_)

/* Detach the iterator from the fib without finishing the walk */
#define FIB_ITERATE_UNLINK(it, fib) fit_get(fib, it)
122:
123:
124: /*
125: * Master Routing Tables. Generally speaking, each of them contains a FIB
126: * with each entry pointing to a list of route entries representing routes
127: * to given network (with the selected one at the head).
128: *
129: * Each of the RTE's contains variable data (the preference and protocol-dependent
130: * metrics) and a pointer to a route attribute block common for many routes).
131: *
132: * It's guaranteed that there is at most one RTE for every (prefix,proto) pair.
133: */
134:
/* Per-table configuration, linked to the live table via .table */
struct rtable_config {
  node n;				/* Node in list of table configs */
  char *name;				/* Table name */
  struct rtable *table;			/* The live table instance, if any */
  struct proto_config *krt_attached;	/* Kernel syncer attached to this table */
  uint addr_type;			/* Type of address data stored in table (NET_*) */
  int gc_max_ops;			/* Maximum number of operations before GC is run */
  int gc_min_time;			/* Minimum time between two consecutive GC runs */
  byte sorted;				/* Routes of network are sorted according to rte_better() */
};
145:
typedef struct rtable {
  node n;				/* Node in list of all tables */
  struct fib fib;			/* Networks (struct network) keyed by net_addr */
  char *name;				/* Name of this table */
  list channels;			/* List of attached channels (struct channel) */
  uint addr_type;			/* Type of address data stored in table (NET_*) */
  int pipe_busy;			/* Pipe loop detection */
  int use_count;			/* Number of protocols using this table */
  u32 rt_count;				/* Number of routes in the table */
  struct hostcache *hostcache;		/* Cache for recursive next-hop resolution (may be NULL) */
  struct rtable_config *config;		/* Configuration of this table */
  struct config *deleted;		/* Table doesn't exist in current configuration,
					 * delete as soon as use_count becomes 0 and remove
					 * obstacle from this routing table.
					 */
  struct event *rt_event;		/* Routing table event */
  btime gc_time;			/* Time of last GC */
  int gc_counter;			/* Number of operations since last GC */
  byte prune_state;			/* Table prune state, 1 -> scheduled, 2-> running */
  byte hcu_scheduled;			/* Hostcache update is scheduled */
  byte nhu_state;			/* Next Hop Update state (NHU_*) */
  struct fib_iterator prune_fit;	/* Rtable prune FIB iterator */
  struct fib_iterator nhu_fit;		/* Next Hop Update FIB iterator */
} rtable;

/* Values of rtable.nhu_state */
#define NHU_CLEAN 0			/* No next hop update pending */
#define NHU_SCHEDULED 1			/* Update scheduled, not yet running */
#define NHU_RUNNING 2			/* Update in progress */
#define NHU_DIRTY 3			/* Running and rescheduled (== NHU_SCHEDULED | NHU_RUNNING) */
175:
/* One destination network and its list of routes */
typedef struct network {
  struct rte *routes;			/* Available routes for this network, selected one at the head */
  struct fib_node n;			/* FIB flags reserved for kernel syncer */
} net;

/* Cache of resolved recursive next hops (see rt_get_hostentry()) */
struct hostcache {
  slab *slab;				/* Slab holding all hostentries */
  struct hostentry **hash_table;	/* Hash table for hostentries */
  unsigned hash_order, hash_shift;
  unsigned hash_max, hash_min;
  unsigned hash_items;
  linpool *lp;				/* Linpool for trie */
  struct f_trie *trie;			/* Trie of prefixes that might affect hostentries */
  list hostentries;			/* List of all hostentries */
  byte update_hostcache;		/* Update is pending */
};

struct hostentry {
  node ln;				/* Node in hostcache.hostentries */
  ip_addr addr;				/* IP address of host, part of key */
  ip_addr link;				/* (link-local) IP address of host, used as gw
					   if host is directly attached */
  struct rtable *tab;			/* Dependent table, part of key */
  struct hostentry *next;		/* Next in hash chain */
  unsigned hash_key;			/* Hash key */
  unsigned uc;				/* Use count */
  struct rta *src;			/* Source rta entry */
  byte dest;				/* Chosen route destination type (RTD_...) */
  byte nexthop_linkable;		/* Nexthop list is completely non-device */
  u32 igp_metric;			/* Chosen route IGP metric */
};
207:
/*
 * One route. There is at most one rte per (network, source protocol) pair,
 * see the Master Routing Tables comment above.
 */
typedef struct rte {
  struct rte *next;			/* Next route for the same network */
  net *net;				/* Network this RTE belongs to */
  struct channel *sender;		/* Channel used to send the route to the routing table */
  struct rta *attrs;			/* Attributes of this route */
  byte flags;				/* Flags (REF_...) */
  byte pflags;				/* Protocol-specific flags */
  word pref;				/* Route preference */
  btime lastmod;			/* Last modified */
  union {				/* Protocol-dependent data (metrics etc.) */
#ifdef CONFIG_RIP
    struct {
      struct iface *from;		/* Incoming iface */
      u8 metric;			/* RIP metric */
      u16 tag;				/* External route tag */
    } rip;
#endif
#ifdef CONFIG_OSPF
    struct {
      u32 metric1, metric2;		/* OSPF Type 1 and Type 2 metrics */
      u32 tag;				/* External route tag */
      u32 router_id;			/* Router that originated this route */
    } ospf;
#endif
#ifdef CONFIG_BGP
    struct {
      u8 suppressed;			/* Used for deterministic MED comparison */
      s8 stale;				/* Route is LLGR_STALE, -1 if unknown */
    } bgp;
#endif
#ifdef CONFIG_BABEL
    struct {
      u16 seqno;			/* Babel seqno */
      u16 metric;			/* Babel metric */
      u64 router_id;			/* Babel router id */
    } babel;
#endif
    struct {				/* Routes generated by krt sync (both temporary and inherited ones) */
      s8 src;				/* Alleged route source (see krt.h) */
      u8 proto;				/* Kernel source protocol ID */
      u8 seen;				/* Seen during last scan */
      u8 best;				/* Best route in network, propagated to core */
      u32 metric;			/* Kernel metric */
    } krt;
  } u;
} rte;
254:
/* Values of rte.flags */
#define REF_COW 1			/* Copy this rte on write */
#define REF_FILTERED 2			/* Route is rejected by import filter */
#define REF_STALE 4			/* Route is stale in a refresh cycle */
#define REF_DISCARD 8			/* Route is scheduled for discard */
#define REF_MODIFY 16			/* Route is scheduled for modify */

/* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
static inline int rte_is_valid(rte *r) { return r && !(r->flags & REF_FILTERED); }

/* Route just has REF_FILTERED flag */
static inline int rte_is_filtered(rte *r) { return !!(r->flags & REF_FILTERED); }


/* Types of route announcement, also used as flags */
#define RA_UNDEF	0		/* Undefined RA type */
#define RA_OPTIMAL	1		/* Announcement of optimal route change */
#define RA_ACCEPTED	2		/* Announcement of first accepted route */
#define RA_ANY		3		/* Announcement of any route change */
#define RA_MERGED	4		/* Announcement of optimal route merged with next ones */

/* Return value of preexport() callback */
#define RIC_ACCEPT	1		/* Accepted by protocol */
#define RIC_PROCESS	0		/* Process it through import filter */
#define RIC_REJECT	-1		/* Rejected by protocol */
#define RIC_DROP	-2		/* Silently dropped by protocol */
280:
extern list routing_tables;		/* List of all rtables (linked through rtable.n) */
struct config;

void rt_init(void);
void rt_preconfig(struct config *);
void rt_commit(struct config *new, struct config *old);
void rt_lock_table(rtable *);		/* NOTE(review): presumably adjusts use_count; see rtable.deleted */
void rt_unlock_table(rtable *);
void rt_setup(pool *, rtable *, struct rtable_config *);
/* Look up network in table, NULL if not present */
static inline net *net_find(rtable *tab, const net_addr *addr) { return (net *) fib_find(&tab->fib, addr); }
/* As net_find(), but only if the network's best route is valid */
static inline net *net_find_valid(rtable *tab, const net_addr *addr)
{ net *n = net_find(tab, addr); return (n && rte_is_valid(n->routes)) ? n : NULL; }
/* Look up network, creating the entry if not present */
static inline net *net_get(rtable *tab, const net_addr *addr) { return (net *) fib_get(&tab->fib, addr); }
void *net_route(rtable *tab, const net_addr *n);
int net_roa_check(rtable *tab, const net_addr *n, u32 asn);
rte *rte_find(net *net, struct rte_src *src);
rte *rte_get_temp(struct rta *);
void rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
/* rte_update() moved to protocol.h to avoid dependency conflicts */
int rt_examine(rtable *t, net_addr *a, struct proto *p, const struct filter *filter);
rte *rt_export_merged(struct channel *c, net *net, rte **rt_free, linpool *pool, int silent);
void rt_refresh_begin(rtable *t, struct channel *c);
void rt_refresh_end(rtable *t, struct channel *c);
void rt_modify_stale(rtable *t, struct channel *c);
void rt_schedule_prune(rtable *t);
void rte_dump(rte *);
void rte_free(rte *);
rte *rte_do_cow(rte *);
/* Get a writable rte; copies only when REF_COW is set */
static inline rte * rte_cow(rte *r) { return (r->flags & REF_COW) ? rte_do_cow(r) : r; }
rte *rte_cow_rta(rte *r, linpool *lp);
void rte_init_tmp_attrs(struct rte *r, linpool *lp, uint max);
void rte_make_tmp_attr(struct rte *r, uint id, uint type, uintptr_t val);
void rte_make_tmp_attrs(struct rte **r, struct linpool *pool, struct rta **old_attrs);
uintptr_t rte_store_tmp_attr(struct rte *r, uint id);
void rt_dump(rtable *);
void rt_dump_all(void);
int rt_feed_channel(struct channel *c);
void rt_feed_channel_abort(struct channel *c);
int rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
int rt_reload_channel(struct channel *c);
void rt_reload_channel_abort(struct channel *c);
void rt_prune_sync(rtable *t, int all);
int rte_update_out(struct channel *c, const net_addr *n, rte *new, rte *old0, int refeed);
struct rtable_config *rt_new_table(struct symbol *s, uint addr_type);
325:
326:
/* Default limit for ECMP next hops, defined in sysdep code */
extern const int rt_default_ecmp;

/* One table in the table list of a `show route' request */
struct rt_show_data_rtable {
  node n;
  rtable *table;
  struct channel *export_channel;
};

/* State of a `show route' CLI request */
struct rt_show_data {
  net_addr *addr;			/* Network to look up, if any */
  list tables;				/* List of rt_show_data_rtable to walk */
  struct rt_show_data_rtable *tab;	/* Iterator over table list */
  struct rt_show_data_rtable *last_table; /* Last table in output */
  struct fib_iterator fit;		/* Iterator over networks in table */
  int verbose, tables_defined_by;
  const struct filter *filter;
  struct proto *show_protocol;
  struct proto *export_protocol;
  struct channel *export_channel;
  struct config *running_on_config;
  int export_mode, primary_only, filtered, stats, show_for;

  int table_open;			/* Iteration (fit) is open */
  int net_counter, rt_counter, show_counter, table_counter;
  int net_counter_last, rt_counter_last, show_counter_last;
};

void rt_show(struct rt_show_data *);
struct rt_show_data_rtable * rt_show_add_table(struct rt_show_data *d, rtable *t);
357:
/* Value of table definition mode in struct rt_show_data */
#define RSD_TDB_DEFAULT	  0		/* no table specified */
#define RSD_TDB_INDIRECT  0		/* show route ... protocol P ... */
#define RSD_TDB_ALL	  RSD_TDB_SET			/* show route ... table all ... */
/* Parenthesized: the unparenthesized expansion (SET | NMN) misbehaves when
 * the macro is used inside a larger expression (e.g. multiplied or masked). */
#define RSD_TDB_DIRECT	  (RSD_TDB_SET | RSD_TDB_NMN)	/* show route ... table X table Y ... */

#define RSD_TDB_SET	  0x1		/* internal: show empty tables */
#define RSD_TDB_NMN	  0x2		/* internal: need matching net */

/* Value of export_mode in struct rt_show_data */
#define RSEM_NONE	0		/* Export mode not used */
#define RSEM_PREEXPORT	1		/* Routes ready for export, before filtering */
#define RSEM_EXPORT	2		/* Routes accepted by export filter */
#define RSEM_NOEXPORT	3		/* Routes rejected by export filter */
372:
373: /*
374: * Route Attributes
375: *
376: * Beware: All standard BGP attributes must be represented here instead
377: * of making them local to the route. This is needed to ensure proper
378: * construction of BGP route attribute lists.
379: */
380:
/* Nexthop structure; a route may have a chain of these (ECMP/multipath) */
struct nexthop {
  ip_addr gw;				/* Next hop */
  struct iface *iface;			/* Outgoing interface */
  struct nexthop *next;			/* Next hop in a multipath chain */
  byte flags;				/* RNF_* flags */
  byte weight;
  byte labels_orig;			/* Number of labels before hostentry was applied */
  byte labels;				/* Number of all labels */
  u32 label[0];				/* Inline MPLS label stack (up to MPLS_MAX_LABEL_STACK) */
};

#define RNF_ONLINK		0x1	/* Gateway is onlink regardless of IP ranges */


/* Identity of a route's originator; one per (proto, private_id) */
struct rte_src {
  struct rte_src *next;			/* Hash chain */
  struct proto *proto;			/* Protocol the source is based on */
  u32 private_id;			/* Private ID, assigned by the protocol */
  u32 global_id;			/* Globally unique ID of the source */
  unsigned uc;				/* Use count */
};
403:
404:
/* Route attribute block, shared by many routes (reference-counted) */
typedef struct rta {
  struct rta *next, **pprev;		/* Hash chain */
  u32 uc;				/* Use count */
  u32 hash_key;				/* Hash over important fields */
  struct ea_list *eattrs;		/* Extended Attribute chain */
  struct rte_src *src;			/* Route source that created the route */
  struct hostentry *hostentry;		/* Hostentry for recursive next-hops */
  ip_addr from;				/* Advertising router */
  u32 igp_metric;			/* IGP metric to next hop (for iBGP routes) */
  u8 source;				/* Route source (RTS_...) */
  u8 scope;				/* Route scope (SCOPE_... -- see ip.h) */
  u8 dest;				/* Route destination type (RTD_...) */
  u8 aflags;				/* Attribute cache flags (RTAF_...) */
  struct nexthop nh;			/* Next hop; must be last (inline label stack, see rta_size()) */
} rta;
420:
/* Values of rta.source */
#define RTS_DUMMY 0			/* Dummy route to be removed soon */
#define RTS_STATIC 1			/* Normal static route */
#define RTS_INHERIT 2			/* Route inherited from kernel */
#define RTS_DEVICE 3			/* Device route */
#define RTS_STATIC_DEVICE 4		/* Static device route */
#define RTS_REDIRECT 5			/* Learned via redirect */
#define RTS_RIP 6			/* RIP route */
#define RTS_OSPF 7			/* OSPF route */
#define RTS_OSPF_IA 8			/* OSPF inter-area route */
#define RTS_OSPF_EXT1 9			/* OSPF external route type 1 */
#define RTS_OSPF_EXT2 10		/* OSPF external route type 2 */
#define RTS_BGP 11			/* BGP route */
#define RTS_PIPE 12			/* Inter-table wormhole */
#define RTS_BABEL 13			/* Babel route */
#define RTS_RPKI 14			/* Route Origin Authorization */
#define RTS_PERF 15			/* Perf checker */
#define RTS_MAX 16

#define RTC_UNICAST 0
#define RTC_BROADCAST 1
#define RTC_MULTICAST 2
#define RTC_ANYCAST 3			/* IPv6 Anycast */

/* Values of rta.dest and hostentry.dest */
#define RTD_NONE 0			/* Undefined next hop */
#define RTD_UNICAST 1			/* Next hop is neighbor router */
#define RTD_BLACKHOLE 2			/* Silently drop packets */
#define RTD_UNREACHABLE 3		/* Reject as unreachable */
#define RTD_PROHIBIT 4			/* Administratively prohibited */
#define RTD_MAX 5

/* Flags for net->n.flags, used by kernel syncer */
#define KRF_INSTALLED 0x80		/* This route should be installed in the kernel */
#define KRF_SYNC_ERROR 0x40		/* Error during kernel table synchronization */

#define RTAF_CACHED 1			/* This is a cached rta */

#define IGP_METRIC_UNKNOWN 0x80000000	/* Default igp_metric used when no other
					   protocol-specific metric is available */
460:
461: const char * rta_dest_names[RTD_MAX];
462:
463: static inline const char *rta_dest_name(uint n)
464: { return (n < RTD_MAX) ? rta_dest_names[n] : "???"; }
465:
466: /* Route has regular, reachable nexthop (i.e. not RTD_UNREACHABLE and like) */
467: static inline int rte_is_reachable(rte *r)
468: { return r->attrs->dest == RTD_UNICAST; }
469:
470:
471: /*
472: * Extended Route Attributes
473: */
474:
/* One extended route attribute */
typedef struct eattr {
  word id;				/* EA_CODE(PROTOCOL_..., protocol-dependent ID) */
  byte flags;				/* Protocol-dependent flags */
  byte type;				/* Attribute type and several flags (EAF_...) */
  union {
    u32 data;				/* Embedded value (used for EAF_EMBEDDED types) */
    const struct adata *ptr;		/* Attribute data elsewhere */
  } u;
} eattr;


/* Attribute IDs: protocol number in the upper byte, per-protocol ID in the lower */
#define EA_CODE(proto,id) (((proto) << 8) | (id))
#define EA_ID(ea) ((ea) & 0xff)
#define EA_PROTO(ea) ((ea) >> 8)
#define EA_ID_FLAG(ea) (1 << EA_ID(ea))
#define EA_CUSTOM(id) ((id) | EA_CUSTOM_BIT)
#define EA_IS_CUSTOM(ea) ((ea) & EA_CUSTOM_BIT)
#define EA_CUSTOM_ID(ea) ((ea) & ~EA_CUSTOM_BIT)

const char *ea_custom_name(uint ea);

#define EA_GEN_IGP_METRIC EA_CODE(PROTOCOL_NONE, 0)

#define EA_CODE_MASK 0xffff
#define EA_CUSTOM_BIT 0x8000
#define EA_ALLOW_UNDEF 0x10000		/* ea_find: allow EAF_TYPE_UNDEF */
#define EA_BIT(n) ((n) << 24)		/* Used in bitfield accessors */
#define EA_BIT_GET(ea) ((ea) >> 24)

/* eattr.type: 5-bit type code (EAF_TYPE_*, includes flag bits below) plus flags */
#define EAF_TYPE_MASK 0x1f		/* Mask with this to get type */
#define EAF_TYPE_INT 0x01		/* 32-bit unsigned integer number */
#define EAF_TYPE_OPAQUE 0x02		/* Opaque byte string (not filterable) */
#define EAF_TYPE_IP_ADDRESS 0x04	/* IP address */
#define EAF_TYPE_ROUTER_ID 0x05		/* Router ID (IPv4 address) */
#define EAF_TYPE_AS_PATH 0x06		/* BGP AS path (encoding per RFC 1771:4.3) */
#define EAF_TYPE_BITFIELD 0x09		/* 32-bit embedded bitfield */
#define EAF_TYPE_INT_SET 0x0a		/* Set of u32's (e.g., a community list) */
#define EAF_TYPE_EC_SET 0x0e		/* Set of pairs of u32's - ext. community list */
#define EAF_TYPE_LC_SET 0x12		/* Set of triplets of u32's - large community list */
#define EAF_TYPE_UNDEF 0x1f		/* `force undefined' entry */
#define EAF_EMBEDDED 0x01		/* Data stored in eattr.u.data (part of type spec) */
#define EAF_VAR_LENGTH 0x02		/* Attribute length is variable (part of type spec) */
#define EAF_ORIGINATED 0x20		/* The attribute has originated locally */
#define EAF_FRESH 0x40			/* An uncached attribute (e.g. modified in export filter) */

/* Variable-length attribute payload */
typedef struct adata {
  uint length;				/* Length of data */
  byte data[0];				/* Data itself, allocated inline */
} adata;

extern const adata null_adata;		/* adata of length 0 */
526:
527: static inline struct adata *
528: lp_alloc_adata(struct linpool *pool, uint len)
529: {
530: struct adata *ad = lp_alloc(pool, sizeof(struct adata) + len);
531: ad->length = len;
532: return ad;
533: }
534:
535: static inline int adata_same(const struct adata *a, const struct adata *b)
536: { return (a->length == b->length && !memcmp(a->data, b->data, a->length)); }
537:
538:
/* A list of extended attributes; lists may be chained via .next */
typedef struct ea_list {
  struct ea_list *next;			/* In case we have an override list */
  byte flags;				/* Flags: EALF_... */
  byte rfu;				/* Reserved for future use */
  word count;				/* Number of attributes */
  eattr attrs[0];			/* Attribute definitions themselves */
} ea_list;

#define EALF_SORTED 1			/* Attributes are sorted by code */
#define EALF_BISECT 2			/* Use interval bisection for searching */
#define EALF_CACHED 4			/* Attributes belonging to cached rta */
#define EALF_TEMP 8			/* Temporary ea_list added by make_tmp_attrs hooks */

struct rte_src *rt_find_source(struct proto *p, u32 id);
struct rte_src *rt_get_source(struct proto *p, u32 id);
static inline void rt_lock_source(struct rte_src *src) { src->uc++; }
static inline void rt_unlock_source(struct rte_src *src) { src->uc--; }
void rt_prune_sources(void);

/* Iteration state for ea_walk() over a chain of ea_lists */
struct ea_walk_state {
  ea_list *eattrs;			/* Current ea_list, initially set by caller */
  eattr *ea;				/* Current eattr, initially NULL */
  u32 visited[4];			/* Bitfield, limiting max to 128 */
};

eattr *ea_find(ea_list *, unsigned ea);
eattr *ea_walk(struct ea_walk_state *s, uint id, uint max);
int ea_get_int(ea_list *, unsigned ea, int def);
void ea_dump(ea_list *);
void ea_sort(ea_list *);		/* Sort entries in all sub-lists */
unsigned ea_scan(ea_list *);		/* How many bytes do we need for merged ea_list */
void ea_merge(ea_list *from, ea_list *to); /* Merge sub-lists to allocated buffer */
int ea_same(ea_list *x, ea_list *y);	/* Test whether two ea_lists are identical */
uint ea_hash(ea_list *e);		/* Calculate 16-bit hash value */
ea_list *ea_append(ea_list *to, ea_list *what);
void ea_format_bitfield(struct eattr *a, byte *buf, int bufsize, const char **names, int min, int max);
575:
/*
 * Normalize an ea_list lvalue in place: merge a chained list into a single
 * list allocated on the caller's stack (alloca), sort it, and replace an
 * empty result with NULL. The trailing backslash after `while(0)' was
 * removed -- it silently continued the macro onto the following source
 * line, which would absorb whatever line comes next into the definition.
 */
#define ea_normalize(ea) do { \
  if (ea->next) { \
    ea_list *t = alloca(ea_scan(ea)); \
    ea_merge(ea, t); \
    ea = t; \
  } \
  ea_sort(ea); \
  if (ea->count == 0) \
    ea = NULL; \
} while(0)
586:
587: static inline eattr *
588: ea_set_attr(ea_list **to, struct linpool *pool, uint id, uint flags, uint type, uintptr_t val)
589: {
590: ea_list *a = lp_alloc(pool, sizeof(ea_list) + sizeof(eattr));
591: eattr *e = &a->attrs[0];
592:
593: a->flags = EALF_SORTED;
594: a->count = 1;
595: a->next = *to;
596: *to = a;
597:
598: e->id = id;
599: e->type = type;
600: e->flags = flags;
601:
602: if (type & EAF_EMBEDDED)
603: e->u.data = (u32) val;
604: else
605: e->u.ptr = (struct adata *) val;
606:
607: return e;
608: }
609:
/* Convenience wrappers around ea_set_attr() for common value kinds */
static inline void
ea_set_attr_u32(ea_list **to, struct linpool *pool, uint id, uint flags, uint type, u32 val)
{ ea_set_attr(to, pool, id, flags, type, (uintptr_t) val); }

static inline void
ea_set_attr_ptr(ea_list **to, struct linpool *pool, uint id, uint flags, uint type, struct adata *val)
{ ea_set_attr(to, pool, id, flags, type, (uintptr_t) val); }

/* As ea_set_attr(), but copies the data into a freshly allocated adata */
static inline void
ea_set_attr_data(ea_list **to, struct linpool *pool, uint id, uint flags, uint type, void *data, uint len)
{
  struct adata *a = lp_alloc_adata(pool, len);
  memcpy(a->data, data, len);
  ea_set_attr(to, pool, id, flags, type, (uintptr_t) a);
}
625:
626:
/* Maximum size of a nexthop with a full MPLS label stack */
#define NEXTHOP_MAX_SIZE (sizeof(struct nexthop) + sizeof(u32)*MPLS_MAX_LABEL_STACK)

/* Actual size of a nexthop including its inline label stack */
static inline size_t nexthop_size(const struct nexthop *nh)
{ return sizeof(struct nexthop) + sizeof(u32)*nh->labels; }
int nexthop__same(struct nexthop *x, struct nexthop *y); /* Compare multipath nexthops */
static inline int nexthop_same(struct nexthop *x, struct nexthop *y)
{ return (x == y) || nexthop__same(x, y); }
struct nexthop *nexthop_merge(struct nexthop *x, struct nexthop *y, int rx, int ry, int max, linpool *lp);
struct nexthop *nexthop_sort(struct nexthop *x);
/* Copy a nexthop (including its labels) into rta's embedded nh field */
static inline void nexthop_link(struct rta *a, struct nexthop *from)
{ memcpy(&a->nh, from, nexthop_size(from)); }
void nexthop_insert(struct nexthop **n, struct nexthop *y);
int nexthop_is_sorted(struct nexthop *x);

void rta_init(void);
/* Actual size of an rta including the label stack of its embedded nexthop */
static inline size_t rta_size(const rta *a) { return sizeof(rta) + sizeof(u32)*a->nh.labels; }
#define RTA_MAX_SIZE (sizeof(rta) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
rta *rta_lookup(rta *);			/* Get rta equivalent to this one, uc++ */
static inline int rta_is_cached(rta *r) { return r->aflags & RTAF_CACHED; }
/* Bump use count of a cached rta */
static inline rta *rta_clone(rta *r) { r->uc++; return r; }
void rta__free(rta *r);
/* Drop use count, freeing the rta when it reaches zero; accepts NULL */
static inline void rta_free(rta *r) { if (r && !--r->uc) rta__free(r); }
rta *rta_do_cow(rta *o, linpool *lp);
/* Get a writable rta; copies only when the rta is cached */
static inline rta * rta_cow(rta *r, linpool *lp) { return rta_is_cached(r) ? rta_do_cow(r, lp) : r; }
void rta_dump(rta *);
void rta_dump_all(void);
void rta_show(struct cli *, rta *);

u32 rt_get_igp_metric(rte *rt);
struct hostentry * rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep);
void rta_apply_hostentry(rta *a, struct hostentry *he, mpls_label_stack *mls);

/* Resolve recursive next hop gw via table tab for routes in dependent table dep */
static inline void
rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr gw, ip_addr ll, mpls_label_stack *mls)
{
  rta_apply_hostentry(a, rt_get_hostentry(tab, gw, ll, dep), mls);
}
664:
665: /*
666: * rta_set_recursive_next_hop() acquires hostentry from hostcache and fills
667: * rta->hostentry field. New hostentry has zero use count. Cached rta locks its
668: * hostentry (increases its use count), uncached rta does not lock it. Hostentry
669: * with zero use count is removed asynchronously during host cache update,
 * therefore it is safe to hold such hostentry temporarily. Hostentry holds a
671: * lock for a 'source' rta, mainly to share multipath nexthops.
672: *
673: * There is no need to hold a lock for hostentry->dep table, because that table
674: * contains routes responsible for that hostentry, and therefore is non-empty if
675: * given hostentry has non-zero use count. If the hostentry has zero use count,
676: * the entry is removed before dep is referenced.
677: *
678: * The protocol responsible for routes with recursive next hops should hold a
679: * lock for a 'source' table governing that routes (argument tab to
680: * rta_set_recursive_next_hop()), because its routes reference hostentries
681: * (through rta) related to the governing table. When all such routes are
682: * removed, rtas are immediately removed achieving zero uc. Then the 'source'
683: * table lock could be immediately released, although hostentries may still
684: * exist - they will be freed together with the 'source' table.
685: */
686:
/* Hostentry use count management; see the locking discussion above. NULL-safe. */
static inline void rt_lock_hostentry(struct hostentry *he) { if (he) he->uc++; }
static inline void rt_unlock_hostentry(struct hostentry *he) { if (he) he->uc--; }
689:
690: /*
691: * Default protocol preferences
692: */
693:
#define DEF_PREF_DIRECT		240	/* Directly connected */
#define DEF_PREF_STATIC		200	/* Static route */
#define DEF_PREF_OSPF		150	/* OSPF intra-area, inter-area and type 1 external routes */
#define DEF_PREF_BABEL		130	/* Babel */
#define DEF_PREF_RIP		120	/* RIP */
#define DEF_PREF_BGP		100	/* BGP */
#define DEF_PREF_RPKI		100	/* RPKI */
#define DEF_PREF_INHERITED	10	/* Routes inherited from other routing daemons */

/*
 * Route Origin Authorization
 */

/* ROA validation states (presumably returned by net_roa_check()) */
#define ROA_UNKNOWN	0
#define ROA_VALID	1
#define ROA_INVALID	2
710:
711: #endif