File: embedaddon/pimd/rp.c
Revision 1.1.1.1 (vendor branch)
Mon Jun 12 07:59:37 2017 UTC by misho
Branches: pimd, MAIN
CVS tags: v2_3_2, HEAD
pimd 2.3.2

    1: /*
    2:  * Copyright (c) 1998-2001
    3:  * University of Southern California/Information Sciences Institute.
    4:  * All rights reserved.
    5:  *
    6:  * Redistribution and use in source and binary forms, with or without
    7:  * modification, are permitted provided that the following conditions
    8:  * are met:
    9:  * 1. Redistributions of source code must retain the above copyright
   10:  *    notice, this list of conditions and the following disclaimer.
   11:  * 2. Redistributions in binary form must reproduce the above copyright
   12:  *    notice, this list of conditions and the following disclaimer in the
   13:  *    documentation and/or other materials provided with the distribution.
   14:  * 3. Neither the name of the project nor the names of its contributors
   15:  *    may be used to endorse or promote products derived from this software
   16:  *    without specific prior written permission.
   17:  *
   18:  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
   19:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
   22:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28:  * SUCH DAMAGE.
   29:  */
   30: 
   31: #include "defs.h"
   32: 
   33: 
   34: /*
   35:  * The hash function. Stolen from Eddy's (eddy@isi.edu)
   36:  * implementation (for compatibility ;)
   37:  */
   38: #define SEED1   1103515245
   39: #define SEED2   12345
   40: #define RP_HASH_VALUE(G, M, C) (((SEED1) * (((SEED1) * ((G) & (M)) + (SEED2)) ^ (C)) + (SEED2)) % 0x80000000)
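
/*
 * For reference, the macro above implements the RP hash from RFC 2362:
 *
 *   Value(G, M, C(i)) =
 *       (1103515245 * ((1103515245 * (G & M) + 12345) XOR C(i)) + 12345) mod 2^31
 *
 * where G is the group address, M the hash mask advertised by the BSR, and
 * C(i) the address of the i-th candidate RP.  Among candidate RPs of equal
 * priority the one with the biggest hash value wins, with ties broken by the
 * biggest RP address (see rp_grp_match() below).
 */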
   41: 
   42: cand_rp_t               *cand_rp_list;
   43: grp_mask_t              *grp_mask_list;
   44: cand_rp_t               *segmented_cand_rp_list;
   45: grp_mask_t              *segmented_grp_mask_list;
   46: uint16_t                 curr_bsr_fragment_tag;
   47: uint8_t                  curr_bsr_priority;
   48: uint32_t                 curr_bsr_address;
   49: uint32_t                 curr_bsr_hash_mask;
   50: uint16_t                 pim_bootstrap_timer;   /* For electing the BSR and
   51: 						 * sending Cand-RP-set msgs */
   52: uint8_t                  my_bsr_priority;
   53: uint32_t                 my_bsr_address;
   54: uint32_t                 my_bsr_hash_mask;
   55: uint8_t                  cand_bsr_flag = FALSE; /* Set to TRUE if I am
   56: 						 * a candidate BSR */
   57: uint32_t                 my_cand_rp_address;
   58: uint8_t                  my_cand_rp_priority;
   59: uint16_t                 my_cand_rp_holdtime;
   60: uint16_t                 my_cand_rp_adv_period; /* The locally configured
   61: 						 * Cand-RP adv. period. */
   62: uint16_t                 pim_cand_rp_adv_timer;
   63: uint8_t                  cand_rp_flag  = FALSE;  /* Candidate RP flag */
   64: struct cand_rp_adv_message_ cand_rp_adv_message;
   65: uint32_t                 rp_my_ipv4_hashmask;
   66: 
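/*
 * A sketch of how the RP-set is kept in memory (inferred from the code
 * below, kept here only as a reading aid):
 *
 *   cand_rp_list --> cand_rp_t --> cand_rp_t --> ...  (biggest RP address
 *       first); each cand_rp_t owns one rpentry_t and, via rp_grp_next,
 *       an unordered chain of rp_grp_entry_t;
 *
 *   grp_mask_list --> grp_mask_t --> ...  (biggest group prefix first);
 *       each grp_mask_t owns, via grp_rp_next, a chain of rp_grp_entry_t
 *       sorted by RP priority (best first);
 *
 *   every rp_grp_entry_t sits on both chains (rp_grp_next/prev and
 *       grp_rp_next/prev) and points, via grplink, to the grpentry_t's of
 *       the active groups currently mapped to that (RP, group-prefix) pair.
 */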
   67: 
   68: /*
   69:  * Local function definitions.
   70:  */
   71: static cand_rp_t  *add_cand_rp          (cand_rp_t **used_cand_rp_list, uint32_t address);
   72: static grp_mask_t *add_grp_mask         (grp_mask_t **used_grp_mask_list,
   73: 					 uint32_t group_addr,
   74: 					 uint32_t group_mask,
   75: 					 uint32_t hash_mask);
   76: static void       delete_grp_mask_entry (cand_rp_t **used_cand_rp_list,
   77: 					 grp_mask_t **used_grp_mask_list,
   78: 					 grp_mask_t *grp_mask_delete);
   79: static void       delete_rp_entry       (cand_rp_t **used_cand_rp_list,
   80: 					 grp_mask_t **used_grp_mask_list,
   81: 					 cand_rp_t *cand_rp_ptr);
   82: 
   83: 
   84: void init_rp_and_bsr(void)
   85: {
   86:     /* TODO: if the grplist is not NULL, remap all groups ASAP! */
   87:     delete_rp_list(&cand_rp_list, &grp_mask_list);
   88:     delete_rp_list(&segmented_cand_rp_list, &segmented_grp_mask_list);
   89: 
   90:     if (cand_bsr_flag == FALSE) {
   91: 	/*
   92: 	 * If I am not a candidate BSR, initialize the "current BSR"
   93: 	 * as having the lowest priority.
   94: 	 */
   95: 	curr_bsr_fragment_tag = 0;
   96: 	curr_bsr_priority = 0;             /* Lowest priority */
   97: 	curr_bsr_address  = INADDR_ANY_N;  /* Lowest priority */
   98: 	MASKLEN_TO_MASK(RP_DEFAULT_IPV4_HASHMASKLEN, curr_bsr_hash_mask);
   99: 	SET_TIMER(pim_bootstrap_timer, PIM_BOOTSTRAP_TIMEOUT);
  100:     } else {
  101: 	curr_bsr_fragment_tag = RANDOM();
  102: 	curr_bsr_priority = my_bsr_priority;
  103: 	curr_bsr_address = my_bsr_address;
  104: 	curr_bsr_hash_mask = my_bsr_hash_mask;
  105: 	SET_TIMER(pim_bootstrap_timer, bootstrap_initial_delay());
  106:     }
  107: 
  108:     if (cand_rp_flag != FALSE) {
  109: 	MASKLEN_TO_MASK(RP_DEFAULT_IPV4_HASHMASKLEN, rp_my_ipv4_hashmask);
  110: 	/* Setup the Cand-RP-Adv-Timer */
  111: 	SET_TIMER(pim_cand_rp_adv_timer, RANDOM() % my_cand_rp_adv_period);
  112:     }
  113: }
  114: 
  115: 
  116: uint16_t bootstrap_initial_delay(void)
  117: {
  118:     uint32_t addr_delay;
  119:     uint32_t delay;
  120:     uint32_t log_mask;
  121:     int32_t  log_of_2;
  122:     uint8_t  best_priority;
  123: 
  124:     /*
  125:      * The bootstrap timer initial value (if Cand-BSR).
  126:      * It depends on the bootstrap router priority: a higher priority
  127:      * gives a shorter delay (a worked example follows this function):
  128:      *
  129:      * delay = 5 + 2 * log_2(1 + best_priority - myPriority) + addr_delay;
  130:      *
  131:      *    best_priority = Max(storedPriority, myPriority);
  132:      *    if (best_priority == myPriority)
  133:      *        addr_delay = log_2(bestAddr - myAddr)/16;
  134:      *    else
  135:      *        addr_delay = 2 - (myAddr/2^31);
  136:      */
  137: 
  138:     best_priority = max(curr_bsr_priority, my_bsr_priority);
  139:     if (best_priority == my_bsr_priority) {
  140: 	addr_delay = ntohl(curr_bsr_address) - ntohl(my_bsr_address);
  141: 	/* Calculate the integer part of log_2 of (bestAddr - myAddr) */
  142: 	/* To do so, find the position of the leftmost bit
  143: 	 * that is set to `1`.
  144: 	 */
  145: 	log_mask = sizeof(addr_delay) << 3;
  146: 	log_mask = (1 << (log_mask - 1));  /* Set the leftmost bit to `1` */
  147: 	for (log_of_2 = (sizeof(addr_delay) << 3) - 1 ; log_of_2; log_of_2--) {
  148: 	    if (addr_delay & log_mask)
  149: 		break;
  150: 
  151: 	    log_mask >>= 1;  /* Shift the test bit one position to the right */
  152: 	}
  153: 	addr_delay = log_of_2 / 16;
  154:     } else {
  155: 	addr_delay = 2 - (ntohl(my_bsr_address) / ( 1 << 31));
  156:     }
  157: 
  158:     delay = 1 + best_priority - my_bsr_priority;
  159:     /* Calculate log_2(delay) */
  160:     log_mask = sizeof(delay) << 3;
  161:     log_mask = (1 << (log_mask - 1));  /* Set the leftmost bit to `1` */
  162:     for (log_of_2 = (sizeof(delay) << 3) - 1 ; log_of_2; log_of_2--) {
  163: 	if (delay & log_mask)
  164: 	    break;
  165: 
  166: 	log_mask >>= 1;  /* Shift the test bit one position to the right */
  167:     }
  168: 
  169:     delay = 5 + 2 * log_of_2 + addr_delay;
  170: 
  171:     return (uint16_t)delay;
  172: }
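
/*
 * Worked example (illustrative values only, traced from the code above):
 * right after init_rp_and_bsr() on a Cand-BSR, curr_bsr_* mirrors my_bsr_*,
 * so best_priority == my_bsr_priority, addr_delay == 0, and delay == 1
 * gives log_of_2 == 0, i.e. the minimum
 *
 *     delay = 5 + 2*0 + 0 = 5 seconds.
 *
 * A router that has stored a better BSR, e.g. one whose priority is 3 above
 * its own, gets delay = 5 + 2*log_2(1 + 3) + addr_delay = 9 + addr_delay,
 * where addr_delay is 1 or 2 depending on my_bsr_address.
 */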
  173: 
  174: 
  175: static cand_rp_t *add_cand_rp(cand_rp_t **used_cand_rp_list, uint32_t address)
  176: {
  177:     cand_rp_t *prev = NULL;
  178:     cand_rp_t *next;
  179:     cand_rp_t *ptr;
  180:     rpentry_t *entry;
  181:     uint32_t addr_h = ntohl(address);
  182: 
  183:     /* The list is ordered with the biggest RP address first */
  184:     for (next = *used_cand_rp_list; next; prev = next, next = next->next) {
  185: 	if (ntohl(next->rpentry->address) > addr_h)
  186: 	    continue;
  187: 
  188: 	if (next->rpentry->address == address)
  189: 	    return next;
  190: 	else
  191: 	    break;
  192:     }
  193: 
  194:     /* Create and insert the new entry between prev and next */
  195:     ptr = (cand_rp_t *)calloc(1, sizeof(cand_rp_t));
  196:     if (!ptr)
  197: 	logit(LOG_ERR, 0, "Ran out of memory in add_cand_rp()");
  198:     ptr->rp_grp_next = NULL;
  199:     ptr->next = next;
  200:     ptr->prev = prev;
  201:     if (next)
  202: 	next->prev = ptr;
  203:     if (prev == NULL)
  204: 	*used_cand_rp_list = ptr;
  205:     else
  206: 	prev->next = ptr;
  207: 
  208:     entry = (rpentry_t *)calloc(1, sizeof(rpentry_t));
  209:     if (!entry)
  210: 	logit(LOG_ERR, 0, "Ran out of memory in add_cand_rp()");
  211:     ptr->rpentry = entry;
  212:     entry->next = NULL;
  213:     entry->prev  = NULL;
  214:     entry->address = address;
  215:     entry->mrtlink = NULL;
  216:     entry->incoming = NO_VIF;
  217:     entry->upstream = NULL;
  218:     /* TODO: setup the metric and the preference as ~0 (the lowest)? */
  219:     entry->metric = ~0;
  220:     entry->preference = ~0;
  221:     RESET_TIMER(entry->timer);
  222:     entry->cand_rp = ptr;
  223: 
  224:     /* TODO: XXX: check whether there is a route to that RP: if return value
  225:      * is FALSE, then no route.
  226:      */
  227:     if (local_address(entry->address) == NO_VIF)
  228: 	/* TODO: check for error and delete */
  229: 	set_incoming(entry, PIM_IIF_RP);
  230:     else
  231: 	/* TODO: XXX: CHECK!!! */
  232: 	entry->incoming = reg_vif_num;
  233: 
  234:     return ptr;
  235: }
  236: 
  237: 
  238: static grp_mask_t *add_grp_mask(grp_mask_t **used_grp_mask_list, uint32_t group_addr, uint32_t group_mask, uint32_t hash_mask)
  239: {
  240:     grp_mask_t *prev = NULL;
  241:     grp_mask_t *next;
  242:     grp_mask_t *ptr;
  243:     uint32_t prefix_h = ntohl(group_addr & group_mask);
  244: 
  245:     /* The ordering of the group prefixes is: bigger first */
  246:     for (next = *used_grp_mask_list; next; prev = next, next = next->next) {
  247: 	if (ntohl(next->group_addr & next->group_mask) > prefix_h)
  248: 	    continue;
  249: 
  250: 	/* The ordering of group_mask is: bigger (longer) first */
  251: 	if ((next->group_addr & next->group_mask) == (group_addr & group_mask)) {
  252: 	    if (ntohl(next->group_mask) > ntohl(group_mask))
  253: 		continue;
  254: 	    else if (next->group_mask == group_mask)
  255: 		return next;
  256: 	    else
  257: 		break;
  258: 	}
  259:     }
  260: 
  261:     ptr = (grp_mask_t *)calloc(1, sizeof(grp_mask_t));
  262:     if (!ptr)
  263: 	logit(LOG_ERR, 0, "Ran out of memory in add_grp_mask()");
  264: 
  265:     ptr->grp_rp_next = (rp_grp_entry_t *)NULL;
  266:     ptr->next = next;
  267:     ptr->prev = prev;
  268:     if (next)
  269: 	next->prev = ptr;
  270:     if (prev == NULL)
  271: 	*used_grp_mask_list = ptr;
  272:     else
  273: 	prev->next = ptr;
  274: 
  275:     ptr->group_addr = group_addr;
  276:     ptr->group_mask = group_mask;
  277:     ptr->hash_mask  = hash_mask;
  278:     ptr->group_rp_number = 0;
  279:     ptr->fragment_tag = 0;
  280: 
  281:     return ptr;
  282: }
  283: 
  284: 
  285: /* TODO: XXX: BUG: adding the new entry may require remapping some groups
  286:  * that currently use some other grp_mask!!!
  287:  * Remapping all groups might be a costly process...
  288:  */
  289: rp_grp_entry_t *add_rp_grp_entry(cand_rp_t  **used_cand_rp_list,
  290: 				 grp_mask_t **used_grp_mask_list,
  291: 				 uint32_t rp_addr,
  292: 				 uint8_t  rp_priority,
  293: 				 uint16_t rp_holdtime,
  294: 				 uint32_t group_addr,
  295: 				 uint32_t group_mask,
  296: 				 uint32_t bsr_hash_mask,
  297: 				 uint16_t fragment_tag)
  298: {
  299:     cand_rp_t *cand_rp_ptr;
  300:     grp_mask_t *mask_ptr;
  301:     rpentry_t *rpentry_ptr;
  302:     rp_grp_entry_t *entry_next;
  303:     rp_grp_entry_t *entry_new;
  304:     rp_grp_entry_t *entry_prev = NULL;
  305:     grpentry_t *grpentry_ptr_prev;
  306:     grpentry_t *grpentry_ptr_next;
  307:     uint32_t rp_addr_h;
  308:     uint8_t old_highest_priority = ~0;  /* Smaller value means "higher" */
  309: 
  310:     /* Input data verification */
  311:     if (!inet_valid_host(rp_addr))
  312: 	return NULL;
  313:     if (!IN_CLASSD(ntohl(group_addr)))
  314: 	return NULL;
  315: 
  316:     mask_ptr = add_grp_mask(used_grp_mask_list, group_addr, group_mask, bsr_hash_mask);
  317:     if (mask_ptr == NULL)
  318: 	return NULL;
  319: 
  320: /* TODO: delete */
  321: #if 0
  322:     if (mask_ptr->grp_rp_next) {
  323: 	/* Check for obsolete grp_rp chain */
  324: 	if ((my_bsr_address != curr_bsr_address) && (mask_ptr->grp_rp_next->fragment_tag != fragment_tag)) {
  325: 	    /* This grp_rp chain is obsolete. Delete it. */
  326: 	    delete_grp_mask(used_cand_rp_list, used_grp_mask_list, group_addr, group_mask);
  327: 	    mask_ptr = add_grp_mask(used_grp_mask_list, group_addr, group_mask, bsr_hash_mask);
  328: 
  329: 	    if (mask_ptr == NULL)
  330: 		return NULL;
  331: 	}
  332:     }
  333: #endif /* 0 */
  334: 
  335:     cand_rp_ptr = add_cand_rp(used_cand_rp_list, rp_addr);
  336:     if (cand_rp_ptr == NULL) {
  337: 	if (mask_ptr->grp_rp_next == NULL)
  338: 	    delete_grp_mask(used_cand_rp_list, used_grp_mask_list,
  339: 			    group_addr, group_mask);
  340: 	return NULL;
  341:     }
  342: 
  343:     rpentry_ptr = cand_rp_ptr->rpentry;
  344:     SET_TIMER(rpentry_ptr->timer, rp_holdtime);
  345:     rp_addr_h = ntohl(rp_addr);
  346:     mask_ptr->fragment_tag = fragment_tag;   /* For garbage collection */
  347: 
  348:     entry_prev = NULL;
  349:     entry_next = mask_ptr->grp_rp_next;
  350:     /* TODO: improve it */
  351:     if (entry_next != NULL)
  352: 	old_highest_priority = entry_next->priority;
  353:     for ( ; entry_next; entry_prev = entry_next,
  354: 	      entry_next = entry_next->grp_rp_next) {
  355: 	/* Smaller value means higher priority. The entries are
  356: 	 * sorted with the highest priority first.
  357: 	 */
  358: 	if (entry_next->priority < rp_priority)
  359: 	    continue;
  360: 	if (entry_next->priority > rp_priority)
  361: 	    break;
  362: 
  363: 	/*
  364: 	 * Here we don't really care about higher/lower addresses, because a
  365: 	 * higher address does not guarantee a higher hash_value.
  366: 	 * We order with the higher address first anyway, so it is
  367: 	 * easier to find an existing entry and update its
  368: 	 * holdtime.
  369: 	 */
  370: 	if (ntohl(entry_next->rp->rpentry->address) > rp_addr_h)
  371: 	    continue;
  372: 	if (ntohl(entry_next->rp->rpentry->address) < rp_addr_h)
  373: 	    break;
  374: 	/* We already have this entry. Update the holdtime */
  375: 	/* TODO: We shouldn't have an old existing entry, because with the
  376: 	 * current implementation all of them will be deleted
  377: 	 * (different fragment_tag). Debug, check and eventually
  378: 	 * delete this.
  379: 	 */
  380: 	entry_next->holdtime = rp_holdtime;
  381: 	entry_next->fragment_tag = fragment_tag;
  382: 
  383: 	return entry_next;
  384:     }
  385: 
  386:     /* Create and link the new entry */
  387:     entry_new = (rp_grp_entry_t *)calloc(1, sizeof(rp_grp_entry_t));
  388:     if (!entry_new)
  389: 	logit(LOG_ERR, 0, "Ran out of memory in add_rp_grp_entry()");
  390:     entry_new->grp_rp_next = entry_next;
  391:     entry_new->grp_rp_prev = entry_prev;
  392:     if (entry_next)
  393: 	entry_next->grp_rp_prev = entry_new;
  394:     if (entry_prev == NULL)
  395: 	mask_ptr->grp_rp_next = entry_new;
  396:     else
  397: 	entry_prev->grp_rp_next = entry_new;
  398: 
  399:     /*
  400:      * The rp_grp_entry chain is not ordered, so just plug
  401:      * the new entry at the head.
  402:      */
  403:     entry_new->rp_grp_next = cand_rp_ptr->rp_grp_next;
  404:     if (cand_rp_ptr->rp_grp_next)
  405: 	cand_rp_ptr->rp_grp_next->rp_grp_prev = entry_new;
  406:     entry_new->rp_grp_prev = NULL;
  407:     cand_rp_ptr->rp_grp_next = entry_new;
  408: 
  409:     entry_new->holdtime = rp_holdtime;
  410:     entry_new->fragment_tag = fragment_tag;
  411:     entry_new->priority = rp_priority;
  412:     entry_new->group = mask_ptr;
  413:     entry_new->rp = cand_rp_ptr;
  414:     entry_new->grplink = NULL;
  415: 
  416:     /* If I am a candidate BSR and rp_addr is NOT the hacked SSM address, log it */
  417:     if (cand_bsr_flag && rp_addr != 0x0100fea9) {
  418: 	uint32_t mask;
  419: 	MASK_TO_MASKLEN(group_mask, mask);
  420: 	logit(LOG_INFO, 0, "New RP candidate %s for group %s/%d, priority %d",
  421: 	      inet_fmt(rp_addr, s1, sizeof(s1)), inet_fmt(group_addr, s2, sizeof(s2)), mask, rp_priority);
  422:     }
  423: 
  424:     mask_ptr->group_rp_number++;
  425: 
  426:     if (mask_ptr->grp_rp_next->priority == rp_priority) {
  427: 	/* The first entries are with the best priority. */
  428: 	/* Adding this rp_grp_entry may result in group_to_rp remapping */
  429: 	for (entry_next = mask_ptr->grp_rp_next; entry_next; entry_next = entry_next->grp_rp_next) {
  430: 	    if (entry_next->priority > old_highest_priority)
  431: 		break;
  432: 
  433: 	    for (grpentry_ptr_prev = entry_next->grplink; grpentry_ptr_prev; ) {
  434: 		grpentry_ptr_next = grpentry_ptr_prev->rpnext;
  435: 		remap_grpentry(grpentry_ptr_prev);
  436: 		grpentry_ptr_prev = grpentry_ptr_next;
  437: 	    }
  438: 	}
  439:     }
  440: 
  441:     return entry_new;
  442: }
  443: 
  444: 
  445: void delete_rp_grp_entry(cand_rp_t **used_cand_rp_list, grp_mask_t **used_grp_mask_list, rp_grp_entry_t *entry)
  446: {
  447:     grpentry_t *ptr;
  448:     grpentry_t *ptr_next;
  449: 
  450:     if (entry == NULL)
  451: 	return;
  452:     entry->group->group_rp_number--;
  453: 
  454:     /* Free the rp_grp* and grp_rp* links */
  455:     if (entry->rp_grp_prev)
  456: 	entry->rp_grp_prev->rp_grp_next = entry->rp_grp_next;
  457:     else
  458: 	entry->rp->rp_grp_next = entry->rp_grp_next;
  459:     if (entry->rp_grp_next)
  460: 	entry->rp_grp_next->rp_grp_prev = entry->rp_grp_prev;
  461: 
  462:     if (entry->grp_rp_prev)
  463: 	entry->grp_rp_prev->grp_rp_next = entry->grp_rp_next;
  464:     else
  465: 	entry->group->grp_rp_next = entry->grp_rp_next;
  466: 
  467:     if (entry->grp_rp_next)
  468: 	entry->grp_rp_next->grp_rp_prev = entry->grp_rp_prev;
  469: 
  470:     /* Delete Cand-RP or Group-prefix if useless */
  471:     if (entry->group->grp_rp_next == NULL)
  472: 	delete_grp_mask_entry(used_cand_rp_list, used_grp_mask_list, entry->group);
  473: 
  474:     if (entry->rp->rp_grp_next == NULL)
  475: 	delete_rp_entry(used_cand_rp_list, used_grp_mask_list, entry->rp);
  476: 
  477:     /* Remap all affected groups */
  478:     for (ptr = entry->grplink; ptr; ptr = ptr_next) {
  479: 	ptr_next = ptr->rpnext;
  480: 	remap_grpentry(ptr);
  481:     }
  482: 
  483:     free((char *)entry);
  484: }
  485: 
  486: /* TODO: XXX: the affected group entries will be left partially
  487:  * set up: they may have a group routing entry but NULL pointers to the RP.
  488:  * After the call to this function, all group entries must be remapped ASAP.
  489:  */
  490: void delete_rp_list(cand_rp_t  **used_cand_rp_list, grp_mask_t **used_grp_mask_list)
  491: {
  492:     cand_rp_t      *cand_ptr,  *cand_next;
  493:     rp_grp_entry_t *entry_ptr, *entry_next;
  494:     grp_mask_t     *mask_ptr, *mask_next;
  495:     grpentry_t     *gentry_ptr, *gentry_ptr_next;
  496: 
  497:     for (cand_ptr = *used_cand_rp_list; cand_ptr; ) {
  498: 	cand_next = cand_ptr->next;
  499: 
  500: 	/* Free the mrtentry (if any) for this RP */
  501: 	if (cand_ptr->rpentry->mrtlink) {
  502: 	    if (cand_ptr->rpentry->mrtlink->flags & MRTF_KERNEL_CACHE)
  503: 		delete_mrtentry_all_kernel_cache(cand_ptr->rpentry->mrtlink);
  504: 	    FREE_MRTENTRY(cand_ptr->rpentry->mrtlink);
  505: 	}
  506: 	free(cand_ptr->rpentry);
  507: 
  508: 	/* Free the whole chain of entries for this RP */
  509: 	for (entry_ptr = cand_ptr->rp_grp_next; entry_ptr; entry_ptr = entry_next) {
  510: 	    entry_next = entry_ptr->rp_grp_next;
  511: 
  512: 	    /* Clear the RP related invalid pointers for all group entries */
  513: 	    for (gentry_ptr = entry_ptr->grplink; gentry_ptr; gentry_ptr = gentry_ptr_next) {
  514: 		gentry_ptr_next = gentry_ptr->rpnext;
  515: 		gentry_ptr->rpnext = NULL;
  516: 		gentry_ptr->rpprev = NULL;
  517: 		gentry_ptr->active_rp_grp = NULL;
  518: 		gentry_ptr->rpaddr = INADDR_ANY_N;
  519: 	    }
  520: 
  521: 	    free(entry_ptr);
  522: 	}
  523: 
  524: 	free(cand_ptr);
  525: 	cand_ptr = cand_next;
  526:     }
  527:     *used_cand_rp_list = NULL;
  528: 
  529:     for (mask_ptr = *used_grp_mask_list; mask_ptr; mask_ptr = mask_next) {
  530: 	mask_next = mask_ptr->next;
  531: 	free(mask_ptr);
  532:     }
  533:     *used_grp_mask_list = NULL;
  534: }
  535: 
  536: 
  537: void delete_grp_mask(cand_rp_t **used_cand_rp_list, grp_mask_t **used_grp_mask_list, uint32_t group_addr, uint32_t group_mask)
  538: {
  539:     grp_mask_t *ptr;
  540:     uint32_t prefix_h = ntohl(group_addr & group_mask);
  541: 
  542:     for (ptr = *used_grp_mask_list; ptr; ptr = ptr->next) {
  543: 	if (ntohl(ptr->group_addr & ptr->group_mask) > prefix_h)
  544: 	    continue;
  545: 
  546: 	if (ptr->group_addr == group_addr) {
  547: 	    if (ntohl(ptr->group_mask) > ntohl(group_mask))
  548: 		continue;
  549: 	    else if (ptr->group_mask == group_mask)
  550: 		break;
  551: 	    else
  552: 		return;   /* Not found */
  553: 	}
  554:     }
  555: 
  556:     if (ptr == (grp_mask_t *)NULL)
  557: 	return;       /* Not found */
  558: 
  559:     delete_grp_mask_entry(used_cand_rp_list, used_grp_mask_list, ptr);
  560: }
  561: 
  562: static void delete_grp_mask_entry(cand_rp_t **used_cand_rp_list, grp_mask_t **used_grp_mask_list, grp_mask_t *grp_mask_delete)
  563: {
  564:     grpentry_t *grp_ptr, *grp_ptr_next;
  565:     rp_grp_entry_t *entry_ptr;
  566:     rp_grp_entry_t *entry_next;
  567: 
  568:     if (grp_mask_delete == NULL)
  569: 	return;
  570: 
  571:     /* Remove from the grp_mask_list first */
  572:     if (grp_mask_delete->prev)
  573: 	grp_mask_delete->prev->next = grp_mask_delete->next;
  574:     else
  575: 	*used_grp_mask_list = grp_mask_delete->next;
  576: 
  577:     if (grp_mask_delete->next)
  578: 	grp_mask_delete->next->prev = grp_mask_delete->prev;
  579: 
  580:     /* Remove all grp_rp entries for this grp_mask */
  581:     for (entry_ptr = grp_mask_delete->grp_rp_next; entry_ptr; entry_ptr = entry_next) {
  582: 	entry_next = entry_ptr->grp_rp_next;
  583: 
  584: 	/* Remap all related grpentry */
  585: 	for (grp_ptr = entry_ptr->grplink; grp_ptr; grp_ptr = grp_ptr_next) {
  586: 	    grp_ptr_next = grp_ptr->rpnext;
  587: 	    remap_grpentry(grp_ptr);
  588: 	}
  589: 
  590: 	if (entry_ptr->rp_grp_prev != (rp_grp_entry_t *)NULL)
  591: 	    entry_ptr->rp_grp_prev->rp_grp_next = entry_ptr->rp_grp_next;
  592: 	else
  593: 	    entry_ptr->rp->rp_grp_next = entry_ptr->rp_grp_next;
  594: 
  595: 	if (entry_ptr->rp_grp_next != NULL)
  596: 	    entry_ptr->rp_grp_next->rp_grp_prev = entry_ptr->rp_grp_prev;
  597: 
  598: 	/* Delete the RP entry */
  599: 	if (entry_ptr->rp->rp_grp_next == NULL)
  600: 	    delete_rp_entry(used_cand_rp_list, used_grp_mask_list, entry_ptr->rp);
  601: 
  602: 	free(entry_ptr);
  603:     }
  604: 
  605:     free(grp_mask_delete);
  606: }
  607: 
  608: /*
  609:  * TODO: currently not used.
  610:  */
  611: void delete_rp(cand_rp_t **used_cand_rp_list, grp_mask_t **used_grp_mask_list, uint32_t rp_addr)
  612: {
  613:     cand_rp_t *ptr;
  614:     uint32_t rp_addr_h = ntohl(rp_addr);
  615: 
  616:     for(ptr = *used_cand_rp_list; ptr != NULL; ptr = ptr->next) {
  617: 	if (ntohl(ptr->rpentry->address) > rp_addr_h)
  618: 	    continue;
  619: 
  620: 	if (ptr->rpentry->address == rp_addr)
  621: 	    break;
  622: 	else
  623: 	    return;   /* Not found */
  624:     }
  625: 
  626:     if (ptr == NULL)
  627: 	return;       /* Not found */
  628: 
  629:     delete_rp_entry(used_cand_rp_list, used_grp_mask_list, ptr);
  630: }
  631: 
  632: 
  633: static void delete_rp_entry(cand_rp_t **used_cand_rp_list, grp_mask_t **used_grp_mask_list, cand_rp_t *cand_rp_delete)
  634: {
  635:     rp_grp_entry_t *entry_ptr;
  636:     rp_grp_entry_t *entry_next;
  637:     grpentry_t *grp_ptr;
  638:     grpentry_t *grp_ptr_next;
  639: 
  640:     if (cand_rp_delete == NULL)
  641: 	return;
  642: 
  643:     /* Remove from the cand-RP chain */
  644:     if (cand_rp_delete->prev)
  645: 	cand_rp_delete->prev->next = cand_rp_delete->next;
  646:     else
  647: 	*used_cand_rp_list = cand_rp_delete->next;
  648: 
  649:     if (cand_rp_delete->next)
  650: 	cand_rp_delete->next->prev = cand_rp_delete->prev;
  651: 
  652:     if (cand_rp_delete->rpentry->mrtlink) {
  653: 	if (cand_rp_delete->rpentry->mrtlink->flags & MRTF_KERNEL_CACHE)
  654: 	    delete_mrtentry_all_kernel_cache(cand_rp_delete->rpentry->mrtlink);
  655: 
  656: 	FREE_MRTENTRY(cand_rp_delete->rpentry->mrtlink);
  657:     }
  658:     free ((char *)cand_rp_delete->rpentry);
  659: 
  660:     /* Remove all rp_grp entries for this RP */
  661:     for (entry_ptr = cand_rp_delete->rp_grp_next; entry_ptr; entry_ptr = entry_next) {
  662: 	entry_next = entry_ptr->rp_grp_next;
  663: 	entry_ptr->group->group_rp_number--;
  664: 
  665: 	/* First take care of the grp_rp chain */
  666: 	if (entry_ptr->grp_rp_prev)
  667: 	    entry_ptr->grp_rp_prev->grp_rp_next = entry_ptr->grp_rp_next;
  668: 	else
  669: 	    entry_ptr->group->grp_rp_next = entry_ptr->grp_rp_next;
  670: 
  671: 	if (entry_ptr->grp_rp_next)
  672: 	    entry_ptr->grp_rp_next->grp_rp_prev = entry_ptr->grp_rp_prev;
  673: 
  674: 	if (entry_ptr->grp_rp_next == NULL)
  675: 	    delete_grp_mask_entry(used_cand_rp_list, used_grp_mask_list, entry_ptr->group);
  676: 
  677: 	/* Remap the related groups */
  678: 	for (grp_ptr = entry_ptr->grplink; grp_ptr; grp_ptr = grp_ptr_next) {
  679: 	    grp_ptr_next = grp_ptr->rpnext;
  680: 	    remap_grpentry(grp_ptr);
  681: 	}
  682: 
  683: 	free(entry_ptr);
  684:     }
  685: 
  686:     free((char *)cand_rp_delete);
  687: }
  688: 
  689: 
  690: /*
  691:  * Rehash the RP for the group.
  692:  * XXX: currently, every time remap_grpentry() is called there has
  693:  * been a good reason to change the RP, so for performance reasons
  694:  * no check is performed whether the new RP is really a different one.
  695:  */
  696: int remap_grpentry(grpentry_t *grpentry_ptr)
  697: {
  698:     rpentry_t *rpentry_ptr;
  699:     rp_grp_entry_t *entry_ptr;
  700:     mrtentry_t *grp_route;
  701:     mrtentry_t *mrtentry_ptr;
  702: 
  703:     if (grpentry_ptr == NULL)
  704: 	return FALSE;
  705: 
  706:     /* Remove from the list of all groups mapping to the same RP */
  707:     if (grpentry_ptr->rpprev) {
  708: 	grpentry_ptr->rpprev->rpnext = grpentry_ptr->rpnext;
  709:     } else {
  710: 	if (grpentry_ptr->active_rp_grp)
  711: 	    grpentry_ptr->active_rp_grp->grplink = grpentry_ptr->rpnext;
  712:     }
  713: 
  714:     if (grpentry_ptr->rpnext)
  715: 	grpentry_ptr->rpnext->rpprev = grpentry_ptr->rpprev;
  716: 
  717:     entry_ptr = rp_grp_match(grpentry_ptr->group);
  718:     if (entry_ptr == NULL) {
  719: 	/* If cannot remap, delete the group */
  720: 	delete_grpentry(grpentry_ptr);
  721: 	return FALSE;
  722:     }
  723:     rpentry_ptr = entry_ptr->rp->rpentry;
  724: 
  725:     /* Add to the new chain of all groups mapping to the same RP */
  726:     grpentry_ptr->rpaddr  = rpentry_ptr->address;
  727:     grpentry_ptr->active_rp_grp = entry_ptr;
  728:     grpentry_ptr->rpnext = entry_ptr->grplink;
  729:     if (grpentry_ptr->rpnext)
  730: 	grpentry_ptr->rpnext->rpprev = grpentry_ptr;
  731:     grpentry_ptr->rpprev = NULL;
  732:     entry_ptr->grplink = grpentry_ptr;
  733: 
  734:     grp_route = grpentry_ptr->grp_route;
  735:     if (grp_route) {
  736: 	grp_route->upstream   = rpentry_ptr->upstream;
  737: 	grp_route->metric     = rpentry_ptr->metric;
  738: 	grp_route->preference = rpentry_ptr->preference;
  739: 	change_interfaces(grp_route, rpentry_ptr->incoming,
  740: 			  grp_route->joined_oifs,
  741: 			  grp_route->pruned_oifs,
  742: 			  grp_route->leaves,
  743: 			  grp_route->asserted_oifs, MFC_UPDATE_FORCE);
  744:     }
  745: 
  746:     for (mrtentry_ptr = grpentry_ptr->mrtlink; mrtentry_ptr; mrtentry_ptr = mrtentry_ptr->grpnext) {
  747: 	if (!(mrtentry_ptr->flags & MRTF_RP))
  748: 	    continue;
  749: 
  750: 	mrtentry_ptr->upstream = rpentry_ptr->upstream;
  751: 	mrtentry_ptr->metric   = rpentry_ptr->metric;
  752: 	mrtentry_ptr->preference = rpentry_ptr->preference;
  753: 	change_interfaces(mrtentry_ptr, rpentry_ptr->incoming,
  754: 			  mrtentry_ptr->joined_oifs,
  755: 			  mrtentry_ptr->pruned_oifs,
  756: 			  mrtentry_ptr->leaves,
  757: 			  mrtentry_ptr->asserted_oifs, MFC_UPDATE_FORCE);
  758:     }
  759: 
  760:     return TRUE;
  761: }
  762: 
  763: 
  764: rpentry_t *rp_match(uint32_t group)
  765: {
  766:     rp_grp_entry_t *ptr;
  767: 
  768:     ptr = rp_grp_match(group);
  769:     if (ptr)
  770: 	return ptr->rp->rpentry;
  771: 
  772:     return NULL;
  773: }
  774: 
  775: rp_grp_entry_t *rp_grp_match(uint32_t group)
  776: {
  777:     grp_mask_t *mask_ptr;
  778:     rp_grp_entry_t *entry_ptr;
  779:     rp_grp_entry_t *best_entry = NULL;
  780:     uint8_t best_priority       = ~0; /* Smaller is better */
  781:     uint32_t best_hash_value    = 0;  /* Bigger is better */
  782:     uint32_t best_address_h     = 0;  /* Bigger is better */
  783:     uint32_t curr_hash_value    = 0;
  784:     uint32_t curr_address_h     = 0;
  785:     uint32_t group_h            = ntohl(group);
  786: 
  787:     if (grp_mask_list == NULL)
  788: 	return NULL;
  789: 
  790:     for (mask_ptr = grp_mask_list; mask_ptr; mask_ptr = mask_ptr->next) {
  791: 	/* Search the grp_mask (group-prefix) list */
  792: 	if ((group_h & ntohl(mask_ptr->group_mask))
  793: 	    != ntohl(mask_ptr->group_mask & mask_ptr->group_addr))
  794: 	    continue;
  795: 
  796: 	for (entry_ptr = mask_ptr->grp_rp_next; entry_ptr; entry_ptr = entry_ptr->grp_rp_next) {
  797: 	    if (best_priority < entry_ptr->priority)
  798: 		break;
  799: 
  800: 	    curr_address_h = ntohl(entry_ptr->rp->rpentry->address);
  801: 	    curr_hash_value = RP_HASH_VALUE(group_h, mask_ptr->hash_mask, curr_address_h); /* hash uses this RP's own address */
  802: 
  803: 	    if (best_priority == entry_ptr->priority) {
  804: 		/* Compare the hash_value and then the addresses */
  805: 		if (curr_hash_value < best_hash_value)
  806: 		    continue;
  807: 
  808: 		if (curr_hash_value == best_hash_value) {
  809: 		    if (curr_address_h < best_address_h)
  810: 			continue;
  811: 		}
  812: 	    }
  813: 
  814: 	    /* The current entry in the loop is preferred */
  815: 	    best_entry = entry_ptr;
  816: 	    best_priority = best_entry->priority;
  817: 	    best_address_h = curr_address_h;
  818: 	    best_hash_value = curr_hash_value;
  819: 	}
  820:     }
  821: 
  822:     return best_entry;
  823: }
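
/*
 * Hypothetical, stand-alone sketch (kept disabled, not part of pimd): how
 * the RP hash above breaks a tie between two candidate RPs of equal
 * priority.  The group, the RP addresses and the 30-bit hash mask are
 * made-up example values, and the EX_* names exist only in this sketch.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <arpa/inet.h>

#define EX_SEED1 1103515245
#define EX_SEED2 12345
#define EX_RP_HASH(G, M, C) \
    (((EX_SEED1) * (((EX_SEED1) * ((G) & (M)) + (EX_SEED2)) ^ (C)) + (EX_SEED2)) % 0x80000000)

int main(void)
{
    uint32_t group = ntohl(inet_addr("224.1.1.1"));     /* example group     */
    uint32_t mask  = 0xfffffffc;                        /* 30-bit hash mask  */
    uint32_t rp1   = ntohl(inet_addr("192.168.10.1"));  /* example Cand-RP 1 */
    uint32_t rp2   = ntohl(inet_addr("192.168.20.1"));  /* example Cand-RP 2 */
    uint32_t h1    = EX_RP_HASH(group, mask, rp1);
    uint32_t h2    = EX_RP_HASH(group, mask, rp2);

    /* Equal priority assumed: the bigger hash value wins; a hash tie is
     * broken by the bigger RP address, as in rp_grp_match() above. */
    printf("hash(rp1)=%" PRIu32 " hash(rp2)=%" PRIu32 " -> %s wins\n",
           h1, h2, h1 > h2 ? "rp1" : h2 > h1 ? "rp2" : "the bigger address");
    return 0;
}
#endif /* 0 */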
  824: 
  825: 
  826: rpentry_t *rp_find(uint32_t rp_address)
  827: {
  828:     cand_rp_t *cand_rp_ptr;
  829:     uint32_t address_h = ntohl(rp_address);
  830: 
  831:     for(cand_rp_ptr = cand_rp_list; cand_rp_ptr != NULL; cand_rp_ptr = cand_rp_ptr->next) {
  832: 	if (ntohl(cand_rp_ptr->rpentry->address) > address_h)
  833: 	    continue;
  834: 
  835: 	if (cand_rp_ptr->rpentry->address == rp_address)
  836: 	    return cand_rp_ptr->rpentry;
  837: 
  838: 	return NULL;
  839:     }
  840: 
  841:     return NULL;
  842: }
  843: 
  844: 
  845: /*
  846:  * Create a bootstrap message in "send_buff" and return the data size
  847:  * (excluding the IP header and the PIM header).  Can be used both by the
  848:  * Bootstrap router to multicast the RP-set and by the DR to unicast it to
  849:  * a new neighbor. It DOES NOT change any timers.
  850:  */
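/*
 * For orientation, the body built below (after the IP and PIM headers) is
 * laid out as follows, one field per PUT_* call:
 *
 *   Fragment-Tag (2 bytes) | Hash-Mask-Len (1) | BSR-Priority (1) |
 *   Encoded-Unicast-BSR-Address, then for each advertised group prefix:
 *     Encoded-Group-Address | RP-Count (1) | Frag-RP-Count (1) | Reserved (2)
 *     and for each Cand-RP of that prefix:
 *       Encoded-Unicast-RP-Address | RP-Holdtime (2) | RP-Priority (1) | Reserved (1)
 */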
  851: int create_pim_bootstrap_message(char *send_buff)
  852: {
  853:     uint8_t *data_ptr;
  854:     grp_mask_t *mask_ptr;
  855:     rp_grp_entry_t *entry_ptr;
  856:     int datalen;
  857:     uint8_t masklen;
  858: 
  859:     if (curr_bsr_address == INADDR_ANY_N)
  860: 	return 0;
  861: 
  862:     data_ptr = (uint8_t *)(send_buff + sizeof(struct ip) + sizeof(pim_header_t));
  863:     if (curr_bsr_address == my_bsr_address)
  864: 	curr_bsr_fragment_tag++;
  865: 
  866:     PUT_HOSTSHORT(curr_bsr_fragment_tag, data_ptr);
  867:     MASK_TO_MASKLEN(curr_bsr_hash_mask, masklen);
  868:     PUT_BYTE(masklen, data_ptr);
  869:     PUT_BYTE(curr_bsr_priority, data_ptr);
  870:     PUT_EUADDR(curr_bsr_address, data_ptr);
  871: 
  872:     /* TODO: XXX: No fragmentation support (yet) */
  873:     for (mask_ptr = grp_mask_list; mask_ptr; mask_ptr = mask_ptr->next) {
  874: 	if (IN_PIM_SSM_RANGE(mask_ptr->group_addr)) {
  875: 	    continue;  /* Do not advertise internal virtual RP for SSM groups */
  876: 	}
  877: 	MASK_TO_MASKLEN(mask_ptr->group_mask, masklen);
  878: 	PUT_EGADDR(mask_ptr->group_addr, masklen, 0, data_ptr);
  879: 	PUT_BYTE(mask_ptr->group_rp_number, data_ptr);
  880: 	PUT_BYTE(mask_ptr->group_rp_number, data_ptr); /* TODO: if frag.*/
  881: 	PUT_HOSTSHORT(0, data_ptr);
  882: 
  883: 	for (entry_ptr = mask_ptr->grp_rp_next; entry_ptr; entry_ptr = entry_ptr->grp_rp_next) {
  884: 	    PUT_EUADDR(entry_ptr->rp->rpentry->address, data_ptr);
  885: 	    PUT_HOSTSHORT(entry_ptr->holdtime, data_ptr);
  886: 	    PUT_BYTE(entry_ptr->priority, data_ptr);
  887: 	    PUT_BYTE(0, data_ptr);  /* The reserved field */
  888: 	}
  889:     }
  890: 
  891:     datalen = (data_ptr - (uint8_t *)send_buff) - sizeof(struct ip) - sizeof(pim_header_t);
  892: 
  893:     return datalen;
  894: }
  895: 
  896: 
  897: /*
  898:  * Check if the addr is the RP for the group corresponding to mrt.
  899:  * Return TRUE or FALSE.
  900:  */
  901: int check_mrtentry_rp(mrtentry_t *mrt, uint32_t addr)
  902: {
  903:     rp_grp_entry_t *ptr;
  904: 
  905:     if (!mrt)
  906: 	return FALSE;
  907: 
  908:     if (addr == INADDR_ANY_N)
  909: 	return FALSE;
  910: 
  911:     ptr = mrt->group->active_rp_grp;
  912:     if (!ptr)
  913: 	return FALSE;
  914: 
  915:     if (mrt->group->rpaddr == addr)
  916: 	return TRUE;
  917: 
  918:     return FALSE;
  919: }
  920: 
  921: /**
  922:  * Local Variables:
  923:  *  version-control: t
  924:  *  indent-tabs-mode: t
  925:  *  c-file-style: "ellemtel"
  926:  *  c-basic-offset: 4
  927:  * End:
  928:  */
