File: [ELWIX - Embedded LightWeight unIX] / embedaddon / ntp / ntpd / ntp_monitor.c
Revision 1.1.1.1 (vendor branch)
Tue May 29 12:08:37 2012 UTC by misho
Branches: ntp, MAIN
CVS tags: v4_2_6p5p0, v4_2_6p5, HEAD
ntp 4.2.6p5

/*
 * ntp_monitor - monitor ntpd statistics
 */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_if.h"
#include "ntp_stdlib.h"
#include <ntp_random.h>

#include <stdio.h>
#include <signal.h>
#ifdef HAVE_SYS_IOCTL_H
# include <sys/ioctl.h>
#endif

/*
 * Record statistics based on source address, mode and version. The
 * receive procedure calls us with the incoming rbufp before it does
 * anything else. While at it, implement rate controls for inbound
 * traffic.
 *
 * Each entry is doubly linked into two lists, a hash table and a most-
 * recently-used (MRU) list. When a packet arrives it is looked up in
 * the hash table. If found, the statistics are updated and the entry
 * relinked at the head of the MRU list. If not found, a new entry is
 * allocated, initialized and linked into both the hash table and at the
 * head of the MRU list.
 *
 * Memory is usually allocated by grabbing a big chunk of new memory and
 * cutting it up into littler pieces. The exception to this is when we
 * hit the memory limit. Then we free memory by grabbing entries off the
 * tail of the MRU list, unlinking them from the hash table, and
 * reinitializing them.
 */
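
/*
 * Editorial sketch (not from the original source): the struct mon_data
 * fields used below to implement the two lists described above, as
 * assumed here (the structure itself is declared elsewhere, ntpd.h is
 * assumed):
 *
 *	struct mon_data {
 *		struct mon_data *hash_next;	/- hash bucket chain; also
 *						   used as the free-list link
 *		struct mon_data *mru_next;	/- MRU list, newest first
 *		struct mon_data *mru_prev;
 *		...				/- statistics fields
 *	};
 *
 * A lookup hashes the source address, walks the bucket via hash_next,
 * and on a hit splices the entry to the head of the MRU list; see
 * ntp_monitor() below for the real logic.
 */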
/*
 * Limits on the number of structures allocated.  This limit is picked
 * with the illicit knowledge that we can only return somewhat less than
 * 8K bytes in a mode 7 response packet, and that each structure will
 * require about 20 bytes of space in the response.
 *
 * ... I don't believe the above is true anymore ... jdg
 */
#ifndef MAXMONMEM
#define	MAXMONMEM	600	/* we allocate up to 600 structures */
#endif
#ifndef MONMEMINC
#define	MONMEMINC	40	/* allocate them 40 at a time */
#endif
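
/*
 * Editorial note (not from the original source): with the defaults
 * above, the monitor never allocates more than MAXMONMEM (600)
 * entries, so mon_getmoremem() is called at most MAXMONMEM / MONMEMINC
 * = 15 times, each call adding MONMEMINC (40) entries to the free
 * list.  Once the ceiling is reached, new sources are accommodated
 * only by recycling the oldest MRU entries.
 */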

/*
 * Hashing stuff
 */
#define	MON_HASH_SIZE	NTP_HASH_SIZE
#define	MON_HASH_MASK	NTP_HASH_MASK
#define	MON_HASH(addr)	NTP_HASH_ADDR(addr)

/*
 * Pointers to the hash table, the MRU list and the count table.  Memory
 * for the hash and count tables is only allocated if monitoring is
 * turned on.
 */
static	struct mon_data *mon_hash[MON_HASH_SIZE];  /* list ptrs */
struct	mon_data mon_mru_list;

/*
 * List of free structures, and counters of free and total
 * structures. The free structures are linked with the hash_next field.
 */
static	struct mon_data *mon_free;	/* free list or null if none */
static	int mon_total_mem;		/* total structures allocated */
static	int mon_mem_increments;		/* times called malloc() */

/*
 * Parameters of the RES_LIMITED restriction option. We define headway
 * as the idle time between packets. A packet is discarded if the
 * headway is less than the minimum, or if the average headway is less
 * than eight times the increment.
 */
int	ntp_minpkt = NTP_MINPKT;	/* minimum (log 2 s) */
int	ntp_minpoll = NTP_MINPOLL;	/* increment (log 2 s) */
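
/*
 * Editorial worked example (not from the original source), assuming
 * the usual defaults NTP_MINPKT = 2, NTP_MINPOLL = 4 and NTP_SHIFT = 8
 * (see ntp_monitor() below for the exact decision logic):
 *
 *	minimum spacing    1 << ntp_minpkt  =   4 s
 *	headway increment  1 << ntp_minpoll =  16 s
 *	average threshold  NTP_SHIFT * head = 128 s
 *
 * A source polling every 64 s passes both tests: its leak counter
 * gains head - 2 = 14 per packet but drains by 64 in between, so it
 * stays well under the limit and the RES_LIMITED and RES_KOD bits are
 * cleared.  A source sending once per second fails the 4 s minimum;
 * its counter is pushed to limit + head = 144 and whatever RES_LIMITED
 * and RES_KOD bits the restriction list supplied are left set, with
 * further KoDs held off until the counter drains below the average
 * threshold again.
 */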

/*
 * Initialization state.  We may be monitoring, we may not.  If
 * we aren't, we may not even have allocated any memory yet.
 */
int	mon_enabled;			/* enable switch */
int	mon_age = 3000;			/* preemption limit */
static	int mon_have_memory;
static	void	mon_getmoremem	(void);
static	void	remove_from_hash (struct mon_data *);

/*
 * init_mon - initialize monitoring global data
 */
void
init_mon(void)
{
	/*
	 * Don't do much of anything here.  We don't allocate memory
	 * until someone explicitly starts us.
	 */
	mon_enabled = MON_OFF;
	mon_have_memory = 0;
	mon_total_mem = 0;
	mon_mem_increments = 0;
	mon_free = NULL;
	memset(&mon_hash[0], 0, sizeof mon_hash);
	memset(&mon_mru_list, 0, sizeof mon_mru_list);
}


/*
 * mon_start - start up the monitoring software
 */
void
mon_start(
	int mode
	)
{
	if (mon_enabled != MON_OFF) {
		mon_enabled |= mode;
		return;
	}
	if (mode == MON_OFF)
		return;

	if (!mon_have_memory) {
		mon_total_mem = 0;
		mon_mem_increments = 0;
		mon_free = NULL;
		mon_getmoremem();
		mon_have_memory = 1;
	}

	mon_mru_list.mru_next = &mon_mru_list;
	mon_mru_list.mru_prev = &mon_mru_list;
	mon_enabled = mode;
}
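
/*
 * Editorial note (not from the original source): mon_start() and its
 * counterpart mon_stop() below take a bit mask, so monitoring can be
 * enabled for more than one reason at a time.  The callers are assumed
 * to pass MON_ON (the "enable monitor" configuration option) and
 * MON_RES (monitoring required by a rate-limiting restriction);
 * monitoring shuts down only once both bits have been cleared.
 */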

/*
 * mon_stop - stop the monitoring software
 */
void
mon_stop(
	int mode
	)
{
	register struct mon_data *md, *md_next;
	register int i;

	if (mon_enabled == MON_OFF)
		return;
	if ((mon_enabled & mode) == 0 || mode == MON_OFF)
		return;

	mon_enabled &= ~mode;
	if (mon_enabled != MON_OFF)
		return;

	/*
	 * Put everything back on the free list
	 */
	for (i = 0; i < MON_HASH_SIZE; i++) {
		md = mon_hash[i];		/* get next list */
		mon_hash[i] = NULL;		/* zero the list head */
		while (md != NULL) {
			md_next = md->hash_next;
			md->hash_next = mon_free;
			mon_free = md;
			md = md_next;
		}
	}
	mon_mru_list.mru_next = &mon_mru_list;
	mon_mru_list.mru_prev = &mon_mru_list;
}

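/*
 * ntp_monclearinterface - remove all entries received via the given
 * interface, returning them to the free list
 */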
void
ntp_monclearinterface(struct interface *interface)
{
	struct mon_data *md;

	for (md = mon_mru_list.mru_next; md != &mon_mru_list;
	    md = md->mru_next) {
		if (md->interface == interface) {
			/* dequeue from MRU list and put on free list */
			md->mru_prev->mru_next = md->mru_next;
			md->mru_next->mru_prev = md->mru_prev;
			remove_from_hash(md);
			md->hash_next = mon_free;
			mon_free = md;
		}
	}
}


/*
 * ntp_monitor - record stats about this packet
 *
 * Returns the restriction flags for this source, with the RES_LIMITED
 * and RES_KOD bits updated according to the rate limits.
 */
int
ntp_monitor(
	struct recvbuf *rbufp,
	int	flags
	)
{
	register struct pkt *pkt;
	register struct mon_data *md;
	sockaddr_u addr;
	register u_int hash;
	register int mode;
	int	interval;

	if (mon_enabled == MON_OFF)
		return (flags);

	pkt = &rbufp->recv_pkt;
	memset(&addr, 0, sizeof(addr));
	memcpy(&addr, &(rbufp->recv_srcadr), sizeof(addr));
	hash = MON_HASH(&addr);
	mode = PKT_MODE(pkt->li_vn_mode);
	md = mon_hash[hash];
	while (md != NULL) {
		int	head;		/* headway increment */
		int	leak;		/* new headway */
		int	limit;		/* average threshold */

		/*
		 * Match address only to conserve MRU size.
		 */
		if (SOCK_EQ(&md->rmtadr, &addr)) {
			interval = current_time - md->lasttime;
			md->lasttime = current_time;
			md->count++;
			md->flags = flags;
			md->rmtport = NSRCPORT(&rbufp->recv_srcadr);
			md->mode = (u_char) mode;
			md->version = PKT_VERSION(pkt->li_vn_mode);

			/*
			 * Shuffle to the head of the MRU list.
			 */
			md->mru_next->mru_prev = md->mru_prev;
			md->mru_prev->mru_next = md->mru_next;
			md->mru_next = mon_mru_list.mru_next;
			md->mru_prev = &mon_mru_list;
			mon_mru_list.mru_next->mru_prev = md;
			mon_mru_list.mru_next = md;

			/*
			 * At this point the most recent arrival is
			 * first in the MRU list. Decrease the counter
			 * by the headway, but not less than zero.
			 */
			md->leak -= interval;
			if (md->leak < 0)
				md->leak = 0;
			head = 1 << ntp_minpoll;
			leak = md->leak + head;
			limit = NTP_SHIFT * head;
#ifdef DEBUG
			if (debug > 1)
				printf("restrict: interval %d headway %d limit %d\n",
				    interval, leak, limit);
#endif

			/*
			 * If the minimum and average thresholds are not
			 * exceeded, douse the RES_LIMITED and RES_KOD
			 * bits and increase the counter by the headway
			 * increment. Note that we give a 1-s grace for
			 * the minimum threshold and a 2-s grace for the
			 * headway increment. If one or both thresholds
			 * are exceeded and the old counter is less than
			 * the average threshold, set the counter to the
			 * average threshold plus the increment and
			 * leave the RES_KOD bit lit. Otherwise, leave
			 * the counter alone and douse the RES_KOD bit.
			 * This rate-limits the KoDs to no less than the
			 * average headway.
			 */
			if (interval + 1 >= (1 << ntp_minpkt) &&
			    leak < limit) {
				md->leak = leak - 2;
				md->flags &= ~(RES_LIMITED | RES_KOD);
			} else if (md->leak < limit) {
				md->leak = limit + head;
			} else {
				md->flags &= ~RES_KOD;
			}
			return (md->flags);
		}
		md = md->hash_next;
	}

	/*
	 * If we got here, this is the first we've heard of this
	 * guy.  Get him some memory, either from the free list
	 * or from the tail of the MRU list.
	 */
	if (mon_free == NULL && mon_total_mem >= MAXMONMEM) {

		/*
		 * Preempt from the MRU list if old enough.  The oldest
		 * (tail) entry is recycled with a probability that
		 * grows with its idle time relative to mon_age, so
		 * recently active entries are likely to survive.
		 */
		md = mon_mru_list.mru_prev;
		if (ntp_random() / (2. * FRAC) > (double)(current_time
		    - md->lasttime) / mon_age)
			return (flags & ~(RES_LIMITED | RES_KOD));

		md->mru_prev->mru_next = &mon_mru_list;
		mon_mru_list.mru_prev = md->mru_prev;
		remove_from_hash(md);
	} else {
		if (mon_free == NULL)
			mon_getmoremem();
		md = mon_free;
		mon_free = md->hash_next;
	}

	/*
	 * Got one, initialize it
	 */
	md->lasttime = md->firsttime = current_time;
	md->count = 1;
	md->flags = flags & ~(RES_LIMITED | RES_KOD);
	md->leak = 0;
	memset(&md->rmtadr, 0, sizeof(md->rmtadr));
	memcpy(&md->rmtadr, &addr, sizeof(addr));
	md->rmtport = NSRCPORT(&rbufp->recv_srcadr);
	md->mode = (u_char) mode;
	md->version = PKT_VERSION(pkt->li_vn_mode);
	md->interface = rbufp->dstadr;
	md->cast_flags = (u_char)(((rbufp->dstadr->flags &
	    INT_MCASTOPEN) && rbufp->fd == md->interface->fd) ?
	    MDF_MCAST: rbufp->fd == md->interface->bfd ? MDF_BCAST :
	    MDF_UCAST);

	/*
	 * Drop him into the front of the hash table and put him at the
	 * head of the MRU list.
	 */
	md->hash_next = mon_hash[hash];
	mon_hash[hash] = md;
	md->mru_next = mon_mru_list.mru_next;
	md->mru_prev = &mon_mru_list;
	mon_mru_list.mru_next->mru_prev = md;
	mon_mru_list.mru_next = md;
	return (md->flags);
}
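
/*
 * Editorial sketch (not part of the original source): how the receive
 * path is assumed to consume the result.  The caller and variable
 * names below are illustrative, not taken from this file.
 *
 *	restrict_mask = restrictions(&rbufp->recv_srcadr);
 *	...
 *	restrict_mask = ntp_monitor(rbufp, restrict_mask);
 *	if (restrict_mask & RES_LIMITED) {
 *		/- over the rate limit: drop the packet, optionally
 *		   answering with a RATE KoD if RES_KOD is also set
 *	}
 */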


/*
 * mon_getmoremem - get more memory and put it on the free list
 */
static void
mon_getmoremem(void)
{
	register struct mon_data *md;
	register int i;
	struct mon_data *freedata;	/* 'old' free list (null) */

	md = (struct mon_data *)emalloc(MONMEMINC *
	    sizeof(struct mon_data));
	freedata = mon_free;
	mon_free = md;
	for (i = 0; i < (MONMEMINC - 1); i++) {
		md->hash_next = (md + 1);
		md++;
	}

	/*
	 * md now points at the last entry.  Link in the rest of the
	 * chain.
	 */
	md->hash_next = freedata;
	mon_total_mem += MONMEMINC;
	mon_mem_increments++;
}

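/*
 * remove_from_hash - unlink an entry from its hash bucket; the MRU
 * list and the free list are not touched here
 */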
static void
remove_from_hash(
	struct mon_data *md
	)
{
	register u_int hash;
	register struct mon_data *md_prev;

	hash = MON_HASH(&md->rmtadr);
	if (mon_hash[hash] == md) {
		mon_hash[hash] = md->hash_next;
	} else {
		md_prev = mon_hash[hash];
		while (md_prev->hash_next != md) {
			md_prev = md_prev->hash_next;
			if (md_prev == NULL) {
				/* logic error */
				return;
			}
		}
		md_prev->hash_next = md->hash_next;
	}
}
