File:  [ELWIX - Embedded LightWeight unIX -] / embedaddon / ntp / ntpd / ntp_restrict.c
Revision 1.1.1.1 (vendor branch)
Tue May 29 12:08:38 2012 UTC by misho
Branches: ntp, MAIN
CVS tags: v4_2_6p5p0, v4_2_6p5, HEAD
ntp 4.2.6p5

    1: /*
    2:  * ntp_restrict.c - determine host restrictions
    3:  */
    4: #ifdef HAVE_CONFIG_H
    5: #include <config.h>
    6: #endif
    7: 
    8: #include <stdio.h>
    9: #include <sys/types.h>
   10: 
   11: #include "ntpd.h"
   12: #include "ntp_if.h"
   13: #include "ntp_lists.h"
   14: #include "ntp_stdlib.h"
   15: #include "ntp_assert.h"
   16: 
   17: /*
   18:  * This code keeps a simple address-and-mask list of hosts we want
   19:  * to place restrictions on (or remove them from). The restrictions
   20:  * are implemented as a set of flags which tell you what the host
   21:  * can't do. There is a subroutine entry to return the flags. The
   22:  * list is kept sorted to reduce the average number of comparisons
   23:  * and make sure you get the set of restrictions most specific to
   24:  * the address.
   25:  *
   26:  * The algorithm is that, when looking up a host, it is first assumed
   27:  * that the default set of restrictions will apply. It then searches
   28:  * down through the list. Whenever it finds a match it adopts the
   29:  * match's flags instead. When you hit the point where the sorted
   30:  * address is greater than the target, you return with the last set of
   31:  * flags you found. Because of the ordering of the list, the most
   32:  * specific match will provide the final set of flags.
   33:  *
    34:  * This was originally intended to keep you from sync'ing to your own
    35:  * broadcasts when you are broadcasting, by restricting your own
    36:  * interfaces. It was also thought it would sometimes be useful
   37:  * to keep a misbehaving host or two from abusing your primary clock. It
   38:  * has been expanded, however, to suit the needs of those with more
   39:  * restrictive access policies.
   40:  */
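/*
 * Illustrative example (editor's note, not in the original source):
 * with entries for 192.168.0.0/255.255.255.0 and
 * 192.168.0.0/255.255.0.0 plus the default 0.0.0.0/0.0.0.0, a lookup
 * of 192.168.0.5 returns the /24 entry's flags, 192.168.5.5 returns
 * the /16 entry's flags, and 10.1.1.1 falls through to the default.
 * A larger worked example appears in the init_restrict() comment
 * below.
 */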
   41: /*
   42:  * We will use two lists, one for IPv4 addresses and one for IPv6
    43:  * addresses. This is not protocol-independent, but for now I can't
    44:  * find a way around it. We'll revisit this later... JFB 07/2001
   45:  */
   46: #define MASK_IPV6_ADDR(dst, src, msk)					\
   47: 	do {								\
   48: 		int idx;						\
   49: 		for (idx = 0; idx < COUNTOF((dst)->s6_addr); idx++) {	\
   50: 			(dst)->s6_addr[idx] = (src)->s6_addr[idx]	\
   51: 					      & (msk)->s6_addr[idx];	\
   52: 		}							\
   53: 	} while (0)
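/*
 * Editor's sketch (not in the original source): MASK_IPV6_ADDR is the
 * IPv6 analog of (addr & mask) for IPv4 -- it ANDs the 16 address
 * bytes with the mask bytes.  The function and variable names below
 * are hypothetical, for illustration only.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_mask_v6(
	const struct in6_addr *	addr,	/* e.g. 2001:db8::1 */
	const struct in6_addr *	mask	/* e.g. ffff:ffff:: (a /32 prefix) */
	)
{
	struct in6_addr	net;

	/* net receives each byte of *addr ANDed with the corresponding
	 * byte of *mask; for the values above that yields 2001:db8::
	 */
	MASK_IPV6_ADDR(&net, addr, mask);
}
#endif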
   54: 
   55: /*
   56:  * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
   57:  * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
   58:  * for allocator overhead).
   59:  */
   60: #define	INC_RESLIST4	((1024 - 16) / V4_SIZEOF_RESTRICT_U)
   61: #define	INC_RESLIST6	((1024 - 16) / V6_SIZEOF_RESTRICT_U)
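/*
 * Worked example (editor's note; the real entry sizes come from the
 * V{4,6}_SIZEOF_RESTRICT_U definitions elsewhere): if
 * V4_SIZEOF_RESTRICT_U were 48 bytes, INC_RESLIST4 would be
 * (1024 - 16) / 48 = 21, so alloc_res4() would carve 21 entries
 * (1008 bytes) out of each roughly-1KB allocation.
 */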
   62: 
   63: /*
   64:  * The restriction list
   65:  */
   66: restrict_u *restrictlist4;
   67: restrict_u *restrictlist6;
   68: static int restrictcount;	/* count in the restrict lists */
   69: 
   70: /*
   71:  * The free list and associated counters.  Also some uninteresting
   72:  * stat counters.
   73:  */
   74: static restrict_u *resfree4;	/* available entries (free list) */
   75: static restrict_u *resfree6;
   76: 
   77: static u_long res_calls;
   78: static u_long res_found;
   79: static u_long res_not_found;
   80: 
   81: /*
   82:  * Count number of restriction entries referring to RES_LIMITED, to
   83:  * control implicit activation/deactivation of the MRU monlist.
   84:  */
   85: static	u_long res_limited_refcnt;
   86: 
   87: /*
   88:  * Our default entries.
   89:  */
   90: static	restrict_u	restrict_def4;
   91: static	restrict_u	restrict_def6;
   92: 
   93: /*
   94:  * "restrict source ..." enabled knob and restriction bits.
   95:  */
   96: static	int		restrict_source_enabled;
   97: static	u_short		restrict_source_flags;
   98: static	u_short		restrict_source_mflags;
   99: 
  100: /*
  101:  * private functions
  102:  */
  103: static restrict_u *	alloc_res4(void);
  104: static restrict_u *	alloc_res6(void);
  105: static void		free_res(restrict_u *, int);
  106: static void		inc_res_limited(void);
  107: static void		dec_res_limited(void);
  108: static restrict_u *	match_restrict4_addr(u_int32, u_short);
  109: static restrict_u *	match_restrict6_addr(const struct in6_addr *,
  110: 					     u_short);
  111: static restrict_u *	match_restrict_entry(const restrict_u *, int);
  112: static int		res_sorts_before4(restrict_u *, restrict_u *);
  113: static int		res_sorts_before6(restrict_u *, restrict_u *);
  114: 
  115: 
  116: /*
  117:  * init_restrict - initialize the restriction data structures
  118:  */
  119: void
  120: init_restrict(void)
  121: {
  122: 	/*
  123: 	 * The restriction lists begin with a default entry with address
  124: 	 * and mask 0, which will match any entry.  The lists are kept
  125: 	 * sorted by descending address followed by descending mask:
  126: 	 *
  127: 	 *   address	  mask
  128: 	 * 192.168.0.0	255.255.255.0	kod limited noquery nopeer
  129: 	 * 192.168.0.0	255.255.0.0	kod limited
  130: 	 * 0.0.0.0	0.0.0.0		kod limited noquery
  131: 	 *
  132: 	 * The first entry which matches an address is used.  With the
  133: 	 * example restrictions above, 192.168.0.0/24 matches the first
  134: 	 * entry, the rest of 192.168.0.0/16 matches the second, and
  135: 	 * everything else matches the third (default).
  136: 	 *
  137: 	 * Note this achieves the same result a little more efficiently
  138: 	 * than the documented behavior, which is to keep the lists
  139: 	 * sorted by ascending address followed by ascending mask, with
  140: 	 * the _last_ matching entry used.
  141: 	 *
  142: 	 * An additional wrinkle is we may have multiple entries with
  143: 	 * the same address and mask but differing match flags (mflags).
  144: 	 * At present there is only one, RESM_NTPONLY.  Entries with
  145: 	 * RESM_NTPONLY are sorted earlier so they take precedence over
   146:  * any otherwise similar entry without.  Again, this matches the
   147:  * documented behavior, just with the implementation order reversed.
   148:  *
  149: 	 */
  150: 	LINK_SLIST(restrictlist4, &restrict_def4, link);
  151: 	LINK_SLIST(restrictlist6, &restrict_def6, link);
  152: 	restrictcount = 2;
  153: }
  154: 
  155: 
  156: static restrict_u *
  157: alloc_res4(void)
  158: {
  159: 	const size_t	cb = V4_SIZEOF_RESTRICT_U;
  160: 	const size_t	count = INC_RESLIST4;
  161: 	restrict_u *	rl;
  162: 	restrict_u *	res;
  163: 	int		i;
  164: 
  165: 	UNLINK_HEAD_SLIST(res, resfree4, link);
  166: 	if (res != NULL)
  167: 		return res;
  168: 
  169: 	rl = emalloc(count * cb);
  170: 	memset(rl, 0, count * cb);
  171: 	/* link all but the first onto free list */
  172: 	res = (void *)((char *)rl + (count - 1) * cb);
  173: 	for (i = count - 1; i > 0; i--) {
  174: 		LINK_SLIST(resfree4, res, link);
  175: 		res = (void *)((char *)res - cb);
  176: 	}
  177: 	NTP_INSIST(rl == res);
  178: 	/* allocate the first */
  179: 	return res;
  180: }
  181: 
  182: 
  183: static restrict_u *
  184: alloc_res6(void)
  185: {
  186: 	const size_t	cb = V6_SIZEOF_RESTRICT_U;
  187: 	const size_t	count = INC_RESLIST6;
  188: 	restrict_u *	rl;
  189: 	restrict_u *	res;
  190: 	int		i;
  191: 
  192: 	UNLINK_HEAD_SLIST(res, resfree6, link);
  193: 	if (res != NULL)
  194: 		return res;
  195: 
  196: 	rl = emalloc(count * cb);
  197: 	memset(rl, 0, count * cb);
  198: 	/* link all but the first onto free list */
  199: 	res = (void *)((char *)rl + (count - 1) * cb);
  200: 	for (i = count - 1; i > 0; i--) {
  201: 		LINK_SLIST(resfree6, res, link);
  202: 		res = (void *)((char *)res - cb);
  203: 	}
  204: 	NTP_INSIST(rl == res);
  205: 	/* allocate the first */
  206: 	return res;
  207: }
  208: 
  209: 
  210: static void
  211: free_res(
  212: 	restrict_u *	res,
  213: 	int		v6
  214: 	)
  215: {
  216: 	restrict_u **	plisthead;
  217: 	restrict_u *	unlinked;
  218: 
  219: 	restrictcount--;
   220: 	if (res->flags & RES_LIMITED)
  221: 		dec_res_limited();
  222: 
  223: 	if (v6)
  224: 		plisthead = &restrictlist6;
  225: 	else
  226: 		plisthead = &restrictlist4;
  227: 	UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
  228: 	NTP_INSIST(unlinked == res);
  229: 
  230: 	if (v6) {
  231: 		memset(res, 0, V6_SIZEOF_RESTRICT_U);
  232: 		plisthead = &resfree6;
  233: 	} else {
  234: 		memset(res, 0, V4_SIZEOF_RESTRICT_U);
  235: 		plisthead = &resfree4;
  236: 	}
  237: 	LINK_SLIST(*plisthead, res, link);
  238: }
  239: 
  240: 
  241: static void
  242: inc_res_limited(void)
  243: {
  244: 	if (!res_limited_refcnt)
  245: 		mon_start(MON_RES);
  246: 	res_limited_refcnt++;
  247: }
  248: 
  249: 
  250: static void
  251: dec_res_limited(void)
  252: {
  253: 	res_limited_refcnt--;
  254: 	if (!res_limited_refcnt)
  255: 		mon_stop(MON_RES);
  256: }
  257: 
  258: 
  259: static restrict_u *
  260: match_restrict4_addr(
  261: 	u_int32	addr,
  262: 	u_short	port
  263: 	)
  264: {
  265: 	restrict_u *	res;
  266: 	restrict_u *	next;
  267: 
  268: 	for (res = restrictlist4; res != NULL; res = next) {
  269: 		next = res->link;
  270: 		if (res->u.v4.addr == (addr & res->u.v4.mask)
  271: 		    && (!(RESM_NTPONLY & res->mflags)
  272: 			|| NTP_PORT == port))
  273: 			break;
  274: 	}
  275: 	return res;
  276: }
  277: 
  278: 
  279: static restrict_u *
  280: match_restrict6_addr(
  281: 	const struct in6_addr *	addr,
  282: 	u_short			port
  283: 	)
  284: {
  285: 	restrict_u *	res;
  286: 	restrict_u *	next;
  287: 	struct in6_addr	masked;
  288: 
  289: 	for (res = restrictlist6; res != NULL; res = next) {
  290: 		next = res->link;
  291: 		NTP_INSIST(next != res);
  292: 		MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
  293: 		if (ADDR6_EQ(&masked, &res->u.v6.addr)
  294: 		    && (!(RESM_NTPONLY & res->mflags)
  295: 			|| NTP_PORT == port))
  296: 			break;
  297: 	}
  298: 	return res;
  299: }
  300: 
  301: 
  302: /*
  303:  * match_restrict_entry - find an exact match on a restrict list.
  304:  *
  305:  * Exact match is addr, mask, and mflags all equal.
  306:  * In order to use more common code for IPv4 and IPv6, this routine
  307:  * requires the caller to populate a restrict_u with mflags and either
  308:  * the v4 or v6 address and mask as appropriate.  Other fields in the
  309:  * input restrict_u are ignored.
  310:  */
  311: static restrict_u *
  312: match_restrict_entry(
  313: 	const restrict_u *	pmatch,
  314: 	int			v6
  315: 	)
  316: {
  317: 	restrict_u *res;
  318: 	restrict_u *rlist;
  319: 	size_t cb;
  320: 
  321: 	if (v6) {
  322: 		rlist = restrictlist6;
  323: 		cb = sizeof(pmatch->u.v6);
  324: 	} else {
  325: 		rlist = restrictlist4;
  326: 		cb = sizeof(pmatch->u.v4);
  327: 	}
  328: 
  329: 	for (res = rlist; res != NULL; res = res->link)
  330: 		if (res->mflags == pmatch->mflags &&
  331: 		    !memcmp(&res->u, &pmatch->u, cb))
  332: 			break;
  333: 	return res;
  334: }
  335: 
  336: 
  337: /*
  338:  * res_sorts_before4 - compare two restrict4 entries
  339:  *
  340:  * Returns nonzero if r1 sorts before r2.  We sort by descending
  341:  * address, then descending mask, then descending mflags, so sorting
  342:  * before means having a higher value.
  343:  */
  344: static int
  345: res_sorts_before4(
  346: 	restrict_u *r1,
  347: 	restrict_u *r2
  348: 	)
  349: {
  350: 	int r1_before_r2;
  351: 
  352: 	if (r1->u.v4.addr > r2->u.v4.addr)
  353: 		r1_before_r2 = 1;
  354: 	else if (r1->u.v4.addr < r2->u.v4.addr)
  355: 		r1_before_r2 = 0;
  356: 	else if (r1->u.v4.mask > r2->u.v4.mask)
  357: 		r1_before_r2 = 1;
  358: 	else if (r1->u.v4.mask < r2->u.v4.mask)
  359: 		r1_before_r2 = 0;
  360: 	else if (r1->mflags > r2->mflags)
  361: 		r1_before_r2 = 1;
  362: 	else
  363: 		r1_before_r2 = 0;
  364: 
  365: 	return r1_before_r2;
  366: }
  367: 
  368: 
  369: /*
  370:  * res_sorts_before6 - compare two restrict6 entries
  371:  *
  372:  * Returns nonzero if r1 sorts before r2.  We sort by descending
  373:  * address, then descending mask, then descending mflags, so sorting
  374:  * before means having a higher value.
  375:  */
  376: static int
  377: res_sorts_before6(
  378: 	restrict_u *r1,
  379: 	restrict_u *r2
  380: 	)
  381: {
  382: 	int r1_before_r2;
  383: 	int cmp;
  384: 
  385: 	cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
  386: 	if (cmp > 0)		/* r1->addr > r2->addr */
  387: 		r1_before_r2 = 1;
  388: 	else if (cmp < 0)	/* r2->addr > r1->addr */
  389: 		r1_before_r2 = 0;
  390: 	else {
  391: 		cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
   392: 		if (cmp > 0)		/* r1->mask > r2->mask */
  393: 			r1_before_r2 = 1;
  394: 		else if (cmp < 0)	/* r2->mask > r1->mask */
  395: 			r1_before_r2 = 0;
  396: 		else if (r1->mflags > r2->mflags)
  397: 			r1_before_r2 = 1;
  398: 		else
  399: 			r1_before_r2 = 0;
  400: 	}
  401: 
  402: 	return r1_before_r2;
  403: }
  404: 
  405: 
  406: /*
  407:  * restrictions - return restrictions for this host
  408:  */
  409: u_short
  410: restrictions(
  411: 	sockaddr_u *srcadr
  412: 	)
  413: {
  414: 	restrict_u *match;
  415: 	struct in6_addr *pin6;
  416: 	u_short flags;
  417: 
  418: 	res_calls++;
  419: 	flags = 0;
  420: 	/* IPv4 source address */
  421: 	if (IS_IPV4(srcadr)) {
  422: 		/*
  423: 		 * Ignore any packets with a multicast source address
  424: 		 * (this should be done early in the receive process,
  425: 		 * not later!)
  426: 		 */
  427: 		if (IN_CLASSD(SRCADR(srcadr)))
  428: 			return (int)RES_IGNORE;
  429: 
  430: 		match = match_restrict4_addr(SRCADR(srcadr),
  431: 					     SRCPORT(srcadr));
  432: 		match->count++;
  433: 		/*
  434: 		 * res_not_found counts only use of the final default
  435: 		 * entry, not any "restrict default ntpport ...", which
  436: 		 * would be just before the final default.
  437: 		 */
  438: 		if (&restrict_def4 == match)
  439: 			res_not_found++;
  440: 		else
  441: 			res_found++;
  442: 		flags = match->flags;
  443: 	}
  444: 
  445: 	/* IPv6 source address */
  446: 	if (IS_IPV6(srcadr)) {
  447: 		pin6 = PSOCK_ADDR6(srcadr);
  448: 
  449: 		/*
  450: 		 * Ignore any packets with a multicast source address
  451: 		 * (this should be done early in the receive process,
  452: 		 * not later!)
  453: 		 */
  454: 		if (IN6_IS_ADDR_MULTICAST(pin6))
  455: 			return (int)RES_IGNORE;
  456: 
  457: 		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
  458: 		match->count++;
  459: 		if (&restrict_def6 == match)
  460: 			res_not_found++;
  461: 		else
  462: 			res_found++;
  463: 		flags = match->flags;
  464: 	}
  465: 	return (flags);
  466: }
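/*
 * Editor's sketch (not in the original source): how a caller might act
 * on the flags returned by restrictions().  The function and parameter
 * names are hypothetical; the real logic lives in the packet receive
 * path.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_check_restrictions(
	sockaddr_u *	recv_srcadr	/* source address of a received packet */
	)
{
	u_short	rmask;

	rmask = restrictions(recv_srcadr);
	if (RES_IGNORE & rmask)
		return;		/* drop the packet silently */
	if (RES_LIMITED & rmask) {
		/* subject this source to rate limiting / KoD handling */
	}
	/* ... continue normal packet processing ... */
}
#endif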
  467: 
  468: 
  469: /*
  470:  * hack_restrict - add/subtract/manipulate entries on the restrict list
  471:  */
  472: void
  473: hack_restrict(
  474: 	int		op,
  475: 	sockaddr_u *	resaddr,
  476: 	sockaddr_u *	resmask,
  477: 	u_short		mflags,
  478: 	u_short		flags
  479: 	)
  480: {
  481: 	int		v6;
  482: 	restrict_u	match;
  483: 	restrict_u *	res;
  484: 	restrict_u **	plisthead;
  485: 
  486: 	DPRINTF(1, ("restrict: op %d addr %s mask %s mflags %08x flags %08x\n",
  487: 		    op, stoa(resaddr), stoa(resmask), mflags, flags));
  488: 
  489: 	if (NULL == resaddr) {
  490: 		NTP_REQUIRE(NULL == resmask);
  491: 		NTP_REQUIRE(RESTRICT_FLAGS == op);
  492: 		restrict_source_flags = flags;
  493: 		restrict_source_mflags = mflags;
  494: 		restrict_source_enabled = 1;
  495: 		return;
  496: 	}
  497: 
  498: 	memset(&match, 0, sizeof(match));
  499: 	/* silence VC9 potentially uninit warnings */
  500: 	res = NULL;
  501: 	v6 = 0;
  502: 
  503: 	if (IS_IPV4(resaddr)) {
  504: 		v6 = 0;
  505: 		/*
  506: 		 * Get address and mask in host byte order for easy
  507: 		 * comparison as u_int32
  508: 		 */
  509: 		match.u.v4.addr = SRCADR(resaddr);
  510: 		match.u.v4.mask = SRCADR(resmask);
  511: 		match.u.v4.addr &= match.u.v4.mask;
  512: 
  513: 	} else if (IS_IPV6(resaddr)) {
  514: 		v6 = 1;
  515: 		/*
  516: 		 * Get address and mask in network byte order for easy
  517: 		 * comparison as byte sequences (e.g. memcmp())
  518: 		 */
  519: 		match.u.v6.mask = SOCK_ADDR6(resmask);
  520: 		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
  521: 			       &match.u.v6.mask);
  522: 
  523: 	} else	/* not IPv4 nor IPv6 */
  524: 		NTP_REQUIRE(0);
  525: 
  526: 	match.flags = flags;
  527: 	match.mflags = mflags;
  528: 	res = match_restrict_entry(&match, v6);
  529: 
  530: 	switch (op) {
  531: 
  532: 	case RESTRICT_FLAGS:
  533: 		/*
  534: 		 * Here we add bits to the flags. If this is a
  535: 		 * new restriction add it.
  536: 		 */
  537: 		if (NULL == res) {
  538: 			if (v6) {
  539: 				res = alloc_res6();
  540: 				memcpy(res, &match,
  541: 				       V6_SIZEOF_RESTRICT_U);
  542: 				plisthead = &restrictlist6;
  543: 			} else {
  544: 				res = alloc_res4();
  545: 				memcpy(res, &match,
  546: 				       V4_SIZEOF_RESTRICT_U);
  547: 				plisthead = &restrictlist4;
  548: 			}
  549: 			LINK_SORT_SLIST(
  550: 				*plisthead, res,
  551: 				(v6)
  552: 				  ? res_sorts_before6(res, L_S_S_CUR())
  553: 				  : res_sorts_before4(res, L_S_S_CUR()),
  554: 				link, restrict_u);
  555: 			restrictcount++;
  556: 			if (RES_LIMITED & flags)
  557: 				inc_res_limited();
  558: 		} else {
  559: 			if ((RES_LIMITED & flags) &&
  560: 			    !(RES_LIMITED & res->flags))
  561: 				inc_res_limited();
  562: 			res->flags |= flags;
  563: 		}
  564: 		break;
  565: 
  566: 	case RESTRICT_UNFLAG:
  567: 		/*
  568: 		 * Remove some bits from the flags. If we didn't
  569: 		 * find this one, just return.
  570: 		 */
  571: 		if (res != NULL) {
  572: 			if ((RES_LIMITED & res->flags)
  573: 			    && (RES_LIMITED & flags))
  574: 				dec_res_limited();
  575: 			res->flags &= ~flags;
  576: 		}
  577: 		break;
  578: 
  579: 	case RESTRICT_REMOVE:
  580: 	case RESTRICT_REMOVEIF:
  581: 		/*
  582: 		 * Remove an entry from the table entirely if we
  583: 		 * found one. Don't remove the default entry and
  584: 		 * don't remove an interface entry.
  585: 		 */
  586: 		if (res != NULL
  587: 		    && (RESTRICT_REMOVEIF == op
  588: 			|| !(RESM_INTERFACE & res->mflags))
  589: 		    && res != &restrict_def4
  590: 		    && res != &restrict_def6)
  591: 			free_res(res, v6);
  592: 		break;
  593: 
  594: 	default:	/* unknown op */
  595: 		NTP_INSIST(0);
  596: 		break;
  597: 	}
  598: 
  599: }
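/*
 * Editor's sketch (not in the original source): roughly how a
 * configuration line such as "restrict 192.168.0.0 mask 255.255.0.0
 * limited" is applied once the configuration parser has filled in the
 * address and mask.  The function name is hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_add_restriction(
	sockaddr_u *	addr,	/* 192.168.0.0, AF_INET */
	sockaddr_u *	mask	/* 255.255.0.0 */
	)
{
	/* add the RES_LIMITED restriction bit for 192.168.0.0/16 */
	hack_restrict(RESTRICT_FLAGS, addr, mask, 0, RES_LIMITED);

	/* and later, remove the entry entirely (default and interface
	 * entries are never removed by RESTRICT_REMOVE)
	 */
	hack_restrict(RESTRICT_REMOVE, addr, mask, 0, 0);
}
#endif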
  600: 
