File: [ELWIX - Embedded LightWeight unIX] / libaitio / src / Attic / mem.c
Revision 1.1.4.6, Mon May 28 07:59:18 2012 UTC, by misho
Branch: io3_1
Log message: OpenBSD patch

/*************************************************************************
* (C) 2012 AITNET ltd - Sofia/Bulgaria - <misho@elwix.org>
*  by Michael Pounov <misho@openbsd-bg.org>
*
* $Author: misho $
* $Id: mem.c,v 1.1.4.6 2012/05/28 07:59:18 misho Exp $
*
**************************************************************************
The ELWIX and AITNET software is distributed under the following
terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
	by Michael Pounov <misho@elwix.org>.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
   must display the following acknowledgement:
This product includes software developed by Michael Pounov <misho@elwix.org>
ELWIX - Embedded LightWeight unIX and its contributors.
4. Neither the name of AITNET nor the names of its contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
#include "global.h"


/*
 * mpool_init() - Init memory pool
 *
 * @maxmem = If !=0 set maximum memory quota
 * return: =NULL error or !=NULL new allocated pool
 */
mpool_t *
mpool_init(u_long maxmem)
{
	mpool_t *mp;
	register int i;

	mp = malloc(sizeof(mpool_t));
	if (!mp) {
		LOGERR;
		return NULL;
	} else
		memset(mp, 0, sizeof(mpool_t));

	pthread_mutex_init(&mp->pool_mtx, NULL);

	mp->pool_quota.max = maxmem;

	mpool_lock(mp);
	for (i = 0; i < MEM_BUCKETS; i++) {
		TAILQ_INIT(&mp->pool_active[i]);
		TAILQ_INIT(&mp->pool_inactive[i]);
	}
	mpool_unlock(mp);

	return mp;
}

/*
 * mpool_destroy() - Destroy memory pool
 *
 * @mp = Memory pool
 * return: none
 */
void
mpool_destroy(mpool_t ** __restrict mp)
{
	struct tagAlloc *m;
	register int i;

	if (!mp || !*mp)
		return;

	mpool_lock(*mp);

	for (i = 0; i < MEM_BUCKETS; i++) {
		while ((m = TAILQ_FIRST(&(*mp)->pool_active[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_active[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
		while ((m = TAILQ_FIRST(&(*mp)->pool_inactive[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_inactive[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(*mp);
	pthread_mutex_destroy(&(*mp)->pool_mtx);

	free(*mp);
	*mp = NULL;
}
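
/*
 * Usage sketch (illustrative only, not part of the library): a typical
 * pool lifetime as suggested by the functions above.  The block name and
 * size are arbitrary examples.
 *
 *	mpool_t *mp = mpool_init(0);			// 0 = no quota limit
 *	if (mp) {
 *		char *buf = mpool_malloc(mp, 256, "example_buf");
 *		if (buf)
 *			mpool_free(mp, buf, 0);		// return block to the cache
 *		mpool_destroy(&mp);			// mp is set to NULL here
 *	}
 */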

/* ----------------------------------------------------------- */

static inline long
BucketIndex(u_int size)
{
	register long b;

	if (!size--)
		return 0;		/* min bucket position in array */

	for (b = MEM_MIN_BUCKET; b < MEM_MAX_BUCKET; b++)
		if (!(size >> b))
			break;

	return b - MEM_MIN_BUCKET;	/* convert to bucket array index */
}
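
/*
 * Worked example (illustrative; the real values of MEM_MIN_BUCKET and
 * MEM_MAX_BUCKET come from the headers and may differ).  Assuming
 * MEM_MIN_BUCKET == 4, a request of size 100 leaves size == 99 after the
 * decrement; the smallest b with !(99 >> b) is 7 (2^7 == 128 >= 100), so
 * the function returns 7 - 4 == 3 and the block is carved from the
 * 128-byte bucket.
 */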

static inline struct tagAlloc *
pullInactive(mpool_t * __restrict mp, int idx)
{
	struct tagAlloc *m = NULL;

	/* the pool must be locked before calling this function */
	if ((m = TAILQ_FIRST(&mp->pool_inactive[idx]))) {
		TAILQ_REMOVE(&mp->pool_inactive[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.cache--;
		mp->pool_bytes.cache -= mem_size(m);

		/* clear name */
		*m->alloc_name = 0;
	}

	return m;
}

/*
 * mpool_malloc() - Memory allocation
 *
 * @mp = Memory pool
 * @size = Size
 * @memname = Optional memory block name
 * return: =NULL error or !=NULL allocated memory
 */
void *
mpool_malloc(mpool_t * __restrict mp, u_int size, const char *memname)
{
	struct tagAlloc *m;
	int idx;
	u_int align;

	if (!mp) {
		io_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	if (size > MEM_ALLOC_MAX) {
		io_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else
		size = (size + 3) & ~3;	/* round up to a multiple of 4 (sizeof(u_int)) so the sentinel words line up */

	idx = BucketIndex(size);

	mpool_lock(mp);

	/* reuse memory from the cache if a block is available */
	if (!(m = pullInactive(mp, idx))) {
		/* quota */
		if (mp->pool_quota.max &&
				(mp->pool_quota.curr + size) > mp->pool_quota.max) {
			io_SetErr(ENOMEM, "Maximum memory quota has been reached");
			mpool_unlock(mp);
			return NULL;
		}

		m = malloc(sizeof(struct tagAlloc));
		if (!m) {
			LOGERR;
			mpool_unlock(mp);
			return NULL;
		} else
			memset(m, 0, sizeof(struct tagAlloc));
	}

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	if (!m->alloc_mem) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		m->alloc_mem = malloc(align + 12);	/* +12 sentinel bytes */
		if (!m->alloc_mem) {
			LOGERR;
			free(m);
			mpool_unlock(mp);
			return NULL;
		} else	/* quota */
			mp->pool_quota.curr += size;
	}

	m->alloc_mem[0] = size / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + size / sizeof(u_int)] = MEM_MAGIC_STOP;
	TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
	/* statistics */
	mp->pool_calls.alloc++;
	mp->pool_bytes.alloc += size;

	mpool_unlock(mp);
	return mem_data(m, void*);
}
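
/*
 * Layout note (derived from the assignments above; mem_data() and
 * mem_size() are macros from the headers and are assumed to resolve to
 * the data area and to alloc_mem[0] * sizeof(u_int) respectively):
 *
 *	alloc_mem[0]                       = size / sizeof(u_int)   usable size in words
 *	alloc_mem[1]                       = MEM_MAGIC_START        front sentinel
 *	alloc_mem[2] ...                   user data (returned by mem_data())
 *	alloc_mem[2 + size/sizeof(u_int)]  = MEM_MAGIC_STOP         rear sentinel
 *
 * which is why malloc() above asks for the bucket size plus 12 bytes,
 * i.e. three extra u_int words.
 */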

/*
 * mpool_realloc() - Reallocate memory block with new size
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @newsize = New size of memory block
 * @memname = Optional new memory block name
 * return: =NULL error or !=NULL new reallocated memory block
 */
void *
mpool_realloc(mpool_t * __restrict mp, void * __restrict data, u_int newsize, const char *memname)
{
	struct tagAlloc *m, *tmp;
	int idx, oidx;
	void *p;
	u_int align, osize;

	/* if !data execute mpool_malloc() */
	if (!data)
		return mpool_malloc(mp, newsize, memname);

	if (!mp) {
		io_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		io_SetErr(EFAULT, "Corrupted memory address");
		return NULL;
	} else {
		osize = ((u_int*)data)[-2] * sizeof(u_int);
		oidx = BucketIndex(osize);
	}
	/* prepare new size */
	if (newsize > MEM_ALLOC_MAX) {
		io_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else {
		newsize = (newsize + 3) & ~3;	/* round up to a multiple of 4 (sizeof(u_int)) so the sentinel words line up */
		idx = BucketIndex(newsize);
	}

	mpool_lock(mp);

	/* quota */
	if (mp->pool_quota.max &&
			(mp->pool_quota.curr + ((u_long) newsize - osize)) > mp->pool_quota.max) {
		io_SetErr(ENOMEM, "Maximum memory quota has been reached");
		mpool_unlock(mp);
		return NULL;
	}

	/* find old memory block */
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[oidx], alloc_node, tmp) {
		if (mem_data(m, void*) == data && mem_size(m) == osize) {
			/* case in different buckets */
			if (oidx != idx) {
				TAILQ_REMOVE(&mp->pool_active[oidx], m, alloc_node);
				/* statistics */
				mp->pool_calls.alloc--;
			}
			mp->pool_bytes.alloc -= osize;
			break;
		}
	}
	/* memory block not found! */
	if (!m) {
		mpool_unlock(mp);
		io_SetErr(EFAULT, "Memory block not found");
		return NULL;
	}

	/* try to reallocate memory block to new bucket */
	if (oidx != idx) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		p = realloc(m->alloc_mem, align + 12);
		if (!p) {
			LOGERR;

			/* put the block pulled for reallocation back into its old bucket */
			TAILQ_INSERT_HEAD(&mp->pool_active[oidx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc++;
			mp->pool_bytes.alloc += osize;

			mpool_unlock(mp);
			return NULL;
		} else
			m->alloc_mem = (u_int*) p;
	}
	/* quota */
	mp->pool_quota.curr += (u_long) newsize - osize;

	m->alloc_mem[0] = newsize / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + newsize / sizeof(u_int)] = MEM_MAGIC_STOP;

	if (oidx != idx) {
		TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.alloc++;
	}
	mp->pool_bytes.alloc += newsize;

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	mpool_unlock(mp);
	return mem_data(m, void*);
}
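
/*
 * Usage sketch (illustrative only; assumes mp is a pool obtained from
 * mpool_init()): a grown block stays registered in the pool and may move
 * when it changes bucket, so the returned pointer must be used afterwards
 * and released with mpool_free() as usual.
 *
 *	char *p = mpool_malloc(mp, 64, "grow_me");
 *	if (p) {
 *		char *np = mpool_realloc(mp, p, 512, NULL);
 *		if (np)
 *			p = np;		// block moved to a larger bucket
 *		mpool_free(mp, p, 0);
 *	}
 */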

/*
 * mpool_purge() - Purge memory block cache and release resources
 *
 * @mp = Memory pool
 * @atmost = Keep at most this many cached blocks in each bucket; the rest are freed
 * return: -1 error or 0 ok
 */
int
mpool_purge(mpool_t * __restrict mp, u_int atmost)
{
	register int i, cx;
	struct tagAlloc *m, *tmp;

	if (!mp) {
		io_SetErr(EINVAL, "Pool not specified");
		return -1;
	}

	mpool_lock(mp);

	for (i = cx = 0; i < MEM_BUCKETS; cx = 0, i++) {
		TAILQ_FOREACH_SAFE(m, &mp->pool_inactive[i], alloc_node, tmp) {
			/* keep up to atmost cached blocks in this bucket */
			if (cx < atmost) {
				cx++;
				continue;
			}

			TAILQ_REMOVE(&mp->pool_inactive[i], m, alloc_node);
			/* statistics */
			mp->pool_calls.cache--;
			mp->pool_bytes.cache -= mem_size(m);

			mp->pool_calls.free++;
			mp->pool_bytes.free += mem_size(m);
			/* quota */
			mp->pool_quota.curr -= mem_size(m);

			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(mp);
	return 0;
}
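
/*
 * Usage sketch (illustrative only): mpool_purge(mp, 0) drops every cached
 * (inactive) block, while e.g. mpool_purge(mp, 4) keeps up to four cached
 * blocks per bucket and frees the rest back to the system.
 *
 *	mpool_purge(mp, 0);	// release the whole cache
 */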

/*
 * mpool_free() - Free memory allocated with mpool_malloc()
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @purge = if !=0 force release of the memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free(mpool_t * __restrict mp, void * __restrict data, int purge)
{
	int idx;
	struct tagAlloc *m, *tmp;

	assert(data);
	if (!mp) {
		io_SetErr(EINVAL, "Pool not specified");
		return -1;
	}
	/* check address range & sentinel */
	assert(!MEM_BADADDR(data) && !MEM_CORRUPT(data));
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		io_SetErr(EFAULT, "Corrupted memory address");
		return -2;
	} else
		idx = BucketIndex(((u_int*)data)[-2] * sizeof(u_int));

	mpool_lock(mp);
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
		if (mem_data(m, void*) == data) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}
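
/*
 * Note (derived from the code above): with purge == 0 the block is moved
 * to the bucket's inactive list and its bytes still count against the
 * quota, so a later mpool_malloc() of a similar size can reuse it; with
 * purge != 0 the block is returned to the system immediately.
 *
 *	mpool_free(mp, data, 0);	// cache the block for reuse
 * or
 *	mpool_free(mp, data, 1);	// release it to the system at once
 */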

/*
 * mpool_free2() - Free memory allocated with mpool_malloc(), by size and memory name
 *
 * @mp = Memory pool
 * @size = Allocated memory data size
 * @memname = Memory name
 * @purge = if !=0 force release of the memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free2(mpool_t * __restrict mp, u_int size, const char *memname, int purge)
{
	int idx;
	struct tagAlloc *m, *tmp;

	if (!mp || !memname) {
		io_SetErr(EINVAL, "Pool or memory name is not specified");
		return -1;
	} else
		idx = BucketIndex(size);

	mpool_lock(mp);
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
		if (!strcmp(m->alloc_name, memname)) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}

/*
 * mpool_strdup() - String duplicate
 *
 * @mp = Memory pool
 * @str = String
 * @memname = Memory name
 * return: NULL error or !=NULL new string
 */
char *
mpool_strdup(mpool_t * __restrict mp, const char *str, const char *memname)
{
	char *s = NULL;
	u_int len;

	if (!mp) {
		io_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	if (!str) {
		io_SetErr(EINVAL, "String is NULL");
		return NULL;
	} else
		len = strlen(str) + 1;

	s = mpool_malloc(mp, len, memname);
	if (!s)
		return NULL;
	else
		memcpy(s, str, len);

	return s;
}
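
/*
 * Usage sketch (illustrative only): duplicate a string into the pool and
 * release it like any other pool block.
 *
 *	char *copy = mpool_strdup(mp, "hello", "greeting");
 *	if (copy)
 *		mpool_free(mp, copy, 0);
 */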

/*
 * mpool_getmembynam() - Find allocated memory block by size and memory name
 *
 * @mp = Memory pool
 * @size = Memory size
 * @memname = Memory name
 * return: =NULL error or not found, !=NULL allocated memory
 */
inline struct tagAlloc *
mpool_getmembynam(mpool_t * __restrict mp, u_int size, const char *memname)
{
	int idx;
	struct tagAlloc *m = NULL;

	if (!mp || !memname)
		return NULL;

	idx = BucketIndex(size);
	TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
		if (!strcmp(m->alloc_name, memname))
			break;

	return mem_data(m, void*);
}

/*
 * mpool_getsizebyaddr() - Get size of allocated memory block by address
 *
 * @data = allocated memory from mpool_malloc()
 * return: usable size of allocated memory block
 */
inline u_int
mpool_getsizebyaddr(void * __restrict data)
{
	if (mpool_chkaddr(data))
		return 0;

	return (((u_int*) data)[-2] * sizeof(u_int));
}

/*
 * mpool_chkaddr() - Check validity of given address
 *
 * @data = allocated memory from mpool_malloc()
 * return: -1 bad address, 1 corrupted address or 0 ok
 */
inline int
mpool_chkaddr(void * __restrict data)
{
	/* check address range */
	if (MEM_BADADDR(data))
		return -1;
	/* check sentinel */
	if (MEM_CORRUPT(data))
		return 1;
	/* data address is ok! */
	return 0;
}
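
/*
 * Usage sketch (illustrative only): validate a pointer before trusting
 * the size word stored in front of it.
 *
 *	if (!mpool_chkaddr(data))
 *		printf("block holds %u usable bytes\n", mpool_getsizebyaddr(data));
 */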

/*
 * mpool_setquota() - Change maximum memory quota
 *
 * @mp = Memory pool
 * @maxmem = New max quota size
 * return: old maximum memory quota size
 */
inline u_long
mpool_setquota(mpool_t * __restrict mp, u_long maxmem)
{
	u_long ret;

	if (!mp)
		return 0;

	ret = mp->pool_quota.max;
	mp->pool_quota.max = maxmem;

	/* if the new max quota is less than the currently allocated memory,
	 * 	try to purge the cached memory blocks
	 */
	if (mp->pool_quota.max < mp->pool_quota.curr)
		mpool_purge(mp, 0);

	return ret;
}

/*
 * mpool_getquota() - Get memory quota
 *
 * @mp = Memory pool
 * @currmem = Return current memory
 * @maxmem = Return max quota size
 * return: none
 */
inline void
mpool_getquota(mpool_t * __restrict mp, u_long *currmem, u_long *maxmem)
{
	if (!mp)
		return;

	if (maxmem)
		*maxmem = mp->pool_quota.max;
	if (currmem)
		*currmem = mp->pool_quota.curr;
}
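
/*
 * Usage sketch (illustrative only): cap the pool at 1 MB and inspect the
 * current usage afterwards.
 *
 *	u_long curr, max;
 *
 *	mpool_setquota(mp, 1024 * 1024);
 *	mpool_getquota(mp, &curr, &max);
 *	printf("pool uses %lu of %lu bytes\n", curr, max);
 */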

/* ----------------------------------------------------------- */

/*
 * mpool_statistics() - Dump statistics from memory pool buckets
 *
 * @mp = Memory pool
 * @cb = Export statistics to callback
 * return: none
 */
void
mpool_statistics(mpool_t * __restrict mp, mpool_stat_cb cb)
{
	struct tagAlloc *m;
	register int i, act, inact;

	if (!mp || !cb)
		return;

	for (i = act = inact = 0; i < MEM_BUCKETS; act = inact = 0, i++) {
		TAILQ_FOREACH(m, &mp->pool_active[i], alloc_node)
			act++;
		TAILQ_FOREACH(m, &mp->pool_inactive[i], alloc_node)
			inact++;

		cb(1 << (i + MEM_MIN_BUCKET), act, inact);
	}
}
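
/*
 * Usage sketch (illustrative only): a callback that prints one line per
 * bucket.  The exact typedef of mpool_stat_cb lives in the headers; the
 * signature below merely matches how cb() is invoked above.
 *
 *	static void
 *	print_bucket(u_int size, u_int act, u_int inact)
 *	{
 *		printf("bucket %6u: %u active, %u cached\n", size, act, inact);
 *	}
 *
 *	mpool_statistics(mp, print_bucket);
 */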

/* ----------------------------------------------------------- */

/*
 * mpool_xmalloc() - malloc wrapper
 *
 * @size = Size
 * return: NULL error or !=NULL ok allocated memory
 */
void *
mpool_xmalloc(size_t size)
{
	return mpool_malloc(io_mpool, size, NULL);
}

/*
 * mpool_xcalloc() - calloc wrapper
 *
 * @num = number of elements
 * @size = Size of element
 * return: NULL error or !=NULL ok allocated memory
 */
void *
mpool_xcalloc(size_t num, size_t size)
{
	void *ptr = mpool_malloc(io_mpool, num * size, NULL);

	/* calloc() semantics: zero the block, since a cached pool block may
	 * still hold old data */
	if (ptr)
		memset(ptr, 0, num * size);
	return ptr;
}

/*
 * mpool_xrealloc() - realloc wrapper
 *
 * @data = Allocated memory data
 * @newsize = New size of memory block
 * return: NULL error or !=NULL new reallocated memory block
 */
void *
mpool_xrealloc(void * __restrict data, size_t newsize)
{
	return mpool_realloc(io_mpool, data, newsize, NULL);
}

/*
 * mpool_xfree() - free wrapper
 *
 * @data = Allocated memory data
 * return: none
 */
void
mpool_xfree(void * __restrict data)
{
	mpool_free(io_mpool, data, 0);
}

/*
 * mpool_xstrdup() - strdup wrapper
 *
 * @str = string
 * return: =NULL error or !=NULL new allocated string
 */
char *
mpool_xstrdup(const char *str)
{
	return mpool_strdup(io_mpool, str, NULL);
}
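
/*
 * Usage sketch (illustrative only): the mpool_x*() wrappers operate on
 * io_mpool, the pool presumably set up by the library itself, so they can
 * stand in for the libc allocation calls in code linked against libaitio.
 *
 *	char *s = mpool_xstrdup("elwix");
 *	void *b = mpool_xcalloc(4, 32);
 *	mpool_xfree(b);
 *	mpool_xfree(s);
 */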
