Annotation of libaitsess/src/mem.c, revision 1.1.2.7
/*************************************************************************
* (C) 2012 AITNET ltd - Sofia/Bulgaria - <misho@elwix.org>
* by Michael Pounov <misho@openbsd-bg.org>
*
* $Author: misho $
* $Id: mem.c,v 1.1.2.6 2012/02/28 09:28:00 misho Exp $
*
**************************************************************************
The ELWIX and AITNET software is distributed under the following
terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
by Michael Pounov <misho@elwix.org>. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
   must display the following acknowledgement:
   This product includes software developed by Michael Pounov <misho@elwix.org>
   ELWIX - Embedded LightWeight unIX and its contributors.
4. Neither the name of AITNET nor the names of its contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
#include "global.h"


/*
 * mpool_init() - Init memory pool
 *
 * @maxmem = If !=0 set maximum memory quota
 * return: =NULL error or !=NULL new allocated pool
 */
mpool_t *
mpool_init(u_long maxmem)
{
	mpool_t *mp;
	register int i;

	mp = malloc(sizeof(mpool_t));
	if (!mp) {
		LOGERR;
		return NULL;
	} else
		memset(mp, 0, sizeof(mpool_t));

	pthread_mutex_init(&mp->pool_mtx, NULL);

	mp->pool_quota.max = maxmem;

	mpool_lock(mp);
	for (i = 0; i < MEM_BUCKETS; i++) {
		TAILQ_INIT(&mp->pool_active[i]);
		TAILQ_INIT(&mp->pool_inactive[i]);
	}
	mpool_unlock(mp);

	return mp;
}
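
/*
 * Example (illustrative, not part of the library): typical pool lifecycle.
 * Error handling is shortened for brevity.
 *
 *	mpool_t *mp = mpool_init(0);	// 0 = no memory quota
 *	if (mp) {
 *		void *buf = mpool_malloc(mp, 128, "example buffer");
 *		// ... use buf ...
 *		mpool_free(mp, buf, 0);		// 0 = keep the block cached
 *		mpool_destroy(&mp);		// frees everything, sets mp to NULL
 *	}
 */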

/*
 * mpool_destroy() - Destroy memory pool
 *
 * @mp = Memory pool
 * return: none
 */
void
mpool_destroy(mpool_t ** __restrict mp)
{
	struct tagAlloc *m;
	register int i;

	if (!mp || !*mp)
		return;

	mpool_lock(*mp);

	for (i = 0; i < MEM_BUCKETS; i++) {
		while ((m = TAILQ_FIRST(&(*mp)->pool_active[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_active[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
		while ((m = TAILQ_FIRST(&(*mp)->pool_inactive[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_inactive[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(*mp);
	pthread_mutex_destroy(&(*mp)->pool_mtx);

	free(*mp);
	*mp = NULL;
}

/* ----------------------------------------------------------- */

static inline long
BucketIndex(u_int size)
{
	register long b;

	if (!size--)
		return 0;		/* min bucket position in array */

	for (b = MEM_MIN_BUCKET; b < MEM_MAX_BUCKET; b++)
		if (!(size >> b))
			break;

	return b - MEM_MIN_BUCKET;	/* convert to bucket array index */
}
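
/*
 * Worked example (illustrative; assumes MEM_MIN_BUCKET == 4, i.e. a 16-byte
 * smallest bucket, which may differ in the real headers): BucketIndex(100)
 * decrements size to 99 and looks for the first b with (99 >> b) == 0,
 * which is b = 7; the returned index is 7 - 4 = 3 and the corresponding
 * bucket holds 1 << (3 + 4) = 128 bytes, the smallest power of two that
 * fits the request.
 */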

static inline struct tagAlloc *
pullInactive(mpool_t * __restrict mp, int idx)
{
	struct tagAlloc *m = NULL;

	/* the pool must be locked before calling this function */
	if ((m = TAILQ_FIRST(&mp->pool_inactive[idx]))) {
		TAILQ_REMOVE(&mp->pool_inactive[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.cache--;
		mp->pool_bytes.cache -= mem_size(m);

		/* clear name */
		*m->alloc_name = 0;
		/* clear flags */
		m->alloc_flags ^= m->alloc_flags;
	}

	return m;
}

/*
 * mpool_malloc() - Memory allocation
 *
 * @mp = Memory pool
 * @size = Size of requested memory block
 * @memname = Optional memory block name
 * return: NULL error or !=NULL allocated memory
 */
void *
mpool_malloc(mpool_t * __restrict mp, u_int size, const char *memname)
{
	struct tagAlloc *m;
	int idx, align;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	if (size > MEM_ALLOC_MAX) {
		sess_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else
		size = (size + 3) & ~3;	/* round up to sizeof(u_int) so the sentinel words stay aligned */

	idx = BucketIndex(size);

	mpool_lock(mp);

	/* get memory from cache if exists */
	if (!(m = pullInactive(mp, idx))) {
		/* quota */
		if (mp->pool_quota.max && 
				(mp->pool_quota.curr + size) > mp->pool_quota.max) {
			sess_SetErr(ENOMEM, "Max.allocate memory quota has been reached");
			mpool_unlock(mp);
			return NULL;
		}

		m = malloc(sizeof(struct tagAlloc));
		if (!m) {
			LOGERR;
			mpool_unlock(mp);
			return NULL;
		} else
			memset(m, 0, sizeof(struct tagAlloc));
	}

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	if (!m->alloc_mem) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		m->alloc_mem = malloc(align + 12);	/* +12 sentinel bytes */
		if (!m->alloc_mem) {
			LOGERR;
			free(m);
			mpool_unlock(mp);
			return NULL;
		} else	/* quota */
			mp->pool_quota.curr += size;
	}

	m->alloc_mem[0] = size / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + size / sizeof(u_int)] = MEM_MAGIC_STOP;
	TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
	/* statistics */
	mp->pool_calls.alloc++;
	mp->pool_bytes.alloc += size;

	mpool_unlock(mp);
	return mem_data(m, void*);
}
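
/*
 * Example (illustrative, not part of the library): named allocation.  The
 * returned pointer sits just past two sentinel words, which is why the rest
 * of this file can read the block size back as
 * ((u_int*) data)[-2] * sizeof(u_int).
 *
 *	void *p = mpool_malloc(mp, 200, "rx buffer");
 *	if (p) {
 *		// 200 is served from the smallest bucket that fits it
 *		// (typically 256 bytes); the usable size reported back
 *		// is the rounded request, i.e. 200 here
 *		u_int usable = mpool_getsizebyaddr(p);
 *		mpool_free(mp, p, 0);
 *	}
 */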

/*
 * mpool_realloc() - Reallocate memory block with new size
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @newsize = New size of memory block
 * @memname = Optional new memory block name
 * return: NULL error or !=NULL new reallocated memory block
 */
void *
mpool_realloc(mpool_t * __restrict mp, void * __restrict data, u_int newsize, const char *memname)
{
	struct tagAlloc *m, *tmp;
	int idx, align, oidx, osize;
	void *p;

	/* if !data execute mpool_malloc() */
	if (!data)
		return mpool_malloc(mp, newsize, memname);

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		sess_SetErr(EFAULT, "Corrupted memory address");
		return NULL;
	} else {
		osize = ((u_int*)data)[-2] * sizeof(u_int);
		oidx = BucketIndex(osize);
	}
	/* prepare new size */
	if (newsize > MEM_ALLOC_MAX) {
		sess_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else {
		newsize = (newsize + 3) & ~3;	/* round up to sizeof(u_int) so the sentinel words stay aligned */
		idx = BucketIndex(newsize);
	}

	mpool_lock(mp);

	/* quota */
	if (mp->pool_quota.max && 
			(mp->pool_quota.curr + (newsize - osize)) > mp->pool_quota.max) {
		sess_SetErr(ENOMEM, "Max.allocate memory quota has been reached");
		mpool_unlock(mp);
		return NULL;
	}

	/* find old memory block */
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[oidx], alloc_node, tmp)
		if (mem_data(m, void*) == data && mem_size(m) == osize) {
			TAILQ_REMOVE(&mp->pool_active[oidx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= osize;
			break;	/* keep m pointing at the found block */
		}
	/* memory block not found! */
	if (!m) {
		mpool_unlock(mp);
		sess_SetErr(EFAULT, "Memory block not found");
		return NULL;
	}

	/* try to reallocate memory block to new bucket */
	align = 1 << (idx + MEM_MIN_BUCKET);
	p = realloc(m->alloc_mem, align + 12);
	if (!p) {
		LOGERR;

		/* restore to old bucket pulled memory block for reallocation */
		TAILQ_INSERT_HEAD(&mp->pool_active[oidx], m, alloc_node);
		/* statistics */
		mp->pool_calls.alloc++;
		mp->pool_bytes.alloc += osize;

		mpool_unlock(mp);
		return NULL;
	} else {
		m->alloc_mem = p;	/* realloc() may have moved the block */
		/* quota */
		mp->pool_quota.curr += (newsize - osize);
	}

	m->alloc_mem[0] = newsize / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + newsize / sizeof(u_int)] = MEM_MAGIC_STOP;
	TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
	/* statistics */
	mp->pool_calls.alloc++;
	mp->pool_bytes.alloc += newsize;

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	mpool_unlock(mp);
	return mem_data(m, void*);
}
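
/*
 * Example (illustrative, not part of the library): growing a block.  A
 * pointer obtained from mpool_malloc() is passed back in; on success the
 * returned pointer must be used instead of the old one, exactly as with
 * the standard realloc().
 *
 *	char *buf = mpool_malloc(mp, 64, "line buffer");
 *	char *nbuf = mpool_realloc(mp, buf, 256, "line buffer");
 *	if (nbuf)
 *		buf = nbuf;	// old pointer may have moved to a new bucket
 *	else
 *		;		// buf is still valid and still registered in the pool
 */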

/*
 * mpool_purge() - Purge memory block cache and release resources
 *
 * @mp = Memory pool
 * @atmost = Keep at most this many cached blocks in each bucket
 * return: -1 error or 0 ok
 */
int
mpool_purge(mpool_t * __restrict mp, u_int atmost)
{
	register int i, cx;
	struct tagAlloc *m, *tmp;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return -1;
	}

	mpool_lock(mp);

	for (i = cx = 0; i < MEM_BUCKETS; cx = 0, i++) {
		TAILQ_FOREACH_SAFE(m, &mp->pool_inactive[i], alloc_node, tmp) {
			/* keep the first atmost cached blocks in this bucket */
			if (cx < atmost) {
				cx++;
				continue;
			}

			TAILQ_REMOVE(&mp->pool_inactive[i], m, alloc_node);
			/* statistics */
			mp->pool_calls.cache--;
			mp->pool_bytes.cache -= mem_size(m);

			mp->pool_calls.free++;
			mp->pool_bytes.free += mem_size(m);
			/* quota */
			mp->pool_quota.curr -= mem_size(m);

			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(mp);
	return 0;
}
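
/*
 * Example (illustrative, not part of the library): trimming the cache.
 * Blocks released with mpool_free(..., 0) stay on the inactive lists;
 * mpool_purge() frees the excess.
 *
 *	mpool_purge(mp, 2);	// keep at most 2 cached blocks per bucket
 *	mpool_purge(mp, 0);	// drop the cache entirely
 */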

/*
 * mpool_free() - Free memory allocated with mpool_malloc()
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @purge = if !=0 force release memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free(mpool_t * __restrict mp, void * __restrict data, int purge)
{
	int idx;
	struct tagAlloc *m, *tmp;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return -1;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		sess_SetErr(EFAULT, "Corrupted memory address");
		return -2;
	} else
		idx = BucketIndex(((u_int*)data)[-2] * sizeof(u_int));

	mpool_lock(mp);
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
		if (mem_data(m, void*) == data) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}
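
/*
 * Example (illustrative, not part of the library): cached vs. forced
 * release.
 *
 *	void *p = mpool_malloc(mp, 512, NULL);
 *	mpool_free(mp, p, 0);			// block is parked on the inactive list
 *	p = mpool_malloc(mp, 512, NULL);	// likely reuses the cached block
 *	mpool_free(mp, p, 1);			// purge != 0: memory goes back to the system
 */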

/*
 * mpool_free2() - Free memory allocated with mpool_malloc() by size and memory name
 *
 * @mp = Memory pool
 * @size = Allocated memory data size
 * @memname = Memory name
 * @purge = if !=0 force release memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free2(mpool_t * __restrict mp, u_int size, const char *memname, int purge)
{
	int idx;
	struct tagAlloc *m, *tmp;

	if (!mp || !memname) {
		sess_SetErr(EINVAL, "Pool or memory name is not specified");
		return -1;
	} else
		idx = BucketIndex(size);

	mpool_lock(mp);
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
		if (!strcmp(m->alloc_name, memname)) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}

/*
 * mpool_getmembynam() - Find allocated memory block by size and memory name
 *
 * @mp = Memory pool
 * @size = Memory size
 * @memname = Memory name
 * return: NULL error or not found and !=NULL allocated memory
 */
inline struct tagAlloc *
mpool_getmembynam(mpool_t * __restrict mp, u_int size, const char *memname)
{
	int idx;
	struct tagAlloc *m = NULL;

	if (!mp || !memname)
		return NULL;

	idx = BucketIndex(size);
	TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
		if (!strcmp(m->alloc_name, memname))
			break;

	return m ? mem_data(m, void*) : NULL;	/* m is NULL when no block matched */
}

/*
 * mpool_getsizebyaddr() - Get size of allocated memory block by address
 *
 * @data = allocated memory from mpool_malloc()
 * return: usable size of allocated memory block
 */
inline u_int
mpool_getsizebyaddr(void * __restrict data)
{
	if (mpool_chkaddr(data))
		return 0;

	return (((u_int*) data)[-2] * sizeof(u_int));
}

/*
 * mpool_chkaddr() - Check validity of given address
 *
 * @data = allocated memory from mpool_malloc()
 * return: -1 bad address, 1 corrupted address or 0 ok
 */
inline int
mpool_chkaddr(void * __restrict data)
{
	/* check address range */
	if (MEM_BADADDR(data))
		return -1;
	/* check sentinel */
	if (MEM_CORRUPT(data))
		return 1;
	/* data address is ok! */
	return 0;
}
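
/*
 * Example (illustrative, not part of the library; assumes <stdio.h>):
 * validating a pointer before use.  If a sentinel word around the block
 * has been overwritten, MEM_CORRUPT() is expected to catch it.
 *
 *	switch (mpool_chkaddr(p)) {
 *	case 0:
 *		printf("ok, %u usable bytes\n", mpool_getsizebyaddr(p));
 *		break;
 *	case -1:
 *		puts("address out of range");
 *		break;
 *	case 1:
 *		puts("sentinel overwritten - heap corruption");
 *		break;
 *	}
 */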

/*
 * mpool_setquota() - Change maximum memory quota
 *
 * @mp = Memory pool
 * @maxmem = New max quota size
 * return: old maximum memory quota size
 */
inline u_long
mpool_setquota(mpool_t * __restrict mp, u_long maxmem)
{
	u_long ret;

	if (!mp)
		return 0;

	ret = mp->pool_quota.max;
	mp->pool_quota.max = maxmem;

	/* if the new max quota is less than the currently allocated memory, 
	 * try to purge cached memory blocks
	 */
	if (mp->pool_quota.max < mp->pool_quota.curr)
		mpool_purge(mp, 0);

	return ret;
}

/*
 * mpool_getquota() - Get memory quota
 *
 * @mp = Memory pool
 * @currmem = Return current memory
 * @maxmem = Return max quota size
 * return: none
 */
inline void
mpool_getquota(mpool_t * __restrict mp, u_long *currmem, u_long *maxmem)
{
	if (!mp)
		return;

	if (maxmem)
		*maxmem = mp->pool_quota.max;
	if (currmem)
		*currmem = mp->pool_quota.curr;
}
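
/*
 * Example (illustrative, not part of the library): putting a 1 MB cap on
 * the pool and inspecting usage.
 *
 *	u_long cur, max, old;
 *
 *	old = mpool_setquota(mp, 1024 * 1024);	// also purges the cache if the
 *						// cap is already exceeded
 *	mpool_getquota(mp, &cur, &max);
 *	// further mpool_malloc() calls fail with ENOMEM once
 *	// cur plus the requested size would exceed max
 */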

/* ----------------------------------------------------------- */

/*
 * mpool_statistics() - Dump statistics from memory pool buckets
 *
 * @mp = Memory pool
 * @cb = Export statistics to callback
 * return: none
 */
void
mpool_statistics(mpool_t * __restrict mp, mpool_stat_cb cb)
{
	struct tagAlloc *m;
	register int i, act, inact;

	if (!mp || !cb)
		return;

	for (i = act = inact = 0; i < MEM_BUCKETS; act = inact = 0, i++) {
		TAILQ_FOREACH(m, &mp->pool_active[i], alloc_node)
			act++;
		TAILQ_FOREACH(m, &mp->pool_inactive[i], alloc_node)
			inact++;

		cb(1 << (i + MEM_MIN_BUCKET), act, inact);
	}
}
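
/*
 * Example (illustrative, not part of the library): a callback matching the
 * way mpool_statistics() invokes cb(bucket_size, active, inactive).  The
 * real mpool_stat_cb typedef lives in the library headers; the signature
 * below is assumed from the call site above.
 *
 *	static void
 *	dump_bucket(u_int size, u_int act, u_int inact)
 *	{
 *		if (act || inact)
 *			printf("bucket %6u bytes: %u active, %u cached\n",
 *			    size, act, inact);
 *	}
 *
 *	...
 *	mpool_statistics(mp, dump_bucket);
 */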