/*************************************************************************
* (C) 2012 AITNET ltd - Sofia/Bulgaria - <misho@elwix.org>
* by Michael Pounov <misho@openbsd-bg.org>
*
* $Author: misho $
* $Id: mem.c,v 1.1.2.7 2012/02/28 10:34:43 misho Exp $
*
**************************************************************************
The ELWIX and AITNET software is distributed under the following
terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
by Michael Pounov <misho@elwix.org>. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
   must display the following acknowledgement:
   This product includes software developed by Michael Pounov <misho@elwix.org>
   ELWIX - Embedded LightWeight unIX and its contributors.
4. Neither the name of AITNET nor the names of its contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
#include "global.h"


/*
 * mpool_init() - Init memory pool
 *
 * @maxmem = If !=0 set maximum memory quota
 * return: =NULL error or !=NULL new allocated pool
 */
mpool_t *
mpool_init(u_long maxmem)
{
	mpool_t *mp;
	register int i;

	mp = malloc(sizeof(mpool_t));
	if (!mp) {
		LOGERR;
		return NULL;
	} else
		memset(mp, 0, sizeof(mpool_t));

	pthread_mutex_init(&mp->pool_mtx, NULL);

	mp->pool_quota.max = maxmem;

	mpool_lock(mp);
	for (i = 0; i < MEM_BUCKETS; i++) {
		TAILQ_INIT(&mp->pool_active[i]);
		TAILQ_INIT(&mp->pool_inactive[i]);
	}
	mpool_unlock(mp);

	return mp;
}

/*
 * mpool_destroy() - Destroy memory pool
 *
 * @mp = Memory pool
 * return: none
 */
void
mpool_destroy(mpool_t ** __restrict mp)
{
	struct tagAlloc *m;
	register int i;

	if (!mp || !*mp)
		return;

	mpool_lock(*mp);

	for (i = 0; i < MEM_BUCKETS; i++) {
		while ((m = TAILQ_FIRST(&(*mp)->pool_active[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_active[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
		while ((m = TAILQ_FIRST(&(*mp)->pool_inactive[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_inactive[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(*mp);
	pthread_mutex_destroy(&(*mp)->pool_mtx);

	free(*mp);
	*mp = NULL;
}
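
/*
 * Usage sketch (illustrative, not part of the library): a typical pool
 * lifecycle using only the API defined in this file. Error handling is
 * minimal and the 0 passed to mpool_init() means "no memory quota".
 *
 *	#include "global.h"
 *
 *	int
 *	example_lifecycle(void)
 *	{
 *		mpool_t *mp = mpool_init(0);
 *
 *		if (!mp)
 *			return -1;
 *		... allocate and free blocks via mpool_malloc() / mpool_free() ...
 *		mpool_destroy(&mp);	// mp is reset to NULL here
 *		return 0;
 *	}
 */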

/* ----------------------------------------------------------- */

/* BucketIndex() - map a requested size to an index into the power-of-2 bucket arrays */
static inline long
BucketIndex(u_int size)
{
	register long b;

	if (!size--)
		return 0;	/* min bucket position in array */

	for (b = MEM_MIN_BUCKET; b < MEM_MAX_BUCKET; b++)
		if (!(size >> b))
			break;

	return b - MEM_MIN_BUCKET;	/* convert to bucket array index */
}
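
/*
 * Worked example (illustrative; the real range depends on the
 * MEM_MIN_BUCKET/MEM_MAX_BUCKET values in global.h): if MEM_MIN_BUCKET
 * were 4, a request of 100 bytes gives size-1 = 99, the first b with
 * !(99 >> b) is 7, so BucketIndex() returns 7 - 4 = 3, i.e. the
 * 128-byte bucket (1 << (3 + MEM_MIN_BUCKET)).
 */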

static inline struct tagAlloc *
pullInactive(mpool_t * __restrict mp, int idx)
{
	struct tagAlloc *m = NULL;

	/* the pool must be locked before calling this function */
	if ((m = TAILQ_FIRST(&mp->pool_inactive[idx]))) {
		TAILQ_REMOVE(&mp->pool_inactive[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.cache--;
		mp->pool_bytes.cache -= mem_size(m);

		/* clear name */
		*m->alloc_name = 0;
		/* clear flags */
		m->alloc_flags = 0;
	}

	return m;
}

/*
 * mpool_malloc() - Memory allocation
 *
 * @mp = Memory pool
 * @size = Size
 * @memname = Optional memory block name
 * return: NULL error or !=NULL allocated memory
 */
void *
mpool_malloc(mpool_t * __restrict mp, u_int size, const char *memname)
{
	struct tagAlloc *m;
	int idx;
	u_int align;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	if (size > MEM_ALLOC_MAX) {
		sess_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else
		size = (size + 3) & ~3;	/* round up to whole u_int words so the stop sentinel lands right after the data */

	idx = BucketIndex(size);

	mpool_lock(mp);

	/* get memory from cache if exists */
	if (!(m = pullInactive(mp, idx))) {
		/* quota */
		if (mp->pool_quota.max &&
				(mp->pool_quota.curr + size) > mp->pool_quota.max) {
			sess_SetErr(ENOMEM, "Maximum allocated memory quota has been reached");
			mpool_unlock(mp);
			return NULL;
		}

		m = malloc(sizeof(struct tagAlloc));
		if (!m) {
			LOGERR;
			mpool_unlock(mp);
			return NULL;
		} else
			memset(m, 0, sizeof(struct tagAlloc));
	}

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	if (!m->alloc_mem) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		m->alloc_mem = malloc(align + 12);	/* +12 bytes: size word plus start/stop sentinels */
		if (!m->alloc_mem) {
			LOGERR;
			free(m);
			mpool_unlock(mp);
			return NULL;
		} else	/* quota */
			mp->pool_quota.curr += size;
	}

	m->alloc_mem[0] = size / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + size / sizeof(u_int)] = MEM_MAGIC_STOP;
	TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
	/* statistics */
	mp->pool_calls.alloc++;
	mp->pool_bytes.alloc += size;

	mpool_unlock(mp);
	return mem_data(m, void*);
}
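
/*
 * Usage sketch (illustrative, not part of the library): allocating a named
 * block and returning it to the cache. The name "rx buffer" and the size
 * are arbitrary; errors are reported through sess_SetErr().
 *
 *	char *buf = mpool_malloc(mp, 512, "rx buffer");
 *
 *	if (buf) {
 *		memset(buf, 0, mpool_getsizebyaddr(buf));
 *		mpool_free(mp, buf, 0);	// purge == 0: keep the block cached for reuse
 *	}
 */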

/*
 * mpool_realloc() - Reallocate memory block with new size
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @newsize = New size of memory block
 * @memname = Optional new memory block name
 * return: NULL error or !=NULL new reallocated memory block
 */
void *
mpool_realloc(mpool_t * __restrict mp, void * __restrict data, u_int newsize, const char *memname)
{
	struct tagAlloc *m, *tmp;
	int idx, oidx;
	void *p;
	u_int align, osize;

	/* if !data execute mpool_malloc() */
	if (!data)
		return mpool_malloc(mp, newsize, memname);

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		sess_SetErr(EFAULT, "Corrupted memory address");
		return NULL;
	} else {
		osize = ((u_int*)data)[-2] * sizeof(u_int);
		oidx = BucketIndex(osize);
	}
	/* prepare new size */
	if (newsize > MEM_ALLOC_MAX) {
		sess_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else {
		newsize = (newsize + 3) & ~3;	/* round up to whole u_int words so the stop sentinel lands right after the data */
		idx = BucketIndex(newsize);
	}

	mpool_lock(mp);

	/* quota */
	if (mp->pool_quota.max &&
			(mp->pool_quota.curr + (newsize - osize)) > mp->pool_quota.max) {
		sess_SetErr(ENOMEM, "Maximum allocated memory quota has been reached");
		mpool_unlock(mp);
		return NULL;
	}

	/* find old memory block */
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[oidx], alloc_node, tmp) {
		if (mem_data(m, void*) == data && mem_size(m) == osize) {
			/* case in different buckets */
			if (oidx != idx) {
				TAILQ_REMOVE(&mp->pool_active[oidx], m, alloc_node);
				/* statistics */
				mp->pool_calls.alloc--;
			}
			mp->pool_bytes.alloc -= osize;
			break;
		}
	}
	/* memory block not found! */
	if (!m) {
		mpool_unlock(mp);
		sess_SetErr(EFAULT, "Memory block not found");
		return NULL;
	}

	/* try to reallocate memory block to new bucket */
	if (oidx != idx) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		p = realloc(m->alloc_mem, align + 12);
		if (!p) {
			LOGERR;

			/* put the block back on its old bucket, since reallocation failed */
			TAILQ_INSERT_HEAD(&mp->pool_active[oidx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc++;
			mp->pool_bytes.alloc += osize;

			mpool_unlock(mp);
			return NULL;
		} else
			m->alloc_mem = p;	/* keep the new buffer; realloc may have moved it */
	}
	/* quota */
	mp->pool_quota.curr += (newsize - osize);

	m->alloc_mem[0] = newsize / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + newsize / sizeof(u_int)] = MEM_MAGIC_STOP;

	if (oidx != idx) {
		TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.alloc++;
	}
	mp->pool_bytes.alloc += newsize;

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	mpool_unlock(mp);
	return mem_data(m, void*);
}
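
/*
 * Usage sketch (illustrative, not part of the library): growing a block.
 * mpool_realloc() behaves like mpool_malloc() when data is NULL, and the
 * returned pointer may differ from the old one when the block moves to a
 * different bucket, so the result must not be discarded.
 *
 *	char *p = mpool_malloc(mp, 64, "line");
 *	char *np = mpool_realloc(mp, p, 256, "line");
 *
 *	if (np)
 *		p = np;		// old pointer may be stale now
 */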

/*
 * mpool_purge() - Purge memory block cache and release resources
 *
 * @mp = Memory pool
 * @atmost = Keep at most this many cached blocks in each bucket
 * return: -1 error or 0 ok
 */
int
mpool_purge(mpool_t * __restrict mp, u_int atmost)
{
	register int i, cx;
	struct tagAlloc *m, *tmp;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return -1;
	}

	mpool_lock(mp);

	for (i = cx = 0; i < MEM_BUCKETS; cx = 0, i++) {
		TAILQ_FOREACH_SAFE(m, &mp->pool_inactive[i], alloc_node, tmp) {
			/* barrier for purge: keep the first atmost cached blocks */
			if (cx < atmost) {
				cx++;
				continue;
			}

			TAILQ_REMOVE(&mp->pool_inactive[i], m, alloc_node);
			/* statistics */
			mp->pool_calls.cache--;
			mp->pool_bytes.cache -= mem_size(m);

			mp->pool_calls.free++;
			mp->pool_bytes.free += mem_size(m);
			/* quota */
			mp->pool_quota.curr -= mem_size(m);

			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(mp);
	return 0;
}
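
/*
 * Usage sketch (illustrative, not part of the library): trimming the
 * inactive cache. A zero atmost releases every cached block; a small
 * value keeps a few blocks per bucket ready for reuse.
 *
 *	mpool_purge(mp, 0);	// drop the whole cache
 *	mpool_purge(mp, 4);	// keep up to 4 cached blocks per bucket
 */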

/*
 * mpool_free() - Free memory allocated with mpool_malloc()
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @purge = if !=0 force release memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free(mpool_t * __restrict mp, void * __restrict data, int purge)
{
	int idx;
	struct tagAlloc *m, *tmp;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return -1;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		sess_SetErr(EFAULT, "Corrupted memory address");
		return -2;
	} else
		idx = BucketIndex(((u_int*)data)[-2] * sizeof(u_int));

	mpool_lock(mp);
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
		if (mem_data(m, void*) == data) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}

/*
 * mpool_free2() - Free memory allocated with mpool_malloc() by size and memory name
 *
 * @mp = Memory pool
 * @size = Allocated memory data size
 * @memname = Memory name
 * @purge = if !=0 force release memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free2(mpool_t * __restrict mp, u_int size, const char *memname, int purge)
{
	int idx;
	struct tagAlloc *m, *tmp;

	if (!mp || !memname) {
		sess_SetErr(EINVAL, "Pool or memory name is not specified");
		return -1;
	} else
		idx = BucketIndex(size);

	mpool_lock(mp);
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
		if (!strcmp(m->alloc_name, memname)) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}
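
/*
 * Usage sketch (illustrative, not part of the library): releasing a block
 * by name. The size must map to the same bucket the block was allocated
 * from, and the name must match what was passed to mpool_malloc().
 *
 *	mpool_malloc(mp, 512, "rx buffer");
 *	...
 *	mpool_free2(mp, 512, "rx buffer", 1);	// purge != 0: really free it
 */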

/*
 * mpool_getmembynam() - Find allocated memory block by size and memory name
 *
 * @mp = Memory pool
 * @size = Memory size
 * @memname = Memory name
 * return: NULL error or not found, !=NULL allocated memory
 */
inline struct tagAlloc *
mpool_getmembynam(mpool_t * __restrict mp, u_int size, const char *memname)
{
	int idx;
	struct tagAlloc *m = NULL;

	if (!mp || !memname)
		return NULL;

	idx = BucketIndex(size);
	TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
		if (!strcmp(m->alloc_name, memname))
			break;

	return mem_data(m, void*);
}

/*
 * mpool_getsizebyaddr() - Get size of allocated memory block by address
 *
 * @data = allocated memory from mpool_malloc()
 * return: usable size of allocated memory block
 */
inline u_int
mpool_getsizebyaddr(void * __restrict data)
{
	if (mpool_chkaddr(data))
		return 0;

	return (((u_int*) data)[-2] * sizeof(u_int));
}

/*
 * mpool_chkaddr() - Check validity of given address
 *
 * @data = allocated memory from mpool_malloc()
 * return: -1 bad address, 1 corrupted address or 0 ok
 */
inline int
mpool_chkaddr(void * __restrict data)
{
	/* check address range */
	if (MEM_BADADDR(data))
		return -1;
	/* check sentinel */
	if (MEM_CORRUPT(data))
		return 1;
	/* data address is ok! */
	return 0;
}
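
/*
 * Usage sketch (illustrative, not part of the library; printf assumes
 * <stdio.h>): validating a pointer before use. mpool_getsizebyaddr()
 * returns the stored usable size, i.e. the requested size rounded up to
 * a multiple of 4, so it can be slightly larger than what was requested.
 *
 *	if (!mpool_chkaddr(p))
 *		printf("block holds %u usable bytes\n", mpool_getsizebyaddr(p));
 */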

/*
 * mpool_setquota() - Change maximum memory quota
 *
 * @mp = Memory pool
 * @maxmem = New max quota size
 * return: old maximum memory quota size
 */
inline u_long
mpool_setquota(mpool_t * __restrict mp, u_long maxmem)
{
	u_long ret;

	if (!mp)
		return 0;

	ret = mp->pool_quota.max;
	mp->pool_quota.max = maxmem;

	/* if the new max quota is less than the currently allocated memory,
	 * try to purge the cached memory blocks
	 */
	if (mp->pool_quota.max < mp->pool_quota.curr)
		mpool_purge(mp, 0);

	return ret;
}

/*
 * mpool_getquota() - Get memory quota
 *
 * @mp = Memory pool
 * @currmem = Return current memory
 * @maxmem = Return max quota size
 * return: none
 */
inline void
mpool_getquota(mpool_t * __restrict mp, u_long *currmem, u_long *maxmem)
{
	if (!mp)
		return;

	if (maxmem)
		*maxmem = mp->pool_quota.max;
	if (currmem)
		*currmem = mp->pool_quota.curr;
}
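
/*
 * Usage sketch (illustrative, not part of the library; printf assumes
 * <stdio.h>): enforcing a 1 MB memory quota. Lowering the quota below the
 * current usage triggers an immediate cache purge, but already-active
 * allocations stay valid.
 *
 *	u_long curr, max;
 *
 *	mpool_setquota(mp, 1024 * 1024);
 *	mpool_getquota(mp, &curr, &max);
 *	printf("pool uses %lu of %lu bytes\n", curr, max);
 */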

/* ----------------------------------------------------------- */

/*
 * mpool_statistics() - Dump statistics from memory pool buckets
 *
 * @mp = Memory pool
 * @cb = Export statistics to callback
 * return: none
 */
void
mpool_statistics(mpool_t * __restrict mp, mpool_stat_cb cb)
{
	struct tagAlloc *m;
	register int i, act, inact;

	if (!mp || !cb)
		return;

	for (i = act = inact = 0; i < MEM_BUCKETS; act = inact = 0, i++) {
		TAILQ_FOREACH(m, &mp->pool_active[i], alloc_node)
			act++;
		TAILQ_FOREACH(m, &mp->pool_inactive[i], alloc_node)
			inact++;

		cb(1 << (i + MEM_MIN_BUCKET), act, inact);
	}
}
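
/*
 * Usage sketch (illustrative, not part of the library): dumping per-bucket
 * statistics. The callback receives the bucket size in bytes and the number
 * of active and cached (inactive) blocks in that bucket. The exact
 * mpool_stat_cb prototype lives in global.h; the signature below is only
 * inferred from the call above, and dump_bucket() is a hypothetical name.
 *
 *	static void
 *	dump_bucket(u_int size, u_int act, u_int inact)
 *	{
 *		printf("bucket %6u bytes: %u active, %u cached\n", size, act, inact);
 *	}
 *	...
 *	mpool_statistics(mp, dump_bucket);
 */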