/*************************************************************************
* (C) 2012 AITNET ltd - Sofia/Bulgaria - <misho@elwix.org>
*  by Michael Pounov <misho@openbsd-bg.org>
*
* $Author: misho $
* $Id: mem.c,v 1.4 2014/01/29 16:42:57 misho Exp $
*
**************************************************************************
The ELWIX and AITNET software is distributed under the following
terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004 - 2015
	by Michael Pounov <misho@elwix.org>.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
   must display the following acknowledgement:
This product includes software developed by Michael Pounov <misho@elwix.org>
ELWIX - Embedded LightWeight unIX and its contributors.
4. Neither the name of AITNET nor the names of its contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
#include "global.h"


mpool_t *elwix_mpool;


/*
 * mpool_init() - Init memory pool
 *
 * @maxmem = If !=0 set maximum memory quota
 * return: =NULL error or !=NULL new allocated pool
 */
mpool_t *
mpool_init(u_long maxmem)
{
	mpool_t *mp;
	register int i;

	mp = malloc(sizeof(mpool_t));
	if (!mp) {
		LOGERR;
		return NULL;
	} else
		memset(mp, 0, sizeof(mpool_t));

	pthread_mutex_init(&mp->pool_mtx, NULL);

	mp->pool_quota.max = maxmem;

	mpool_lock(mp);
	for (i = 0; i < MEM_BUCKETS; i++) {
		TAILQ_INIT(&mp->pool_active[i]);
		TAILQ_INIT(&mp->pool_inactive[i]);
	}
	mpool_unlock(mp);

	return mp;
}

/*
 * mpool_destroy() - Destroy memory pool
 *
 * @mp = Memory pool
 * return: none
 */
void
mpool_destroy(mpool_t ** __restrict mp)
{
	struct tagAlloc *m;
	register int i;

	if (!mp || !*mp)
		return;

	mpool_lock(*mp);

	for (i = 0; i < MEM_BUCKETS; i++) {
		while ((m = TAILQ_FIRST(&(*mp)->pool_active[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_active[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
		while ((m = TAILQ_FIRST(&(*mp)->pool_inactive[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_inactive[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(*mp);
	pthread_mutex_destroy(&(*mp)->pool_mtx);

	free(*mp);
	*mp = NULL;
}
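
/*
 * Example (illustrative sketch, not part of the library): typical pool
 * lifecycle built only from the functions defined in this file.  The
 * function name, quota value and block name below are made up for
 * illustration.  Kept under #if 0 so it is never compiled into the library.
 */
#if 0
static void
example_pool_lifecycle(void)
{
	mpool_t *mp;
	void *buf;

	mp = mpool_init(1024 * 1024);		/* 1 MB quota; 0 means unlimited */
	if (!mp)
		return;

	buf = mpool_malloc(mp, 256, "example.buf");	/* named 256 byte block */
	if (buf)
		mpool_free(mp, buf, 0);		/* 0 = keep the block in the cache */

	mpool_destroy(&mp);			/* releases everything, sets mp = NULL */
}
#endif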

/* ----------------------------------------------------------- */

static inline long
BucketIndex(u_int size)
{
	register long b;

	if (!size--)
		return 0;		/* min bucket position in array */

	for (b = MEM_MIN_BUCKET; b < MEM_MAX_BUCKET; b++)
		if (!(size >> b))
			break;

	return b - MEM_MIN_BUCKET;	/* convert to bucket array index */
}
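
/*
 * Example (illustrative sketch): how request sizes map to buckets.  Each
 * bucket i holds blocks of capacity 1 << (i + MEM_MIN_BUCKET), so a
 * request is rounded up to the smallest power of two that fits it.  The
 * printed values depend on the MEM_MIN_BUCKET/MEM_MAX_BUCKET definitions
 * in the headers, the sizes below are arbitrary, and <stdio.h> is assumed
 * to be available via global.h.
 */
#if 0
static void
example_bucket_index(void)
{
	u_int sizes[] = { 1, 16, 17, 100, 4096 };
	register int i;

	for (i = 0; i < (int)(sizeof(sizes) / sizeof(sizes[0])); i++)
		printf("size %u -> bucket %ld (capacity %ld)\n", sizes[i],
		    BucketIndex(sizes[i]),
		    1L << (BucketIndex(sizes[i]) + MEM_MIN_BUCKET));
}
#endif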

static inline struct tagAlloc *
pullInactive(mpool_t * __restrict mp, int idx)
{
	struct tagAlloc *m = NULL;

	/* the pool must be locked before calling this function */
	if ((m = TAILQ_FIRST(&mp->pool_inactive[idx]))) {
		TAILQ_REMOVE(&mp->pool_inactive[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.cache--;
		mp->pool_bytes.cache -= mem_size(m);

		/* clear name */
		*m->alloc_name = 0;
	}

	return m;
}

/*
 * mpool_malloc() - Memory allocation
 *
 * @mp = Memory pool
 * @size = Size
 * @memname = Optional memory block name
 * return: =NULL error or !=NULL allocated memory
 */
void *
mpool_malloc(mpool_t * __restrict mp, u_int size, const char *memname)
{
	struct tagAlloc *m;
	int idx;
	u_int align;

	if (!mp) {
		elwix_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	if (size > MEM_ALLOC_MAX) {
		elwix_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else
		size = (size + 3) & ~3;	/* align to 4 bytes to leave room for the sentinels */

	idx = BucketIndex(size);

	mpool_lock(mp);

	/* reuse a cached block if one exists */
	if (!(m = pullInactive(mp, idx))) {
		/* quota */
		if (mp->pool_quota.max && 
				(mp->pool_quota.curr + size) > mp->pool_quota.max) {
			elwix_SetErr(ENOMEM, "Max.allocate memory quota has been reached");
			mpool_unlock(mp);
			return NULL;
		}

		m = malloc(sizeof(struct tagAlloc));
		if (!m) {
			LOGERR;
			mpool_unlock(mp);
			return NULL;
		} else
			memset(m, 0, sizeof(struct tagAlloc));
	}

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	if (!m->alloc_mem) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		m->alloc_mem = malloc(align + 12);	/* +12 sentinel bytes */
		if (!m->alloc_mem) {
			LOGERR;
			free(m);
			mpool_unlock(mp);
			return NULL;
		} else {	/* quota */
			mp->pool_quota.curr += size;
			memset(m->alloc_mem, 0, align + 12);
		}
	}

	m->alloc_mem[0] = size / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + size / sizeof(u_int)] = MEM_MAGIC_STOP;
	TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
	/* statistics */
	mp->pool_calls.alloc++;
	mp->pool_bytes.alloc += size;

	mpool_unlock(mp);
	return mem_data(m, void*);
}
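
/*
 * Example (illustrative sketch): allocating a named block and inspecting
 * its usable size via mpool_getsizebyaddr(), declared later in this file.
 * Requests are rounded up to a multiple of 4 bytes, so 100 reports as 100
 * here and 99 would report as 100 as well.  The names are arbitrary and
 * printf() is assumed to be available via global.h.
 */
#if 0
static void
example_malloc(mpool_t *mp)
{
	char *p;

	p = mpool_malloc(mp, 100, "example.block");
	if (!p)
		return;					/* quota hit or out of memory */

	printf("usable size: %u bytes\n", mpool_getsizebyaddr(p));	/* 100 */

	mpool_free(mp, p, 1);				/* 1 = release, do not cache */
}
#endif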

/*
 * mpool_realloc() - Reallocate memory block with new size
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @newsize = New size of memory block
 * @memname = Optional new memory block name
 * return: =NULL error or !=NULL new reallocated memory block
 */
void *
mpool_realloc(mpool_t * __restrict mp, void * __restrict data, u_int newsize, const char *memname)
{
	struct tagAlloc *m, *tmp;
	int idx, oidx;
	void *p;
	u_int align, osize;

	/* if !data execute mpool_malloc() */
	if (!data)
		return mpool_malloc(mp, newsize, memname);

	if (!mp) {
		elwix_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		elwix_SetErr(EFAULT, "Corrupted memory address");
		return NULL;
	} else {
		osize = ((u_int*)data)[-2] * sizeof(u_int);
		oidx = BucketIndex(osize);
	}
	/* prepare new size */
	if (newsize > MEM_ALLOC_MAX) {
		elwix_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else {
		newsize = (newsize + 3) & ~3;	/* align to 4 bytes to leave room for the sentinels */
		idx = BucketIndex(newsize);
	}

	mpool_lock(mp);

	/* quota */
	if (mp->pool_quota.max && 
			(mp->pool_quota.curr + ((u_long) newsize - osize)) > mp->pool_quota.max) {
		elwix_SetErr(ENOMEM, "Max.allocate memory quota has been reached");
		mpool_unlock(mp);
		return NULL;
	}

	/* find old memory block */
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[oidx], alloc_node, tmp) {
		if (mem_data(m, void*) == data && mem_size(m) == osize) {
			/* case in different buckets */
			if (oidx != idx) {
				TAILQ_REMOVE(&mp->pool_active[oidx], m, alloc_node);
				/* statistics */
				mp->pool_calls.alloc--;
			}
			mp->pool_bytes.alloc -= osize;
			break;
		}
	}
	/* memory block not found! */
	if (!m) {
		mpool_unlock(mp);
		elwix_SetErr(EFAULT, "Memory block not found");
		return NULL;
	}

	/* try to reallocate memory block to new bucket */
	if (oidx != idx) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		p = realloc(m->alloc_mem, align + 12);
		if (!p) {
			LOGERR;

			/* put the block pulled for reallocation back into its old bucket */
			TAILQ_INSERT_HEAD(&mp->pool_active[oidx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc++;
			mp->pool_bytes.alloc += osize;

			mpool_unlock(mp);
			return NULL;
		} else
			m->alloc_mem = (u_int*) p;
	}
	/* quota */
	mp->pool_quota.curr += (u_long) newsize - osize;

	m->alloc_mem[0] = newsize / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + newsize / sizeof(u_int)] = MEM_MAGIC_STOP;

	if (oidx != idx) {
		TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.alloc++;
	}
	mp->pool_bytes.alloc += newsize;

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	mpool_unlock(mp);
	return mem_data(m, void*);
}
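
/*
 * Example (illustrative sketch): growing a block with mpool_realloc().
 * The underlying buffer is only realloc(3)'d when the new size lands in a
 * different bucket; growing within the same bucket just rewrites the
 * sentinels.  Which sizes share a bucket depends on MEM_MIN_BUCKET, and
 * the function and block names below are made up for illustration.
 */
#if 0
static void
example_realloc(mpool_t *mp)
{
	char *p, *np;

	p = mpool_malloc(mp, 100, "example.grow");
	if (!p)
		return;

	np = mpool_realloc(mp, p, 4000, "example.grow");	/* typically a larger bucket */
	if (!np) {
		mpool_free(mp, p, 0);		/* the old block is still valid on failure */
		return;
	}

	mpool_free(mp, np, 0);
}
#endif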

/*
 * mpool_purge() - Purge memory block cache and release resources
 *
 * @mp = Memory pool
 * @atmost = Keep at most this many cached blocks per bucket, free the rest
 * return: -1 error or 0 ok
 */
int
mpool_purge(mpool_t * __restrict mp, u_int atmost)
{
	register int i, cx;
	struct tagAlloc *m, *tmp;

	if (!mp) {
		elwix_SetErr(EINVAL, "Pool not specified");
		return -1;
	}

	mpool_lock(mp);

	for (i = cx = 0; i < MEM_BUCKETS; cx = 0, i++) {
		TAILQ_FOREACH_SAFE(m, &mp->pool_inactive[i], alloc_node, tmp) {
			/* barrier for purge */
			if (cx < atmost) {
				cx++;
				continue;
			}

			TAILQ_REMOVE(&mp->pool_inactive[i], m, alloc_node);
			/* statistics */
			mp->pool_calls.cache--;
			mp->pool_bytes.cache -= mem_size(m);

			mp->pool_calls.free++;
			mp->pool_bytes.free += mem_size(m);
			/* quota */
			mp->pool_quota.curr -= mem_size(m);

			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(mp);
	return 0;
}
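
/*
 * Example (illustrative sketch): trimming the block cache.  Blocks freed
 * with purge == 0 stay on the inactive lists; mpool_purge() then releases
 * all but `atmost` cached blocks in each bucket back to the system.  The
 * function and block names below are made up for illustration.
 */
#if 0
static void
example_purge(mpool_t *mp)
{
	void *a = mpool_malloc(mp, 64, "example.a");
	void *b = mpool_malloc(mp, 64, "example.b");

	mpool_free(mp, a, 0);		/* cached, not released */
	mpool_free(mp, b, 0);		/* cached, not released */

	mpool_purge(mp, 1);		/* keep one cached block per bucket, free the rest */
	mpool_purge(mp, 0);		/* drop the entire cache */
}
#endif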

/*
 * mpool_free() - Free memory allocated with mpool_malloc()
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @purge = if !=0 force release memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free(mpool_t * __restrict mp, void * __restrict data, int purge)
{
	int idx;
	struct tagAlloc *m, *tmp;

	if (!data)
		return 0;
	if (!mp) {
		elwix_SetErr(EINVAL, "Pool not specified");
		return -1;
	}
	/* check address range & sentinel */
	assert(!MEM_BADADDR(data) && !MEM_CORRUPT(data));
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		elwix_SetErr(EFAULT, "Corrupted memory address");
		return -2;
	} else
		idx = BucketIndex(((u_int*)data)[-2] * sizeof(u_int));

	mpool_lock(mp);
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
		if (mem_data(m, void*) == data) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}
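
/*
 * Example (illustrative sketch): the two release modes of mpool_free().
 * With purge == 0 the block is parked on the bucket's inactive list and a
 * later mpool_malloc() of a similar size can reuse it without calling
 * malloc(3); with purge != 0 it is returned to the system immediately.
 * The function and block names below are made up for illustration.
 */
#if 0
static void
example_free_modes(mpool_t *mp)
{
	void *p, *q;

	p = mpool_malloc(mp, 200, "example.cached");
	mpool_free(mp, p, 0);				/* goes to the inactive cache */

	q = mpool_malloc(mp, 200, "example.reuse");	/* likely reuses p's buffer */
	mpool_free(mp, q, 1);				/* released back to the system */
}
#endif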

/*
 * mpool_free2() - Free memory allocated with mpool_malloc(), found by size and memory name
 *
 * @mp = Memory pool
 * @size = Allocated memory data size
 * @memname = Memory name
 * @purge = if !=0 force release memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free2(mpool_t * __restrict mp, u_int size, const char *memname, int purge)
{
	int idx;
	struct tagAlloc *m, *tmp;

	if (!mp || !memname) {
		elwix_SetErr(EINVAL, "Pool or memory name is not specified");
		return -1;
	} else
		idx = BucketIndex(size);

	mpool_lock(mp);
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
		if (!strcmp(m->alloc_name, memname)) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}
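
/*
 * Example (illustrative sketch): releasing a block by name instead of by
 * address.  The size argument is only used to pick the bucket to search,
 * so it has to land in the same bucket as the original request; passing
 * the original size is the simplest way to do that.  The names below are
 * made up for illustration.
 */
#if 0
static void
example_free_by_name(mpool_t *mp)
{
	if (!mpool_malloc(mp, 512, "example.named"))
		return;

	mpool_free2(mp, 512, "example.named", 1);	/* 1 = release, do not cache */
}
#endif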

/*
 * mpool_strdup() - String duplicate
 *
 * @mp = Memory pool
 * @str = String
 * @memname = Memory name
 * return: NULL error or !=NULL new string
 */
char *
mpool_strdup(mpool_t * __restrict mp, const char *str, const char *memname)
{
	char *s = NULL;
	u_int len;

	if (!mp) {
		elwix_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	if (!str) {
		elwix_SetErr(EINVAL, "String is NULL");
		return NULL;
	} else
		len = strlen(str) + 1;

	s = mpool_malloc(mp, len, memname);
	if (!s)
		return NULL;
	else
		memcpy(s, str, len);

	return s;
}
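
/*
 * Example (illustrative sketch): duplicating a string into the pool.  The
 * copy lives in a pool block (strlen + 1 bytes, rounded up as usual) and
 * is released like any other allocation.  The names are arbitrary and
 * printf() is assumed to be available via global.h.
 */
#if 0
static void
example_strdup(mpool_t *mp)
{
	char *s;

	s = mpool_strdup(mp, "hello, elwix", "example.str");
	if (!s)
		return;

	printf("%s (%u usable bytes)\n", s, mpool_getsizebyaddr(s));
	mpool_free(mp, s, 0);
}
#endif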

/*
 * mpool_getmembynam() - Find allocated memory block by size and memory name
 *
 * @mp = Memory pool
 * @size = Memory size
 * @memname = Memory name
 * return: =NULL error or not found, !=NULL allocated memory data
 */
struct tagAlloc *
mpool_getmembynam(mpool_t * __restrict mp, u_int size, const char *memname)
{
	int idx;
	struct tagAlloc *m = NULL;

	if (!mp || !memname)
		return NULL;

	idx = BucketIndex(size);
	TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
		if (!strcmp(m->alloc_name, memname))
			break;

	return mem_data(m, void*);
}

/*
 * mpool_getsizebyaddr() - Get size of allocated memory block by address
 *
 * @data = allocated memory from mpool_malloc()
 * return: usable size of allocated memory block
 */
u_int
mpool_getsizebyaddr(void * __restrict data)
{
	if (mpool_chkaddr(data))
		return 0;

	return (((u_int*) data)[-2] * sizeof(u_int));
}

/*
 * mpool_chkaddr() - Check validity of given address
 *
 * @data = allocated memory from mpool_malloc()
 * return: -1 bad address, 1 corrupted address or 0 ok
 */
int
mpool_chkaddr(void * __restrict data)
{
	/* check address range */
	if (MEM_BADADDR(data))
		return -1;
	/* check sentinel */
	if (MEM_CORRUPT(data))
		return 1;
	/* data address is ok! */
	return 0;
}
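
/*
 * Example (illustrative sketch): catching a small overrun with the
 * sentinels.  Writing one byte past the requested size lands on the
 * MEM_MAGIC_STOP word that mpool_malloc() placed after the data area.
 * This assumes MEM_CORRUPT() (defined in the headers) verifies both the
 * MEM_MAGIC_START and MEM_MAGIC_STOP words; the names below are made up
 * and printf() is assumed to be available via global.h.
 */
#if 0
static void
example_overflow_check(mpool_t *mp)
{
	char *p = mpool_malloc(mp, 64, "example.ovf");

	if (!p)
		return;

	printf("before: %d\n", mpool_chkaddr(p));	/* 0 = ok */
	p[64] = 'X';					/* one byte past the end */
	printf("after:  %d\n", mpool_chkaddr(p));	/* 1 = corrupted */

	/* a corrupted block should not be handed back to mpool_free() */
}
#endif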

/*
 * mpool_setquota() - Change maximum memory quota
 *
 * @mp = Memory pool
 * @maxmem = New max quota size
 * return: old maximum memory quota size
 */
u_long
mpool_setquota(mpool_t * __restrict mp, u_long maxmem)
{
	u_long ret;

	if (!mp)
		return 0;

	ret = mp->pool_quota.max;
	mp->pool_quota.max = maxmem;

	/* if the new max quota is less than the currently allocated memory, 
	 * try to purge cached memory blocks
	 */
	if (mp->pool_quota.max < mp->pool_quota.curr)
		mpool_purge(mp, 0);

	return ret;
}

/*
 * mpool_getquota() - Get memory quota
 *
 * @mp = Memory pool
 * @currmem = Return current memory
 * @maxmem = Return max quota size
 * return: none
 */
void
mpool_getquota(mpool_t * __restrict mp, u_long *currmem, u_long *maxmem)
{
	if (!mp)
		return;

	if (maxmem)
		*maxmem = mp->pool_quota.max;
	if (currmem)
		*currmem = mp->pool_quota.curr;
}
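
/*
 * Example (illustrative sketch): adjusting and reading the quota at run
 * time.  Lowering the quota below the current usage triggers a full cache
 * purge from inside mpool_setquota(); active blocks are never reclaimed.
 * The function name and sizes are made up for illustration, and printf()
 * is assumed to be available via global.h.
 */
#if 0
static void
example_quota(mpool_t *mp)
{
	u_long curr, max, oldmax;

	oldmax = mpool_setquota(mp, 512 * 1024);	/* shrink quota to 512 KB */
	mpool_getquota(mp, &curr, &max);
	printf("quota: %lu of %lu bytes used (was %lu)\n", curr, max, oldmax);
}
#endif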

/* ----------------------------------------------------------- */

/*
 * mpool_statistics() - Dump statistics from memory pool buckets
 *
 * @mp = Memory pool
 * @cb = Export statistics to callback
 * return: none
 */
void
mpool_statistics(mpool_t * __restrict mp, mpool_stat_cb cb)
{
	struct tagAlloc *m;
	register int i, act, inact;

	if (!mp || !cb)
		return;

	for (i = act = inact = 0; i < MEM_BUCKETS; act = inact = 0, i++) {
		TAILQ_FOREACH(m, &mp->pool_active[i], alloc_node)
			act++;
		TAILQ_FOREACH(m, &mp->pool_inactive[i], alloc_node)
			inact++;

		cb(1 << (i + MEM_MIN_BUCKET), act, inact);
	}
}
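
/*
 * Example (illustrative sketch): a statistics callback.  The callback is
 * invoked once per bucket with the bucket capacity and the number of
 * active and cached (inactive) blocks.  The signature below assumes
 * mpool_stat_cb is compatible with (u_int, u_int, u_int); check the
 * typedef in the headers.  The names are made up and printf() is assumed
 * to be available via global.h.
 */
#if 0
static void
example_stat_cb(u_int size, u_int act, u_int inact)
{
	if (act || inact)
		printf("bucket %6u: %u active, %u cached\n", size, act, inact);
}

static void
example_statistics(mpool_t *mp)
{
	mpool_statistics(mp, example_stat_cb);
}
#endif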

/* ----------------------------------------------------------- */

/*
 * mpool_xmalloc() - malloc wrapper
 *
 * @size = Size
 * return: =NULL error or !=NULL allocated memory
 */
void *
mpool_xmalloc(size_t size)
{
	return mpool_malloc(elwix_mpool, size, elwix_Prog);
}

/*
 * mpool_xcalloc() - calloc wrapper
 *
 * @num = number of elements
 * @size = Size of element
 * return: =NULL error or !=NULL allocated memory
 */
void *
mpool_xcalloc(size_t num, size_t size)
{
	/* note: blocks reused from the pool cache are not re-zeroed */
	return mpool_malloc(elwix_mpool, num * size, elwix_Prog);
}

/*
 * mpool_xrealloc() - realloc wrapper
 *
 * @data = Allocated memory data
 * @newsize = New size of memory block
 * return: =NULL error or !=NULL new reallocated memory block
 */
void *
mpool_xrealloc(void * __restrict data, size_t newsize)
{
	return mpool_realloc(elwix_mpool, data, newsize, elwix_Prog);
}

/*
 * mpool_xfree() - free wrapper
 *
 * @data = Allocated memory data
 * return: none
 */
void
mpool_xfree(void * __restrict data)
{
	mpool_free(elwix_mpool, data, 0);
}

/*
 * mpool_xstrdup() - strdup wrapper
 *
 * @str = string
 * return: =NULL error or !=NULL new allocated string
 */
char *
mpool_xstrdup(const char *str)
{
	return mpool_strdup(elwix_mpool, str, elwix_Prog);
}

/*
 * mpool_xstatistics() - elwix memory pool statistics wrapper
 *
 * @cb = Export statistics to callback
 * return: none
 */
void
mpool_xstatistics(mpool_stat_cb cb)
{
	mpool_statistics(elwix_mpool, cb);
}
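
/*
 * Example (illustrative sketch): the x-wrappers as drop-in replacements
 * for malloc/free, drawing from the global elwix_mpool and tagging every
 * block with the program name (elwix_Prog).  This assumes elwix_mpool has
 * already been set up by the library initialization elsewhere in libelwix;
 * the function name below is made up for illustration.
 */
#if 0
static void
example_xwrappers(void)
{
	char *s = mpool_xstrdup("elwix");
	void *v = mpool_xmalloc(128);

	if (v)
		mpool_xfree(v);		/* cached in elwix_mpool for reuse */
	if (s)
		mpool_xfree(s);
}
#endif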