File:
[ELWIX - Embedded LightWeight unIX -] /
libaitio /
src /
Attic /
mem.c
Revision
1.3:
download - view:
text,
annotated -
select for diffs -
revision graph
Wed Jul 25 15:21:59 2012 UTC (12 years, 1 month ago) by
misho
Branches:
MAIN
CVS tags:
io5_0,
io4_1,
io4_0,
io3_9,
io3_8,
io3_7,
io3_6,
io3_5,
io3_4,
IO4_1,
IO4_0,
IO3_9,
IO3_8,
IO3_7,
IO3_6,
IO3_5,
IO3_4,
IO3_3,
HEAD
version 3.3
1: /*************************************************************************
2: * (C) 2012 AITNET ltd - Sofia/Bulgaria - <misho@elwix.org>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: mem.c,v 1.3 2012/07/25 15:21:59 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47:
48:
49: /*
50: * mpool_init() - Init memory pool
51: *
52: * @maxmem = If !=0 set maximum memory quota
53: * return: =NULL error or !=NULL new allocated pool
54: */
55: mpool_t *
56: mpool_init(u_long maxmem)
57: {
58: mpool_t *mp;
59: register int i;
60:
61: mp = malloc(sizeof(mpool_t));
62: if (!mp) {
63: LOGERR;
64: return NULL;
65: } else
66: memset(mp, 0, sizeof(mpool_t));
67:
68: pthread_mutex_init(&mp->pool_mtx, NULL);
69:
70: mp->pool_quota.max = maxmem;
71:
72: mpool_lock(mp);
73: for (i = 0; i < MEM_BUCKETS; i++) {
74: TAILQ_INIT(&mp->pool_active[i]);
75: TAILQ_INIT(&mp->pool_inactive[i]);
76: }
77: mpool_unlock(mp);
78:
79: return mp;
80: }
81:
82: /*
83: * mpool_destroy() - Destroy memory pool
84: *
85: * @mp = Memory pool
86: * return: none
87: */
88: void
89: mpool_destroy(mpool_t ** __restrict mp)
90: {
91: struct tagAlloc *m;
92: register int i;
93:
94: if (!mp && !*mp)
95: return;
96:
97: mpool_lock(*mp);
98:
99: for (i = 0; i < MEM_BUCKETS; i++) {
100: while ((m = TAILQ_FIRST(&(*mp)->pool_active[i]))) {
101: TAILQ_REMOVE(&(*mp)->pool_active[i], m, alloc_node);
102: if (m->alloc_mem)
103: free(m->alloc_mem);
104: free(m);
105: }
106: while ((m = TAILQ_FIRST(&(*mp)->pool_inactive[i]))) {
107: TAILQ_REMOVE(&(*mp)->pool_inactive[i], m, alloc_node);
108: if (m->alloc_mem)
109: free(m->alloc_mem);
110: free(m);
111: }
112: }
113:
114: mpool_unlock(*mp);
115: pthread_mutex_destroy(&(*mp)->pool_mtx);
116:
117: free(*mp);
118: *mp = NULL;
119: }
120:
121: /* ----------------------------------------------------------- */
122:
123: static inline long
124: BucketIndex(u_int size)
125: {
126: register long b;
127:
128: if (!size--)
129: return 0; /* min bucket position in array */
130:
131: for (b = MEM_MIN_BUCKET; b < MEM_MAX_BUCKET; b++)
132: if (!(size >> b))
133: break;
134:
135: return b - MEM_MIN_BUCKET; /* convert to bucket array index */
136: }
137:
138: static inline struct tagAlloc *
139: pullInactive(mpool_t * __restrict mp, int idx)
140: {
141: struct tagAlloc *m = NULL;
142:
143: /* must be locked pool before use this function */
144: if ((m = TAILQ_FIRST(&mp->pool_inactive[idx]))) {
145: TAILQ_REMOVE(&mp->pool_inactive[idx], m, alloc_node);
146: /* statistics */
147: mp->pool_calls.cache--;
148: mp->pool_bytes.cache -= mem_size(m);
149:
150: /* clear name */
151: *m->alloc_name = 0;
152: }
153:
154: return m;
155: }
156:
/*
 * mpool_malloc() - Memory allocation
 *
 * Serves the request from the per-bucket inactive cache when possible,
 * otherwise allocates a fresh block (subject to the pool quota).
 * Block layout in alloc_mem (u_int units): [0]=size/4, [1]=start magic,
 * payload from [2], stop magic right after the payload.
 *
 * @mp = Memory pool
 * @size = Size (bytes; capped at MEM_ALLOC_MAX, rounded up to 4)
 * @memname = Optional memory block name
 * return: NULL error or !=NULL ok allocated memory
 */
void *
mpool_malloc(mpool_t * __restrict mp, u_int size, const char *memname)
{
	struct tagAlloc *m;
	int idx;
	u_int align;

	if (!mp) {
		io_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	if (size > MEM_ALLOC_MAX) {
		io_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else
		size = (size + 3) & ~3;	/* must align to 4 because needed room for sentinels */

	idx = BucketIndex(size);

	mpool_lock(mp);

	/* get memory from cache if exists */
	if (!(m = pullInactive(mp, idx))) {
		/* quota is only charged for fresh allocations; cached blocks
		 * keep their original charge */
		if (mp->pool_quota.max && 
				(mp->pool_quota.curr + size) > mp->pool_quota.max) {
			io_SetErr(ENOMEM, "Max.allocate memory quota has been reached");
			mpool_unlock(mp);
			return NULL;
		}

		m = malloc(sizeof(struct tagAlloc));
		if (!m) {
			LOGERR;
			mpool_unlock(mp);
			return NULL;
		} else
			memset(m, 0, sizeof(struct tagAlloc));
	}

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	/* fresh node (cached nodes already carry their backing buffer) */
	if (!m->alloc_mem) {
		/* bucket capacity: smallest power of two >= size */
		align = 1 << (idx + MEM_MIN_BUCKET);
		m->alloc_mem = malloc(align + 12);	/* +12 sentinel bytes */
		if (!m->alloc_mem) {
			LOGERR;
			free(m);
			mpool_unlock(mp);
			return NULL;
		} else {	/* quota */
			mp->pool_quota.curr += size;
			memset(m->alloc_mem, 0, align + 12);
		}
	}

	/* write header + sentinels; size is stored in u_int units
	 * (NOTE(review): the +12 byte reserve presumes sizeof(u_int)==4) */
	m->alloc_mem[0] = size / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + size / sizeof(u_int)] = MEM_MAGIC_STOP;
	TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
	/* statistics */
	mp->pool_calls.alloc++;
	mp->pool_bytes.alloc += size;

	mpool_unlock(mp);
	return mem_data(m, void*);	/* presumably &alloc_mem[2] — payload start */
}
233:
/*
 * mpool_realloc() Reallocate memory block with new size
 *
 * Looks the block up in its old bucket, moves it to a new bucket (with a
 * realloc() of the backing buffer) when the size class changes, and
 * rewrites the header/sentinels for the new size.
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @newsize = New size of memory block
 * @memname = Optional new memory block name
 * return: NULL error or !=NULL new reallocated memory block
 */
void *
mpool_realloc(mpool_t * __restrict mp, void * __restrict data, u_int newsize, const char *memname)
{
	struct tagAlloc *m, *tmp;
	int idx, oidx;
	void *p;
	u_int align, osize;

	/* if !data execute mpool_malloc() — standard realloc(NULL, n) semantics */
	if (!data)
		return mpool_malloc(mp, newsize, memname);

	if (!mp) {
		io_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		io_SetErr(EFAULT, "Corrupted memory address");
		return NULL;
	} else {
		/* old usable size lives in the header word before the payload */
		osize = ((u_int*)data)[-2] * sizeof(u_int);
		oidx = BucketIndex(osize);
	}
	/* prepare new size */
	if (newsize > MEM_ALLOC_MAX) {
		io_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else {
		newsize = (newsize + 3) & ~3;	/* must align to 4 because needed room for sentinels */
		idx = BucketIndex(newsize);
	}

	mpool_lock(mp);

	/* quota: the delta may be "negative" — u_long wraparound keeps the
	 * comparison arithmetic consistent in that case */
	if (mp->pool_quota.max && 
			(mp->pool_quota.curr + ((u_long) newsize - osize)) > mp->pool_quota.max) {
		io_SetErr(ENOMEM, "Max.allocate memory quota has been reached");
		mpool_unlock(mp);
		return NULL;
	}

	/* find old memory block; match both payload address and stored size */
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[oidx], alloc_node, tmp) {
		if (mem_data(m, void*) == data && mem_size(m) == osize) {
			/* case in different buckets: detach now, re-attach
			 * to the new bucket after a successful realloc */
			if (oidx != idx) {
				TAILQ_REMOVE(&mp->pool_active[oidx], m, alloc_node);
				/* statistics */
				mp->pool_calls.alloc--;
			}
			mp->pool_bytes.alloc -= osize;
			break;
		}
	}
	/* memory block not found! */
	if (!m) {
		mpool_unlock(mp);
		io_SetErr(EFAULT, "Memory block not found");
		return NULL;
	}

	/* try to reallocate memory block to new bucket */
	if (oidx != idx) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		p = realloc(m->alloc_mem, align + 12);
		if (!p) {
			LOGERR;

			/* restore to old bucket pulled memory block for reallocation;
			 * the old buffer is still valid after a failed realloc */
			TAILQ_INSERT_HEAD(&mp->pool_active[oidx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc++;
			mp->pool_bytes.alloc += osize;

			mpool_unlock(mp);
			return NULL;
		} else
			m->alloc_mem = (u_int*) p;
	}
	/* quota */
	mp->pool_quota.curr += (u_long) newsize - osize;

	/* rewrite header and both sentinels for the new size */
	m->alloc_mem[0] = newsize / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + newsize / sizeof(u_int)] = MEM_MAGIC_STOP;

	if (oidx != idx) {
		TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.alloc++;
	}
	mp->pool_bytes.alloc += newsize;

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	mpool_unlock(mp);
	return mem_data(m, void*);
}
344:
345: /*
346: * mpool_purge() - Purge memory block cache and release resources
347: *
348: * @mp = Memory pool
349: * @atmost = Free at most in buckets
350: * return: -1 error or 0 ok
351: */
352: int
353: mpool_purge(mpool_t * __restrict mp, u_int atmost)
354: {
355: register int i, cx;
356: struct tagAlloc *m, *tmp;
357:
358: if (!mp) {
359: io_SetErr(EINVAL, "Pool not specified");
360: return -1;
361: }
362:
363: mpool_lock(mp);
364:
365: for (i = cx = 0; i < MEM_BUCKETS; cx = 0, i++) {
366: TAILQ_FOREACH_SAFE(m, &mp->pool_inactive[i], alloc_node, tmp) {
367: /* barrier for purge */
368: if (cx < atmost) {
369: cx++;
370: continue;
371: }
372:
373: TAILQ_REMOVE(&mp->pool_inactive[i], m, alloc_node);
374: /* statistics */
375: mp->pool_calls.cache--;
376: mp->pool_bytes.cache -= mem_size(m);
377:
378: mp->pool_calls.free++;
379: mp->pool_bytes.free += mem_size(m);
380: /* quota */
381: mp->pool_quota.curr -= mem_size(m);
382:
383: if (m->alloc_mem)
384: free(m->alloc_mem);
385: free(m);
386: }
387: }
388:
389: mpool_unlock(mp);
390: return 0;
391: }
392:
393: /*
394: * mpool_free() Free allocated memory with mpool_alloc()
395: *
396: * @mp = Memory pool
397: * @data = Allocated memory data
398: * @purge = if !=0 force release memory block
399: * return: <0 error or 0 ok released memory block
400: */
401: int
402: mpool_free(mpool_t * __restrict mp, void * __restrict data, int purge)
403: {
404: int idx;
405: struct tagAlloc *m, *tmp;
406:
407: assert(data);
408: if (!mp) {
409: io_SetErr(EINVAL, "Pool not specified");
410: return -1;
411: }
412: /* check address range & sentinel */
413: assert(!MEM_BADADDR(data) && !MEM_CORRUPT(data));
414: if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
415: io_SetErr(EFAULT, "Corrupted memory address");
416: return -2;
417: } else
418: idx = BucketIndex(((u_int*)data)[-2] * sizeof(u_int));
419:
420: mpool_lock(mp);
421: TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
422: if (mem_data(m, void*) == data) {
423: TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
424: /* statistics */
425: mp->pool_calls.alloc--;
426: mp->pool_bytes.alloc -= mem_size(m);
427:
428: if (!purge) {
429: TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
430: /* statistics */
431: mp->pool_calls.cache++;
432: mp->pool_bytes.cache += mem_size(m);
433: } else {
434: /* statistics */
435: mp->pool_calls.free++;
436: mp->pool_bytes.free += mem_size(m);
437: /* quota */
438: mp->pool_quota.curr -= mem_size(m);
439:
440: if (m->alloc_mem)
441: free(m->alloc_mem);
442: free(m);
443: }
444: break;
445: }
446: mpool_unlock(mp);
447:
448: return 0;
449: }
450:
451: /*
452: * mpool_free2() Free allocated memory with mpool_alloc() by size and memory name
453: *
454: * @mp = Memory pool
455: * @size = Allocated memory data size
456: * @memname = Memory name
457: * @purge = if !=0 force release memory block
458: * return: <0 error or 0 ok released memory block
459: */
460: int
461: mpool_free2(mpool_t * __restrict mp, u_int size, const char *memname, int purge)
462: {
463: int idx;
464: struct tagAlloc *m, *tmp;
465:
466: if (!mp || !memname) {
467: io_SetErr(EINVAL, "Pool or memory name is not specified");
468: return -1;
469: } else
470: idx = BucketIndex(size);
471:
472: mpool_lock(mp);
473: TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
474: if (!strcmp(m->alloc_name, memname)) {
475: TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
476: /* statistics */
477: mp->pool_calls.alloc--;
478: mp->pool_bytes.alloc -= mem_size(m);
479:
480: if (!purge) {
481: TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
482: /* statistics */
483: mp->pool_calls.cache++;
484: mp->pool_bytes.cache += mem_size(m);
485: } else {
486: /* statistics */
487: mp->pool_calls.free++;
488: mp->pool_bytes.free += mem_size(m);
489: /* quota */
490: mp->pool_quota.curr -= mem_size(m);
491:
492: if (m->alloc_mem)
493: free(m->alloc_mem);
494: free(m);
495: }
496: break;
497: }
498: mpool_unlock(mp);
499:
500: return 0;
501: }
502:
503: /*
504: * mpool_strdup() - String duplicate
505: *
506: * @mp = Memory pool
507: * @str = String
508: * @memname = Memory name
509: * return: NULL error or !=NULL new string
510: */
511: char *
512: mpool_strdup(mpool_t * __restrict mp, const char *str, const char *memname)
513: {
514: char *s = NULL;
515: u_int len;
516:
517: if (!mp) {
518: io_SetErr(EINVAL, "Pool not specified");
519: return NULL;
520: }
521: if (!str) {
522: io_SetErr(EINVAL, "String is NULL");
523: return NULL;
524: } else
525: len = strlen(str) + 1;
526:
527: s = mpool_malloc(mp, len, memname);
528: if (!s)
529: return NULL;
530: else
531: memcpy(s, str, len);
532:
533: return s;
534: }
535:
536: /*
537: * mpool_getmembynam() Find allocated memory block by size and memory name
538: *
539: * @mp = Memory pool
540: * @size = Memory size
541: * @memname = Memory name
542: * return: NULL error or not found and !=NULL allocated memory
543: */
544: inline struct tagAlloc *
545: mpool_getmembynam(mpool_t * __restrict mp, u_int size, const char *memname)
546: {
547: int idx;
548: struct tagAlloc *m = NULL;
549:
550: if (!mp || !memname)
551: return NULL;
552:
553: idx = BucketIndex(size);
554: TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
555: if (!strcmp(m->alloc_name, memname))
556: break;
557:
558: return mem_data(m, void*);
559: }
560:
561: /*
562: * mpool_getsizebyaddr() - Get size of allocated memory block by address
563: *
564: * @addr = allocated memory from mpool_malloc()
565: * return: usable size of allocated memory block
566: */
567: inline u_int
568: mpool_getsizebyaddr(void * __restrict data)
569: {
570: if (mpool_chkaddr(data))
571: return 0;
572:
573: return (((u_int*) data)[-2] * sizeof(u_int));
574: }
575:
/*
 * mpool_chkaddr() - Check validity of given address
 *
 * @data = allocated memory from mpool_malloc()
 * return: -1 bad address, 1 corrupted address or 0 ok
 */
inline int
mpool_chkaddr(void * __restrict data)
{
	/* address outside the valid range? */
	if (MEM_BADADDR(data))
		return -1;

	/* sentinel words intact? */
	return MEM_CORRUPT(data) ? 1 : 0;
}
594:
595: /*
596: * mpool_setquota() - Change maximum memory quota
597: *
598: * @mp = Memory pool
599: * @maxmem = New max quota size
600: * return: old maximum memory quota size
601: */
602: inline u_long
603: mpool_setquota(mpool_t * __restrict mp, u_long maxmem)
604: {
605: u_long ret;
606:
607: if (!mp)
608: return 0;
609:
610: ret = mp->pool_quota.max;
611: mp->pool_quota.max = maxmem;
612:
613: /* if new max quota is less then current allocated memory,
614: * try to purge memory cache blocks
615: */
616: if (mp->pool_quota.max < mp->pool_quota.curr)
617: mpool_purge(mp, 0);
618:
619: return ret;
620: }
621:
622: /*
623: * mpool_getquota() - Get memory quota
624: *
625: * @mp = Memory pool
626: * @currmem = Return current memory
627: * @maxmem = Return max quota size
628: * return: none
629: */
630: inline void
631: mpool_getquota(mpool_t * __restrict mp, u_long *currmem, u_long *maxmem)
632: {
633: if (!mp)
634: return;
635:
636: if (maxmem)
637: *maxmem = mp->pool_quota.max;
638: if (currmem)
639: *currmem = mp->pool_quota.curr;
640: }
641:
642: /* ----------------------------------------------------------- */
643:
644: /*
645: * mpool_statistics() - Dump statistics from memory pool buckets
646: *
647: * @mp = Memory pool
648: * @cb = Export statistics to callback
649: * return: none
650: */
651: void
652: mpool_statistics(mpool_t * __restrict mp, mpool_stat_cb cb)
653: {
654: struct tagAlloc *m;
655: register int i, act, inact;
656:
657: if (!mp || !cb)
658: return;
659:
660: for (i = act = inact = 0; i < MEM_BUCKETS; act = inact = 0, i++) {
661: TAILQ_FOREACH(m, &mp->pool_active[i], alloc_node)
662: act++;
663: TAILQ_FOREACH(m, &mp->pool_inactive[i], alloc_node)
664: inact++;
665:
666: cb(1 << (i + MEM_MIN_BUCKET), act, inact);
667: }
668: }
669:
670: /* ----------------------------------------------------------- */
671:
672: /*
673: * mpool_xmalloc() - malloc wrapper
674: *
675: * @size = Size
676: * return: NULL error or !=NULL ok allocated memory
677: */
678: void *
679: mpool_xmalloc(size_t size)
680: {
681: return mpool_malloc(io_mpool, size, NULL);
682: }
683:
684: /*
685: * mpool_xcalloc() - calloc wrapper
686: *
687: * @num = number of elements
688: * @size = Size of element
689: * return: NULL error or !=NULL ok allocated memory
690: */
691: void *
692: mpool_xcalloc(size_t num, size_t size)
693: {
694: return mpool_malloc(io_mpool, num * size, NULL);
695: }
696:
697: /*
698: * mpool_xrealloc() - realloc wrapper
699: *
700: * @data = Allocated memory data
701: * @newsize = New size of memory block
702: * return: NULL error or !=NULL new reallocated memory block
703: */
704: void *
705: mpool_xrealloc(void * __restrict data, size_t newsize)
706: {
707: return mpool_realloc(io_mpool, data, newsize, NULL);
708: }
709:
710: /*
711: * mpool_xfree() - free wrapper
712: *
713: * @data = Allocated memory data
714: * return: none
715: */
716: void
717: mpool_xfree(void * __restrict data)
718: {
719: mpool_free(io_mpool, data, 0);
720: }
721:
722: /*
723: * mpool_xstrdup() - strdup wrapper
724: *
725: * @str = string
726: * return: =NULL error or !=NULL new allocated string
727: */
728: char *
729: mpool_xstrdup(const char *str)
730: {
731: return mpool_strdup(io_mpool, str, NULL);
732: }
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>