Annotation of libaitsess/src/mem.c, revision 1.1.2.11
/*************************************************************************
* (C) 2012 AITNET ltd - Sofia/Bulgaria - <misho@elwix.org>
* by Michael Pounov <misho@openbsd-bg.org>
*
* $Author: misho $
* $Id: mem.c,v 1.1.2.10 2012/02/28 12:44:18 misho Exp $
*
**************************************************************************
The ELWIX and AITNET software is distributed under the following
terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
by Michael Pounov <misho@elwix.org>. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
   must display the following acknowledgement:
   This product includes software developed by Michael Pounov <misho@elwix.org>
   ELWIX - Embedded LightWeight unIX and its contributors.
4. Neither the name of AITNET nor the names of its contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
#include "global.h"


/*
 * mpool_init() - Init memory pool
 *
 * @maxmem = If !=0 set maximum memory quota
 * return: =NULL error or !=NULL new allocated pool
 */
mpool_t *
mpool_init(u_long maxmem)
{
	mpool_t *mp;
	register int i;

	mp = malloc(sizeof(mpool_t));
	if (!mp) {
		LOGERR;
		return NULL;
	} else
		memset(mp, 0, sizeof(mpool_t));

	pthread_mutex_init(&mp->pool_mtx, NULL);

	mp->pool_quota.max = maxmem;

	mpool_lock(mp);
	for (i = 0; i < MEM_BUCKETS; i++) {
		TAILQ_INIT(&mp->pool_active[i]);
		TAILQ_INIT(&mp->pool_inactive[i]);
	}
	mpool_unlock(mp);

	return mp;
}

/*
 * mpool_destroy() - Destroy memory pool
 *
 * @mp = Memory pool
 * return: none
 */
void
mpool_destroy(mpool_t ** __restrict mp)
{
	struct tagAlloc *m;
	register int i;

	if (!mp || !*mp)	/* bail out if either the handle or the pool itself is NULL */
		return;

	mpool_lock(*mp);

	for (i = 0; i < MEM_BUCKETS; i++) {
		while ((m = TAILQ_FIRST(&(*mp)->pool_active[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_active[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
		while ((m = TAILQ_FIRST(&(*mp)->pool_inactive[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_inactive[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(*mp);
	pthread_mutex_destroy(&(*mp)->pool_mtx);

	free(*mp);
	*mp = NULL;
}
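
/*
 * A minimal usage sketch for pool creation and teardown, limited to the
 * interfaces defined in this file; the error handling shown is illustrative.
 *
 *	mpool_t *pool = mpool_init(0);		// 0 = no memory quota
 *	if (!pool)
 *		return -1;			// LOGERR already reported the cause
 *	...
 *	mpool_destroy(&pool);			// frees all blocks, sets pool to NULL
 */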

/* ----------------------------------------------------------- */

static inline long
BucketIndex(u_int size)
{
	register long b;

	if (!size--)
		return 0;		/* min bucket position in array */

	for (b = MEM_MIN_BUCKET; b < MEM_MAX_BUCKET; b++)
		if (!(size >> b))
			break;

	return b - MEM_MIN_BUCKET;	/* convert to bucket array index */
}
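
/*
 * BucketIndex() maps a request size to the smallest power-of-two bucket able
 * to hold it, i.e. roughly max(0, ceil(log2(size)) - MEM_MIN_BUCKET).  A
 * sketch of the mapping, assuming MEM_MIN_BUCKET is 4 (the real value comes
 * from the library headers):
 *
 *	BucketIndex(1)   == 0	// 16-byte bucket
 *	BucketIndex(16)  == 0	// still fits the 16-byte bucket
 *	BucketIndex(17)  == 1	// 32-byte bucket
 *	BucketIndex(100) == 3	// 128-byte bucket
 */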

static inline struct tagAlloc *
pullInactive(mpool_t * __restrict mp, int idx)
{
	struct tagAlloc *m = NULL;

	/* the pool must be locked before calling this function */
	if ((m = TAILQ_FIRST(&mp->pool_inactive[idx]))) {
		TAILQ_REMOVE(&mp->pool_inactive[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.cache--;
		mp->pool_bytes.cache -= mem_size(m);

		/* clear name */
		*m->alloc_name = 0;
	}

	return m;
}

/*
 * mpool_malloc() - Memory allocation
 *
 * @mp = Memory pool
 * @size = Size
 * @memname = Optional memory block name
 * return: NULL error or !=NULL allocated memory
 */
void *
mpool_malloc(mpool_t * __restrict mp, u_int size, const char *memname)
{
	struct tagAlloc *m;
	int idx;
	u_int align;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	if (size > MEM_ALLOC_MAX) {
		sess_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else
		size = (size + 3) & ~3;	/* align to 4 bytes; room is needed for the sentinels */

	idx = BucketIndex(size);

	mpool_lock(mp);

	/* get memory from cache if exists */
	if (!(m = pullInactive(mp, idx))) {
		/* quota */
		if (mp->pool_quota.max && 
				(mp->pool_quota.curr + size) > mp->pool_quota.max) {
			sess_SetErr(ENOMEM, "Max.allocate memory quota has been reached");
			mpool_unlock(mp);
			return NULL;
		}

		m = malloc(sizeof(struct tagAlloc));
		if (!m) {
			LOGERR;
			mpool_unlock(mp);
			return NULL;
		} else
			memset(m, 0, sizeof(struct tagAlloc));
	}

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	if (!m->alloc_mem) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		m->alloc_mem = malloc(align + 12);	/* +12 sentinel bytes */
		if (!m->alloc_mem) {
			LOGERR;
			free(m);
			mpool_unlock(mp);
			return NULL;
		} else	/* quota */
			mp->pool_quota.curr += size;
	}

	m->alloc_mem[0] = size / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + size / sizeof(u_int)] = MEM_MAGIC_STOP;
	TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
	/* statistics */
	mp->pool_calls.alloc++;
	mp->pool_bytes.alloc += size;

	mpool_unlock(mp);
	return mem_data(m, void*);
}
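
/*
 * A hedged usage sketch for mpool_malloc(): the returned block is tagged with
 * an optional name and framed by MEM_MAGIC_START/MEM_MAGIC_STOP sentinels.
 * The pool variable and sizes below are illustrative only.
 *
 *	char *buf = mpool_malloc(pool, 100, "rx_buffer");
 *	if (!buf)
 *		return -1;			// sess_SetErr() has recorded the reason
 *	memset(buf, 0, mpool_getsizebyaddr(buf));	// usable (aligned) size
 */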

/*
 * mpool_realloc() - Reallocate memory block with new size
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @newsize = New size of memory block
 * @memname = Optional new memory block name
 * return: NULL error or !=NULL new reallocated memory block
 */
void *
mpool_realloc(mpool_t * __restrict mp, void * __restrict data, u_int newsize, const char *memname)
{
	struct tagAlloc *m, *tmp;
	int idx, oidx;
	void *p;
	u_int align, osize;

	/* if !data execute mpool_malloc() */
	if (!data)
		return mpool_malloc(mp, newsize, memname);

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		sess_SetErr(EFAULT, "Corrupted memory address");
		return NULL;
	} else {
		osize = ((u_int*)data)[-2] * sizeof(u_int);
		oidx = BucketIndex(osize);
	}
	/* prepare new size */
	if (newsize > MEM_ALLOC_MAX) {
		sess_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else {
		newsize = (newsize + 3) & ~3;	/* align to 4 bytes; room is needed for the sentinels */
		idx = BucketIndex(newsize);
	}

	mpool_lock(mp);

	/* quota */
	if (mp->pool_quota.max && 
			(mp->pool_quota.curr + ((u_long) newsize - osize)) > mp->pool_quota.max) {
		sess_SetErr(ENOMEM, "Max.allocate memory quota has been reached");
		mpool_unlock(mp);
		return NULL;
	}

	/* find old memory block */
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[oidx], alloc_node, tmp) {
		if (mem_data(m, void*) == data && mem_size(m) == osize) {
			/* the old and new sizes fall into different buckets */
			if (oidx != idx) {
				TAILQ_REMOVE(&mp->pool_active[oidx], m, alloc_node);
				/* statistics */
				mp->pool_calls.alloc--;
			}
			mp->pool_bytes.alloc -= osize;
			break;
		}
	}
	/* memory block not found! */
	if (!m) {
		mpool_unlock(mp);
		sess_SetErr(EFAULT, "Memory block not found");
		return NULL;
	}

	/* try to reallocate memory block to new bucket */
	if (oidx != idx) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		p = realloc(m->alloc_mem, align + 12);
		if (!p) {
			LOGERR;

			/* put the block pulled for reallocation back into its old bucket */
			TAILQ_INSERT_HEAD(&mp->pool_active[oidx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc++;
			mp->pool_bytes.alloc += osize;

			mpool_unlock(mp);
			return NULL;
		} else
			m->alloc_mem = (u_int*) p;
	}
	/* quota */
	mp->pool_quota.curr += (u_long) newsize - osize;

	m->alloc_mem[0] = newsize / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + newsize / sizeof(u_int)] = MEM_MAGIC_STOP;

	if (oidx != idx) {
		TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.alloc++;
	}
	mp->pool_bytes.alloc += newsize;

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	mpool_unlock(mp);
	return mem_data(m, void*);
}
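
/*
 * A hedged usage sketch for mpool_realloc().  Growing a block can move it to
 * a larger bucket (and a new address), so the returned pointer must replace
 * the old one, just as with realloc(3); on failure the original block stays
 * valid.  Names are illustrative.
 *
 *	char *np = mpool_realloc(pool, buf, 256, "rx_buffer");
 *	if (np)
 *		buf = np;		// old pointer may be stale after a bucket change
 */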

/*
 * mpool_purge() - Purge memory block cache and release resources
 *
 * @mp = Memory pool
 * @atmost = Keep at most this many cached blocks per bucket (0 purges everything)
 * return: -1 error or 0 ok
 */
int
mpool_purge(mpool_t * __restrict mp, u_int atmost)
{
	register int i, cx;
	struct tagAlloc *m, *tmp;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return -1;
	}

	mpool_lock(mp);

	for (i = cx = 0; i < MEM_BUCKETS; cx = 0, i++) {
		TAILQ_FOREACH_SAFE(m, &mp->pool_inactive[i], alloc_node, tmp) {
			/* barrier for purge; the first <atmost> cached blocks survive */
			if (cx < atmost) {
				cx++;
				continue;
			}

			TAILQ_REMOVE(&mp->pool_inactive[i], m, alloc_node);
			/* statistics */
			mp->pool_calls.cache--;
			mp->pool_bytes.cache -= mem_size(m);

			mp->pool_calls.free++;
			mp->pool_bytes.free += mem_size(m);
			/* quota */
			mp->pool_quota.curr -= mem_size(m);

			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(mp);
	return 0;
}
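
/*
 * A hedged usage sketch for mpool_purge(): trim or flush the inactive cache.
 *
 *	mpool_purge(pool, 0);	// release every cached (inactive) block
 *	mpool_purge(pool, 2);	// keep up to 2 cached blocks per bucket
 */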

/*
 * mpool_free() - Free memory allocated with mpool_malloc()
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @purge = if !=0 force release memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free(mpool_t * __restrict mp, void * __restrict data, int purge)
{
	int idx;
	struct tagAlloc *m, *tmp;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return -1;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		sess_SetErr(EFAULT, "Corrupted memory address");
		return -2;
	} else
		idx = BucketIndex(((u_int*)data)[-2] * sizeof(u_int));

	mpool_lock(mp);
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
		if (mem_data(m, void*) == data) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}
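
/*
 * A hedged usage sketch for mpool_free(): by default the block is parked in
 * the inactive cache of its bucket so a later mpool_malloc() of similar size
 * can reuse it; purge != 0 returns the memory to the system immediately.
 *
 *	mpool_free(pool, buf, 0);	// cache the block for reuse
 *	mpool_free(pool, big, 1);	// release the block right away
 */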

/*
 * mpool_free2() - Free memory allocated with mpool_malloc(), by size and memory name
 *
 * @mp = Memory pool
 * @size = Allocated memory data size
 * @memname = Memory name
 * @purge = if !=0 force release memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free2(mpool_t * __restrict mp, u_int size, const char *memname, int purge)
{
	int idx;
	struct tagAlloc *m, *tmp;

	if (!mp || !memname) {
		sess_SetErr(EINVAL, "Pool or memory name is not specified");
		return -1;
	} else
		idx = BucketIndex(size);

	mpool_lock(mp);
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
		if (!strcmp(m->alloc_name, memname)) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}
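
/*
 * A hedged usage sketch for mpool_free2(): a named block can be released
 * without keeping its pointer, as long as the original request size is known
 * (it selects the bucket that is searched).
 *
 *	mpool_malloc(pool, 128, "dns_cache");
 *	...
 *	mpool_free2(pool, 128, "dns_cache", 0);	// look up by name, cache it
 */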

/*
 * mpool_getmembynam() - Find allocated memory block by size and memory name
 *
 * @mp = Memory pool
 * @size = Memory size
 * @memname = Memory name
 * return: NULL error or not found, !=NULL allocated memory
 */
inline struct tagAlloc *
mpool_getmembynam(mpool_t * __restrict mp, u_int size, const char *memname)
{
	int idx;
	struct tagAlloc *m = NULL;

	if (!mp || !memname)
		return NULL;

	idx = BucketIndex(size);
	TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
		if (!strcmp(m->alloc_name, memname))
			break;

	return m ? mem_data(m, void*) : NULL;	/* guard the not-found case before mem_data() */
}

/*
 * mpool_getsizebyaddr() - Get size of allocated memory block by address
 *
 * @data = allocated memory from mpool_malloc()
 * return: usable size of allocated memory block
 */
inline u_int
mpool_getsizebyaddr(void * __restrict data)
{
	if (mpool_chkaddr(data))
		return 0;

	return (((u_int*) data)[-2] * sizeof(u_int));
}

/*
 * mpool_chkaddr() - Check validity of given address
 *
 * @data = allocated memory from mpool_malloc()
 * return: -1 bad address, 1 corrupted address or 0 ok
 */
inline int
mpool_chkaddr(void * __restrict data)
{
	/* check address range */
	if (MEM_BADADDR(data))
		return -1;
	/* check sentinel */
	if (MEM_CORRUPT(data))
		return 1;
	/* data address is ok! */
	return 0;
}
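
/*
 * A hedged usage sketch: the sentinels written by mpool_malloc() make a cheap
 * integrity check possible before a block is handed to other code.
 *
 *	if (mpool_chkaddr(buf))
 *		return -1;	// out of range or overwritten sentinel
 *	printf("usable bytes: %u\n", mpool_getsizebyaddr(buf));
 */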

/*
 * mpool_setquota() - Change maximum memory quota
 *
 * @mp = Memory pool
 * @maxmem = New max quota size
 * return: old maximum memory quota size
 */
inline u_long
mpool_setquota(mpool_t * __restrict mp, u_long maxmem)
{
	u_long ret;

	if (!mp)
		return 0;

	ret = mp->pool_quota.max;
	mp->pool_quota.max = maxmem;

	/* if the new max quota is less than the currently allocated memory, 
	 * try to purge cached memory blocks
	 */
	if (mp->pool_quota.max < mp->pool_quota.curr)
		mpool_purge(mp, 0);

	return ret;
}

/*
 * mpool_getquota() - Get memory quota
 *
 * @mp = Memory pool
 * @currmem = Return current memory
 * @maxmem = Return max quota size
 * return: none
 */
inline void
mpool_getquota(mpool_t * __restrict mp, u_long *currmem, u_long *maxmem)
{
	if (!mp)
		return;

	if (maxmem)
		*maxmem = mp->pool_quota.max;
	if (currmem)
		*currmem = mp->pool_quota.curr;
}
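
/*
 * A hedged usage sketch: cap the pool at 1 MB and inspect how much of the
 * quota is currently in use.
 *
 *	u_long curr, max;
 *
 *	mpool_setquota(pool, 1024 * 1024);
 *	mpool_getquota(pool, &curr, &max);
 *	printf("quota: %lu of %lu bytes used\n", curr, max);
 */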

/* ----------------------------------------------------------- */

/*
 * mpool_statistics() - Dump statistics from memory pool buckets
 *
 * @mp = Memory pool
 * @cb = Export statistics to callback
 * return: none
 */
void
mpool_statistics(mpool_t * __restrict mp, mpool_stat_cb cb)
{
	struct tagAlloc *m;
	register int i, act, inact;

	if (!mp || !cb)
		return;

	for (i = act = inact = 0; i < MEM_BUCKETS; act = inact = 0, i++) {
		TAILQ_FOREACH(m, &mp->pool_active[i], alloc_node)
			act++;
		TAILQ_FOREACH(m, &mp->pool_inactive[i], alloc_node)
			inact++;

		cb(1 << (i + MEM_MIN_BUCKET), act, inact);
	}
}
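
/*
 * A hedged usage sketch for mpool_statistics(): the callback receives the
 * bucket size in bytes plus the number of active and cached (inactive)
 * blocks.  The exact mpool_stat_cb prototype lives in the library headers;
 * the signature below only mirrors how cb() is invoked above and is an
 * assumption.
 *
 *	static void
 *	dump_bucket(u_int size, u_int act, u_int inact)
 *	{
 *		printf("bucket %6u: %u active, %u cached\n", size, act, inact);
 *	}
 *	...
 *	mpool_statistics(pool, dump_bucket);
 */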