/*************************************************************************
* (C) 2012 AITNET ltd - Sofia/Bulgaria - <misho@elwix.org>
* by Michael Pounov <misho@openbsd-bg.org>
*
* $Author: misho $
* $Id: mem.c,v 1.1.2.5 2012/02/28 00:25:25 misho Exp $
*
**************************************************************************
The ELWIX and AITNET software is distributed under the following
terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
by Michael Pounov <misho@elwix.org>. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
This product includes software developed by Michael Pounov <misho@elwix.org>
ELWIX - Embedded LightWeight unIX and its contributors.
4. Neither the name of AITNET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
#include "global.h"


/*
 * mpool_init() - Init memory pool
 *
 * @maxmem = if != 0, sets the maximum memory quota
 * return: =NULL error or !=NULL new allocated pool
 */
mpool_t *
mpool_init(u_long maxmem)
{
	mpool_t *mp;
	register int i;

	mp = malloc(sizeof(mpool_t));
	if (!mp) {
		LOGERR;
		return NULL;
	} else
		memset(mp, 0, sizeof(mpool_t));

	pthread_mutex_init(&mp->pool_mtx, NULL);

	mp->pool_quota.max = maxmem;

	mpool_lock(mp);
	for (i = 0; i < MEM_BUCKETS; i++) {
		TAILQ_INIT(&mp->pool_active[i]);
		TAILQ_INIT(&mp->pool_inactive[i]);
	}
	mpool_unlock(mp);

	return mp;
}
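
/*
 * Usage sketch for mpool_init() (illustrative only, not part of the library
 * build; the 1 MB quota is an arbitrary example value, pass 0 for an
 * unlimited pool):
 *
 *	mpool_t *mp = mpool_init(1024 * 1024);
 *	if (!mp)
 *		return -1;
 */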

/*
 * mpool_destroy() - Destroy memory pool
 *
 * @mp = Memory pool
 * return: none
 */
void
mpool_destroy(mpool_t ** __restrict mp)
{
	struct tagAlloc *m;
	register int i;

	if (!mp || !*mp)
		return;

	mpool_lock(*mp);

	for (i = 0; i < MEM_BUCKETS; i++) {
		while ((m = TAILQ_FIRST(&(*mp)->pool_active[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_active[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
		while ((m = TAILQ_FIRST(&(*mp)->pool_inactive[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_inactive[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(*mp);
	pthread_mutex_destroy(&(*mp)->pool_mtx);

	free(*mp);
	*mp = NULL;
}
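
/*
 * Usage sketch for mpool_destroy() (illustrative): the pool is passed by
 * address, so after every active and cached block has been released the
 * caller's pointer is reset to NULL:
 *
 *	mpool_destroy(&mp);
 *	assert(mp == NULL);
 */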

/* ----------------------------------------------------------- */

static inline long
BucketIndex(u_int size)
{
	register long b;

	if (!size--)
		return 0;	/* min bucket position in array */

	for (b = MEM_MIN_BUCKET; b < MEM_MAX_BUCKET; b++)
		if (!(size >> b))
			break;

	return b - MEM_MIN_BUCKET;	/* convert to bucket array index */
}
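
/*
 * Worked example for BucketIndex() (assuming MEM_MIN_BUCKET <= 7): for a
 * 100-byte request the size is first decremented to 99, and 99 >> b becomes
 * zero at b = 7, so the request maps to the 128-byte bucket, i.e. array
 * index 7 - MEM_MIN_BUCKET.  A request of exactly 128 bytes lands in the
 * same bucket, because the decrement makes the shift test use 127.
 */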

static inline struct tagAlloc *
pullInactive(mpool_t * __restrict mp, int idx)
{
	struct tagAlloc *m = NULL;

	/* the pool must be locked before calling this function */
	if ((m = TAILQ_FIRST(&mp->pool_inactive[idx]))) {
		TAILQ_REMOVE(&mp->pool_inactive[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.cache--;
		mp->pool_bytes.cache -= mem_size(m);

		/* clear name */
		*m->alloc_name = 0;
		/* clear flags */
		m->alloc_flags ^= m->alloc_flags;
	}

	return m;
}

/*
 * mpool_malloc() - Memory allocation
 *
 * @mp = Memory pool
 * @size = Size of requested memory block
 * @memname = Optional memory block name
 * return: =NULL error or !=NULL allocated memory
 */
void *
mpool_malloc(mpool_t * __restrict mp, u_int size, const char *memname)
{
	struct tagAlloc *m;
	int idx, align;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	if (size > MEM_ALLOC_MAX) {
		sess_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else
		size = (size + 3) & ~3;	/* round up to a multiple of 4 so the u_int sentinels line up */

	idx = BucketIndex(size);

	mpool_lock(mp);

	/* reuse a cached block if one is available */
	if (!(m = pullInactive(mp, idx))) {
		/* quota */
		if (mp->pool_quota.max &&
		    (mp->pool_quota.curr + size) > mp->pool_quota.max) {
			sess_SetErr(ENOMEM, "Maximum memory quota has been reached");
			mpool_unlock(mp);
			return NULL;
		}

		m = malloc(sizeof(struct tagAlloc));
		if (!m) {
			LOGERR;
			mpool_unlock(mp);
			return NULL;
		} else
			memset(m, 0, sizeof(struct tagAlloc));
	}

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	if (!m->alloc_mem) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		m->alloc_mem = malloc(align + 12);	/* +12 sentinel bytes */
		if (!m->alloc_mem) {
			LOGERR;
			free(m);
			mpool_unlock(mp);
			return NULL;
		} else	/* quota */
			mp->pool_quota.curr += size;
	}

	m->alloc_mem[0] = size / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + size / sizeof(u_int)] = MEM_MAGIC_STOP;
	TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
	/* statistics */
	mp->pool_calls.alloc++;
	mp->pool_bytes.alloc += size;

	mpool_unlock(mp);
	return mem_data(m, void*);
}
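
/*
 * Usage sketch for mpool_malloc() (illustrative; the block name "rxbuf"
 * and the 512-byte size are arbitrary example values):
 *
 *	char *buf = mpool_malloc(mp, 512, "rxbuf");
 *	if (!buf)
 *		return -1;
 *	memset(buf, 0, mpool_getsizebyaddr(buf));
 */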

/*
 * mpool_purge() - Purge memory block cache and release resources
 *
 * @mp = Memory pool
 * @atmost = Keep at most this many cached blocks in each bucket (0 = release the whole cache)
 * return: -1 error or 0 ok
 */
int
mpool_purge(mpool_t * __restrict mp, u_int atmost)
{
	register int i, cx;
	struct tagAlloc *m, *tmp;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return -1;
	}

	mpool_lock(mp);

	for (i = cx = 0; i < MEM_BUCKETS; cx = 0, i++) {
		TAILQ_FOREACH_SAFE(m, &mp->pool_inactive[i], alloc_node, tmp) {
			/* keep up to atmost cached blocks in this bucket */
			if (cx < atmost) {
				cx++;
				continue;
			}

			TAILQ_REMOVE(&mp->pool_inactive[i], m, alloc_node);
			/* statistics */
			mp->pool_calls.cache--;
			mp->pool_bytes.cache -= mem_size(m);

			mp->pool_calls.free++;
			mp->pool_bytes.free += mem_size(m);
			/* quota */
			mp->pool_quota.curr -= mem_size(m);

			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(mp);
	return 0;
}
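
/*
 * Usage sketch for mpool_purge() (illustrative): atmost == 0 drops the
 * whole inactive cache, a non-zero value keeps that many cached blocks in
 * every bucket:
 *
 *	mpool_purge(mp, 0);
 *	mpool_purge(mp, 4);
 */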

/*
 * mpool_free() - Free memory allocated with mpool_malloc()
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @purge = if !=0, force release of the memory block
 * return: <0 error or 0 ok, memory block released
 */
int
mpool_free(mpool_t * __restrict mp, void * __restrict data, int purge)
{
	int idx;
	struct tagAlloc *m;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return -1;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		sess_SetErr(EFAULT, "Corrupted memory address");
		return -2;
	} else
		idx = BucketIndex(((u_int*)data)[-2] * sizeof(u_int));

	mpool_lock(mp);
	TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
		if (mem_data(m, void*) == data) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}
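
/*
 * Usage sketch for mpool_free() (illustrative): with purge == 0 the block
 * is parked in the inactive cache for reuse by a later mpool_malloc(), with
 * purge != 0 it is released to the system immediately:
 *
 *	mpool_free(mp, buf, 0);
 *	mpool_free(mp, buf2, 1);
 */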

/*
 * mpool_free2() - Free memory allocated with mpool_malloc() by size and memory name
 *
 * @mp = Memory pool
 * @size = Allocated memory data size
 * @memname = Memory name
 * @purge = if !=0, force release of the memory block
 * return: <0 error or 0 ok, memory block released
 */
int
mpool_free2(mpool_t * __restrict mp, u_int size, const char *memname, int purge)
{
	int idx;
	struct tagAlloc *m;

	if (!mp || !memname) {
		sess_SetErr(EINVAL, "Pool or memory name is not specified");
		return -1;
	} else
		idx = BucketIndex(size);

	mpool_lock(mp);
	TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
		if (!strcmp(m->alloc_name, memname)) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}
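
/*
 * Usage sketch for mpool_free2() (illustrative; assumes the block was
 * allocated as mpool_malloc(mp, 512, "rxbuf")): the size argument is only
 * used to pick the bucket, so it must map to the same bucket as the
 * original allocation size:
 *
 *	mpool_free2(mp, 512, "rxbuf", 0);
 */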

/*
 * mpool_getmembynam() - Find allocated memory block by size and memory name
 *
 * @mp = Memory pool
 * @size = Memory size
 * @memname = Memory name
 * return: =NULL error or not found, !=NULL allocated memory
 */
inline struct tagAlloc *
mpool_getmembynam(mpool_t * __restrict mp, u_int size, const char *memname)
{
	int idx;
	struct tagAlloc *m = NULL;

	if (!mp || !memname)
		return NULL;

	idx = BucketIndex(size);
	TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
		if (!strcmp(m->alloc_name, memname))
			break;

	return mem_data(m, void*);
}

/*
 * mpool_getsizebyaddr() - Get size of allocated memory block by address
 *
 * @data = allocated memory from mpool_malloc()
 * return: usable size of the allocated memory block
 */
inline u_int
mpool_getsizebyaddr(void * __restrict data)
{
	if (mpool_chkaddr(data))
		return 0;

	return (((u_int*) data)[-2] * sizeof(u_int));
}

/*
 * mpool_chkaddr() - Check validity of given address
 *
 * @data = allocated memory from mpool_malloc()
 * return: -1 bad address, 1 corrupted address or 0 ok
 */
inline int
mpool_chkaddr(void * __restrict data)
{
	/* check address range */
	if (MEM_BADADDR(data))
		return -1;
	/* check sentinel */
	if (MEM_CORRUPT(data))
		return 1;
	/* data address is ok! */
	return 0;
}
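
/*
 * Usage sketch for mpool_chkaddr() and mpool_getsizebyaddr() (illustrative):
 * validate a pointer before touching the block, then query its usable size:
 *
 *	if (mpool_chkaddr(buf))
 *		return -1;
 *	printf("usable bytes: %u\n", mpool_getsizebyaddr(buf));
 */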

/*
 * mpool_setquota() - Change maximum memory quota
 *
 * @mp = Memory pool
 * @maxmem = New max quota size
 * return: old maximum memory quota size
 */
inline u_long
mpool_setquota(mpool_t * __restrict mp, u_long maxmem)
{
	u_long ret;

	if (!mp)
		return 0;

	ret = mp->pool_quota.max;
	mp->pool_quota.max = maxmem;

	/* if the new max quota is less than the currently allocated memory,
	 * try to purge cached memory blocks
	 */
	if (mp->pool_quota.max < mp->pool_quota.curr)
		mpool_purge(mp, 0);

	return ret;
}
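
/*
 * Usage sketch for mpool_setquota() (illustrative; the 512 KB limit is an
 * arbitrary value): tightening the quota below the current usage triggers
 * an automatic purge of the inactive cache:
 *
 *	u_long old = mpool_setquota(mp, 512 * 1024);
 */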

/* ----------------------------------------------------------- */

/*
 * mpool_statistics() - Dump statistics from memory pool buckets
 *
 * @mp = Memory pool
 * @cb = Callback that receives the statistics for each bucket
 * return: none
 */
void
mpool_statistics(mpool_t * __restrict mp, mpool_stat_cb cb)
{
	struct tagAlloc *m;
	register int i, act, inact;

	if (!mp || !cb)
		return;

	for (i = act = inact = 0; i < MEM_BUCKETS; act = inact = 0, i++) {
		TAILQ_FOREACH(m, &mp->pool_active[i], alloc_node)
			act++;
		TAILQ_FOREACH(m, &mp->pool_inactive[i], alloc_node)
			inact++;

		cb(1 << (i + MEM_MIN_BUCKET), act, inact);
	}
}
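
/*
 * Usage sketch for mpool_statistics() (illustrative; the callback parameter
 * types below are an assumption, check the mpool_stat_cb typedef in the
 * header for the authoritative signature):
 *
 *	static void
 *	dump_bucket(u_int size, u_int act, u_int inact)
 *	{
 *		printf("bucket %6u: %u active, %u inactive\n", size, act, inact);
 *	}
 *
 *	mpool_statistics(mp, dump_bucket);
 */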