/*************************************************************************
 * (C) 2012 AITNET ltd - Sofia/Bulgaria - <misho@elwix.org>
 * by Michael Pounov <misho@openbsd-bg.org>
 *
 * $Author: misho $
 * $Id: mem.c,v 1.1.2.4 2012/02/28 00:13:26 misho Exp $
 *
 **************************************************************************
The ELWIX and AITNET software is distributed under the following
terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
by Michael Pounov <misho@elwix.org>. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
   must display the following acknowledgement:
   This product includes software developed by Michael Pounov <misho@elwix.org>
   ELWIX - Embedded LightWeight unIX and its contributors.
4. Neither the name of AITNET nor the names of its contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
#include "global.h"


/*
 * mpool_init() - Init memory pool
 *
 * return: =NULL error or !=NULL new allocated pool
 */
mpool_t *
mpool_init(void)
{
	mpool_t *mp;
	register int i;

	mp = malloc(sizeof(mpool_t));
	if (!mp) {
		LOGERR;
		return NULL;
	} else
		memset(mp, 0, sizeof(mpool_t));

	pthread_mutex_init(&mp->pool_mtx, NULL);

	mpool_lock(mp);
	for (i = 0; i < MEM_BUCKETS; i++) {
		TAILQ_INIT(&mp->pool_active[i]);
		TAILQ_INIT(&mp->pool_inactive[i]);
	}
	mpool_unlock(mp);

	return mp;
}

/*
 * mpool_destroy() - Destroy memory pool
 *
 * @mp = Memory pool
 * return: none
 */
void
mpool_destroy(mpool_t ** __restrict mp)
{
	struct tagAlloc *m;
	register int i;

	if (!mp || !*mp)
		return;

	mpool_lock(*mp);

	for (i = 0; i < MEM_BUCKETS; i++) {
		while ((m = TAILQ_FIRST(&(*mp)->pool_active[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_active[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
		while ((m = TAILQ_FIRST(&(*mp)->pool_inactive[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_inactive[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(*mp);
	pthread_mutex_destroy(&(*mp)->pool_mtx);

	free(*mp);
	*mp = NULL;
}

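/*
 * Typical pool lifecycle (illustrative sketch, not part of the library):
 * every call below is defined in this file; the block name "rx_buffer"
 * and the buffer size are made-up example values.
 *
 *	mpool_t *mp = mpool_init();
 *	if (mp) {
 *		char *buf = mpool_malloc(mp, 128, "rx_buffer");
 *		if (buf) {
 *			memset(buf, 0, mpool_getsizebyaddr(buf));
 *			mpool_free(mp, buf, 0);		back into the inactive cache
 *		}
 *		mpool_purge(mp, 0);			drop all cached blocks
 *		mpool_destroy(&mp);			also sets mp to NULL
 *	}
 */
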
/* ----------------------------------------------------------- */

static inline long
BucketIndex(u_int size)
{
	register long b;

	if (!size--)
		return 0;		/* min bucket position in array */

	for (b = MEM_MIN_BUCKET; b < MEM_MAX_BUCKET; b++)
		if (!(size >> b))
			break;

	return b - MEM_MIN_BUCKET;	/* convert to bucket array index */
}

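/*
 * Worked example (sketch): BucketIndex() returns the index of the bucket
 * whose block size is the smallest power of two that can hold the request.
 * Assuming, for illustration only, MEM_MIN_BUCKET == 4 (16-byte minimum):
 *
 *	size = 100  ->  after the decrement, 99
 *	99 >> 4, 99 >> 5, 99 >> 6 are all non-zero; 99 >> 7 == 0  ->  b = 7
 *	index = b - MEM_MIN_BUCKET = 3
 *
 * mpool_malloc() then backs that bucket with 1 << (3 + MEM_MIN_BUCKET)
 * = 128 bytes of data plus 12 sentinel bytes.
 */
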
static inline struct tagAlloc *
pullInactive(mpool_t * __restrict mp, int idx)
{
	struct tagAlloc *m = NULL;

	/* the pool must be locked before calling this function */
	if ((m = TAILQ_FIRST(&mp->pool_inactive[idx]))) {
		TAILQ_REMOVE(&mp->pool_inactive[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.cache--;
		mp->pool_bytes.cache -= mem_size(m);

		/* clear name */
		*m->alloc_name = 0;
		/* clear flags */
		m->alloc_flags ^= m->alloc_flags;
	}

	return m;
}

/*
 * mpool_malloc() - Memory allocation
 *
 * @mp = Memory pool
 * @size = Size
 * @memname = Optional memory block name
 * return: NULL error or !=NULL ok allocated memory
 */
void *
mpool_malloc(mpool_t * __restrict mp, u_int size, const char *memname)
{
	struct tagAlloc *m;
	int idx, align;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	if (size > MEM_ALLOC_MAX) {
		sess_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else
		size = (size + 3) & ~3;	/* round up to a 4-byte multiple so the trailing u_int sentinel lands right after the data */

	idx = BucketIndex(size);

	mpool_lock(mp);

	/* get memory from cache if exists */
	if (!(m = pullInactive(mp, idx))) {
		/* quota */
		if (mp->pool_quota.max && 
				(mp->pool_quota.curr + size) > mp->pool_quota.max) {
			sess_SetErr(ENOMEM, "Maximum memory allocation quota has been reached");
			mpool_unlock(mp);
			return NULL;
		}

		m = malloc(sizeof(struct tagAlloc));
		if (!m) {
			LOGERR;
			mpool_unlock(mp);
			return NULL;
		} else
			memset(m, 0, sizeof(struct tagAlloc));
	}

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	if (!m->alloc_mem) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		m->alloc_mem = malloc(align + 12);	/* +12 sentinel bytes */
		if (!m->alloc_mem) {
			LOGERR;
			free(m);
			mpool_unlock(mp);
			return NULL;
		} else	/* quota */
			mp->pool_quota.curr += size;
	}

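	/*
	 * Block layout in alloc_mem (u_int words), maintained by the writes
	 * below: word 0 = user size in u_ints, word 1 = MEM_MAGIC_START,
	 * words 2.. = user data, and MEM_MAGIC_STOP immediately after the
	 * data.  mem_data()/mem_size() and the MEM_BADADDR()/MEM_CORRUPT()
	 * checks are assumed to read exactly these words; their definitions
	 * live in global.h, not in this file.
	 */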
	m->alloc_mem[0] = size / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + size / sizeof(u_int)] = MEM_MAGIC_STOP;
	TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
	/* statistics */
	mp->pool_calls.alloc++;
	mp->pool_bytes.alloc += size;

	mpool_unlock(mp);
	return mem_data(m, void*);
}

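/*
 * The quota check in mpool_malloc() is driven by mp->pool_quota.max, which
 * nothing in this file ever sets.  A minimal sketch of enabling it, assuming
 * direct access to the pool structure (no setter API is visible here):
 *
 *	mpool_t *mp = mpool_init();
 *	if (mp)
 *		mp->pool_quota.max = 4 * 1024 * 1024;
 *
 * pool_quota.curr grows with each newly backed allocation and shrinks only
 * when a block is really released (mpool_free()/mpool_free2() with purge
 * set, or mpool_purge()), so cached blocks still count against the limit;
 * once the limit would be exceeded, mpool_malloc() reports ENOMEM via
 * sess_SetErr() and returns NULL.
 */
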
/*
 * mpool_purge() - Purge memory block cache and release resources
 *
 * @mp = Memory pool
 * @atmost = Keep at most this many cached blocks in each bucket, free the rest
 * return: -1 error or 0 ok
 */
int
mpool_purge(mpool_t * __restrict mp, u_int atmost)
{
	register int i, cx;
	struct tagAlloc *m, *tmp;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return -1;
	}

	mpool_lock(mp);

	for (i = cx = 0; i < MEM_BUCKETS; cx = 0, i++) {
		TAILQ_FOREACH_SAFE(m, &mp->pool_inactive[i], alloc_node, tmp) {
			/* keep the first atmost cached blocks in this bucket */
			if (cx < atmost) {
				cx++;
				continue;
			}

			TAILQ_REMOVE(&mp->pool_inactive[i], m, alloc_node);
			/* statistics */
			mp->pool_calls.cache--;
			mp->pool_bytes.cache -= mem_size(m);

			mp->pool_calls.free++;
			mp->pool_bytes.free += mem_size(m);
			/* quota */
			mp->pool_quota.curr -= mem_size(m);

			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(mp);
	return 0;
}

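/*
 * Usage note (sketch): mpool_purge(mp, 0) empties the inactive cache
 * completely, while e.g. mpool_purge(mp, 2) keeps up to two cached blocks
 * per bucket and frees the rest.  A caller running it periodically might do:
 *
 *	if (mpool_purge(mp, 2) == -1)
 *		handle the error reported via sess_SetErr()
 */
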
/*
 * mpool_free() - Free memory allocated with mpool_malloc()
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @purge = if !=0 force release memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free(mpool_t * __restrict mp, void * __restrict data, int purge)
{
	int idx;
	struct tagAlloc *m;

	if (!mp) {
		sess_SetErr(EINVAL, "Pool not specified");
		return -1;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		sess_SetErr(EFAULT, "Corrupted memory address");
		return -2;
	} else
		idx = BucketIndex(((u_int*)data)[-2] * sizeof(u_int));

	mpool_lock(mp);
	TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
		if (mem_data(m, void*) == data) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}

/*
 * mpool_free2() - Free memory allocated with mpool_malloc(), looked up by size and memory name
 *
 * @mp = Memory pool
 * @size = Allocated memory data size
 * @memname = Memory name
 * @purge = if !=0 force release memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free2(mpool_t * __restrict mp, u_int size, const char *memname, int purge)
{
	int idx;
	struct tagAlloc *m;

	if (!mp || !memname) {
		sess_SetErr(EINVAL, "Pool or memory name is not specified");
		return -1;
	} else
		idx = BucketIndex(size);

	mpool_lock(mp);
	TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
		if (!strcmp(m->alloc_name, memname)) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}

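/*
 * Example (sketch): releasing a named block without keeping its pointer.
 * The name "dns_cache" and the size are made-up values; the size must map
 * to the same bucket as the original allocation (see BucketIndex()) and
 * the name must match the one given to mpool_malloc() exactly.
 *
 *	mpool_malloc(mp, 512, "dns_cache");
 *	...
 *	mpool_free2(mp, 512, "dns_cache", 1);	nonzero purge releases the memory
 */
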
/*
 * mpool_getmembynam() - Find allocated memory block by size and memory name
 *
 * @mp = Memory pool
 * @size = Memory size
 * @memname = Memory name
 * return: NULL error or not found, !=NULL the matching allocated memory
 */
inline struct tagAlloc *
mpool_getmembynam(mpool_t * __restrict mp, u_int size, const char *memname)
{
	int idx;
	struct tagAlloc *m = NULL;

	if (!mp || !memname)
		return NULL;

	idx = BucketIndex(size);
	TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
		if (!strcmp(m->alloc_name, memname))
			break;

	return mem_data(m, void*);
}

/*
 * mpool_getsizebyaddr() - Get size of allocated memory block by address
 *
 * @data = allocated memory from mpool_malloc()
 * return: usable size of allocated memory block
 */
inline u_int
mpool_getsizebyaddr(void * __restrict data)
{
	if (mpool_chkaddr(data))
		return 0;

	return (((u_int*) data)[-2] * sizeof(u_int));
}

/*
 * mpool_chkaddr() - Check validity of given address
 *
 * @data = allocated memory from mpool_malloc()
 * return: -1 bad address, 1 corrupted address or 0 ok
 */
inline int
mpool_chkaddr(void * __restrict data)
{
	/* check address range */
	if (MEM_BADADDR(data))
		return -1;
	/* check sentinel */
	if (MEM_CORRUPT(data))
		return 1;
	/* data address is ok! */
	return 0;
}
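
/*
 * Example (sketch): validating a pointer before reusing it.  mpool_chkaddr()
 * relies on the MEM_BADADDR()/MEM_CORRUPT() macros from global.h, whose
 * definitions are not part of this file.
 *
 *	if (mpool_chkaddr(ptr)) {
 *		address out of range (-1) or an overwritten sentinel (1)
 *	} else
 *		memset(ptr, 0, mpool_getsizebyaddr(ptr));
 */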