/*************************************************************************
* (C) 2012 AITNET ltd - Sofia/Bulgaria - <misho@elwix.org>
*  by Michael Pounov <misho@openbsd-bg.org>
*
* $Author$
* $Id$
*
**************************************************************************
The ELWIX and AITNET software is distributed under the following
terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
	by Michael Pounov <misho@elwix.org>.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
   must display the following acknowledgement:
   This product includes software developed by Michael Pounov <misho@elwix.org>
   ELWIX - Embedded LightWeight unIX and its contributors.
4. Neither the name of AITNET nor the names of its contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
#include "global.h"


/*
 * mpool_init() - Init memory pool
 *
 * @maxmem = If !=0 set maximum memory quota
 * return: =NULL error or !=NULL new allocated pool
 */
mpool_t *
mpool_init(u_long maxmem)
{
	mpool_t *mp;
	register int i;

	mp = malloc(sizeof(mpool_t));
	if (!mp) {
		LOGERR;
		return NULL;
	} else
		memset(mp, 0, sizeof(mpool_t));

	pthread_mutex_init(&mp->pool_mtx, NULL);

	mp->pool_quota.max = maxmem;

	mpool_lock(mp);
	for (i = 0; i < MEM_BUCKETS; i++) {
		TAILQ_INIT(&mp->pool_active[i]);
		TAILQ_INIT(&mp->pool_inactive[i]);
	}
	mpool_unlock(mp);

	return mp;
}
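
/*
 * Example (an illustrative sketch, not part of the library): typical
 * pool lifecycle with a 1 MB quota; mpool_destroy() is defined below.
 *
 *	mpool_t *mp = mpool_init(1024 * 1024);
 *	if (!mp)
 *		return;			// pool allocation failed
 *	// ... mpool_malloc()/mpool_free() against mp ...
 *	mpool_destroy(&mp);		// frees all blocks; sets mp to NULL
 */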

/*
 * mpool_destroy() - Destroy memory pool
 *
 * @mp = Memory pool
 * return: none
 */
void
mpool_destroy(mpool_t ** __restrict mp)
{
	struct tagAlloc *m;
	register int i;

	if (!mp || !*mp)	/* || guards against dereferencing a NULL mp */
		return;

	mpool_lock(*mp);

	for (i = 0; i < MEM_BUCKETS; i++) {
		while ((m = TAILQ_FIRST(&(*mp)->pool_active[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_active[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
		while ((m = TAILQ_FIRST(&(*mp)->pool_inactive[i]))) {
			TAILQ_REMOVE(&(*mp)->pool_inactive[i], m, alloc_node);
			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(*mp);
	pthread_mutex_destroy(&(*mp)->pool_mtx);

	free(*mp);
	*mp = NULL;
}

/* ----------------------------------------------------------- */

static inline long
BucketIndex(u_int size)
{
	register long b;

	if (!size--)
		return 0;		/* min bucket position in array */

	for (b = MEM_MIN_BUCKET; b < MEM_MAX_BUCKET; b++)
		if (!(size >> b))
			break;

	return b - MEM_MIN_BUCKET;	/* convert to bucket array index */
}
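
/*
 * Worked example (illustrative; assumes MEM_MIN_BUCKET == 4): buckets
 * grow by powers of two, so bucket i serves chunks of
 * 1 << (i + MEM_MIN_BUCKET) bytes:
 *
 *	BucketIndex(1..16)  == 0	(16-byte bucket)
 *	BucketIndex(17..32) == 1	(32-byte bucket)
 *	BucketIndex(33..64) == 2	(64-byte bucket)
 */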

static inline struct tagAlloc *
pullInactive(mpool_t * __restrict mp, int idx)
{
	struct tagAlloc *m = NULL;

	/* the pool must be locked before calling this function */
	if ((m = TAILQ_FIRST(&mp->pool_inactive[idx]))) {
		TAILQ_REMOVE(&mp->pool_inactive[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.cache--;
		mp->pool_bytes.cache -= mem_size(m);

		/* clear name */
		*m->alloc_name = 0;
	}

	return m;
}

/*
 * mpool_malloc() - Memory allocation
 *
 * @mp = Memory pool
 * @size = Size
 * @memname = Optional memory block name
 * return: NULL error or !=NULL ok allocated memory
 */
void *
mpool_malloc(mpool_t * __restrict mp, u_int size, const char *memname)
{
	struct tagAlloc *m;
	int idx;
	u_int align;

	if (!mp) {
		io_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	if (size > MEM_ALLOC_MAX) {
		io_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else
		size = (size + 3) & ~3;	/* align to 4 bytes, leaving room for the sentinels */

	idx = BucketIndex(size);

	mpool_lock(mp);

	/* get memory from cache, if present */
	if (!(m = pullInactive(mp, idx))) {
		/* quota */
		if (mp->pool_quota.max && 
				(mp->pool_quota.curr + size) > mp->pool_quota.max) {
			io_SetErr(ENOMEM, "Max.allocate memory quota has been reached");
			mpool_unlock(mp);
			return NULL;
		}

		m = malloc(sizeof(struct tagAlloc));
		if (!m) {
			LOGERR;
			mpool_unlock(mp);
			return NULL;
		} else
			memset(m, 0, sizeof(struct tagAlloc));
	}

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	if (!m->alloc_mem) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		m->alloc_mem = malloc(align + 12);	/* +12 sentinel bytes */
		if (!m->alloc_mem) {
			LOGERR;
			free(m);
			mpool_unlock(mp);
			return NULL;
		} else	/* quota */
			mp->pool_quota.curr += size;
	}

	m->alloc_mem[0] = size / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + size / sizeof(u_int)] = MEM_MAGIC_STOP;
	TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
	/* statistics */
	mp->pool_calls.alloc++;
	mp->pool_bytes.alloc += size;

	mpool_unlock(mp);
	return mem_data(m, void*);
}
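
/*
 * Layout built by mpool_malloc() above (each cell is a u_int):
 *
 *	alloc_mem[0]               size / sizeof(u_int)
 *	alloc_mem[1]               MEM_MAGIC_START sentinel
 *	alloc_mem[2]...            user data, returned via mem_data()
 *	alloc_mem[2 + size/4]      MEM_MAGIC_STOP sentinel
 *
 * Example use (a sketch; error handling abbreviated):
 *
 *	char *buf = mpool_malloc(mp, 100, "rxbuf");
 *	if (buf)
 *		memset(buf, 0, mpool_getsizebyaddr(buf));
 */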

/*
 * mpool_realloc() - Reallocate memory block with new size
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @newsize = New size of memory block
 * @memname = Optional new memory block name
 * return: NULL error or !=NULL new reallocated memory block
 */
void *
mpool_realloc(mpool_t * __restrict mp, void * __restrict data, u_int newsize, const char *memname)
{
	struct tagAlloc *m, *tmp;
	int idx, oidx;
	void *p;
	u_int align, osize;

	/* if !data, behave like mpool_malloc() */
	if (!data)
		return mpool_malloc(mp, newsize, memname);

	if (!mp) {
		io_SetErr(EINVAL, "Pool not specified");
		return NULL;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		io_SetErr(EFAULT, "Corrupted memory address");
		return NULL;
	} else {
		osize = ((u_int*)data)[-2] * sizeof(u_int);
		oidx = BucketIndex(osize);
	}
	/* prepare new size */
	if (newsize > MEM_ALLOC_MAX) {
		io_SetErr(ENOMEM, "Memory size is too large");
		return NULL;
	} else {
		newsize = (newsize + 3) & ~3;	/* align to 4 bytes, leaving room for the sentinels */
		idx = BucketIndex(newsize);
	}

	mpool_lock(mp);

	/* quota */
	if (mp->pool_quota.max && 
			(mp->pool_quota.curr + ((u_long) newsize - osize)) > mp->pool_quota.max) {
		io_SetErr(ENOMEM, "Max.allocate memory quota has been reached");
		mpool_unlock(mp);
		return NULL;
	}

	/* find old memory block */
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[oidx], alloc_node, tmp) {
		if (mem_data(m, void*) == data && mem_size(m) == osize) {
			/* case in different buckets */
			if (oidx != idx) {
				TAILQ_REMOVE(&mp->pool_active[oidx], m, alloc_node);
				/* statistics */
				mp->pool_calls.alloc--;
			}
			mp->pool_bytes.alloc -= osize;
			break;
		}
	}
	/* memory block not found! */
	if (!m) {
		mpool_unlock(mp);
		io_SetErr(EFAULT, "Memory block not found");
		return NULL;
	}

	/* try to reallocate memory block to new bucket */
	if (oidx != idx) {
		align = 1 << (idx + MEM_MIN_BUCKET);
		p = realloc(m->alloc_mem, align + 12);
		if (!p) {
			LOGERR;

			/* restore the pulled memory block to its old bucket */
			TAILQ_INSERT_HEAD(&mp->pool_active[oidx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc++;
			mp->pool_bytes.alloc += osize;

			mpool_unlock(mp);
			return NULL;
		} else
			m->alloc_mem = (u_int*) p;
	}
	/* quota */
	mp->pool_quota.curr += (u_long) newsize - osize;

	m->alloc_mem[0] = newsize / sizeof(u_int);
	m->alloc_mem[1] = MEM_MAGIC_START;
	m->alloc_mem[2 + newsize / sizeof(u_int)] = MEM_MAGIC_STOP;

	if (oidx != idx) {
		TAILQ_INSERT_HEAD(&mp->pool_active[idx], m, alloc_node);
		/* statistics */
		mp->pool_calls.alloc++;
	}
	mp->pool_bytes.alloc += newsize;

	if (memname)
		strlcpy(m->alloc_name, memname, sizeof m->alloc_name);

	mpool_unlock(mp);
	return mem_data(m, void*);
}
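
/*
 * Example (a sketch): growing a block; on allocation failure the old
 * block is restored to its original bucket, so the data stays valid.
 *
 *	char *p = mpool_malloc(mp, 64, "line");
 *	char *np = mpool_realloc(mp, p, 256, "line");
 *	if (np)
 *		p = np;		// resized, possibly moved to a new bucket
 */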

/*
 * mpool_purge() - Purge memory block cache and release resources
 *
 * @mp = Memory pool
 * @atmost = Keep at most this many inactive blocks in each bucket
 * return: -1 error or 0 ok
 */
int
mpool_purge(mpool_t * __restrict mp, u_int atmost)
{
	register int i, cx;
	struct tagAlloc *m, *tmp;

	if (!mp) {
		io_SetErr(EINVAL, "Pool not specified");
		return -1;
	}

	mpool_lock(mp);

	for (i = cx = 0; i < MEM_BUCKETS; cx = 0, i++) {
		TAILQ_FOREACH_SAFE(m, &mp->pool_inactive[i], alloc_node, tmp) {
			/* barrier for purge */
			if (cx < atmost) {
				cx++;
				continue;
			}

			TAILQ_REMOVE(&mp->pool_inactive[i], m, alloc_node);
			/* statistics */
			mp->pool_calls.cache--;
			mp->pool_bytes.cache -= mem_size(m);

			mp->pool_calls.free++;
			mp->pool_bytes.free += mem_size(m);
			/* quota */
			mp->pool_quota.curr -= mem_size(m);

			if (m->alloc_mem)
				free(m->alloc_mem);
			free(m);
		}
	}

	mpool_unlock(mp);
	return 0;
}
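
/*
 * Example (a sketch): mpool_purge(mp, 0) releases every cached
 * (inactive) block back to the system; mpool_purge(mp, 8) keeps up to
 * 8 cached blocks per bucket and frees the rest.
 */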

/*
 * mpool_free() - Free memory allocated with mpool_malloc()
 *
 * @mp = Memory pool
 * @data = Allocated memory data
 * @purge = if !=0 force release of the memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free(mpool_t * __restrict mp, void * __restrict data, int purge)
{
	int idx;
	struct tagAlloc *m, *tmp;

	if (!mp) {
		io_SetErr(EINVAL, "Pool not specified");
		return -1;
	}
	/* check address range & sentinel */
	if (MEM_BADADDR(data) || MEM_CORRUPT(data)) {
		io_SetErr(EFAULT, "Corrupted memory address");
		return -2;
	} else
		idx = BucketIndex(((u_int*)data)[-2] * sizeof(u_int));

	mpool_lock(mp);
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
		if (mem_data(m, void*) == data) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}
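
/*
 * Example (a sketch): with purge == 0 the block is parked on the
 * inactive list for reuse by a later mpool_malloc() of the same bucket
 * size; with purge != 0 it is released to the system immediately.
 *
 *	void *p = mpool_malloc(mp, 100, "tmp");
 *	mpool_free(mp, p, 0);			// cache the block
 *	p = mpool_malloc(mp, 100, "tmp");	// likely reuses the cached block
 *	mpool_free(mp, p, 1);			// release it for real
 */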

/*
 * mpool_free2() - Free memory allocated with mpool_malloc() by size and memory name
 *
 * @mp = Memory pool
 * @size = Allocated memory data size
 * @memname = Memory name
 * @purge = if !=0 force release of the memory block
 * return: <0 error or 0 ok released memory block
 */
int
mpool_free2(mpool_t * __restrict mp, u_int size, const char *memname, int purge)
{
	int idx;
	struct tagAlloc *m, *tmp;

	if (!mp || !memname) {
		io_SetErr(EINVAL, "Pool or memory name is not specified");
		return -1;
	} else
		idx = BucketIndex(size);

	mpool_lock(mp);
	TAILQ_FOREACH_SAFE(m, &mp->pool_active[idx], alloc_node, tmp)
		if (!strcmp(m->alloc_name, memname)) {
			TAILQ_REMOVE(&mp->pool_active[idx], m, alloc_node);
			/* statistics */
			mp->pool_calls.alloc--;
			mp->pool_bytes.alloc -= mem_size(m);

			if (!purge) {
				TAILQ_INSERT_HEAD(&mp->pool_inactive[idx], m, alloc_node);
				/* statistics */
				mp->pool_calls.cache++;
				mp->pool_bytes.cache += mem_size(m);
			} else {
				/* statistics */
				mp->pool_calls.free++;
				mp->pool_bytes.free += mem_size(m);
				/* quota */
				mp->pool_quota.curr -= mem_size(m);

				if (m->alloc_mem)
					free(m->alloc_mem);
				free(m);
			}
			break;
		}
	mpool_unlock(mp);

	return 0;
}

/*
 * mpool_getmembynam() - Find an allocated memory block by size and memory name
 *
 * @mp = Memory pool
 * @size = Memory size
 * @memname = Memory name
 * return: NULL on error or not found, !=NULL allocated memory
 */
inline struct tagAlloc *
mpool_getmembynam(mpool_t * __restrict mp, u_int size, const char *memname)
{
	int idx;
	struct tagAlloc *m = NULL;

	if (!mp || !memname)
		return NULL;

	idx = BucketIndex(size);
	TAILQ_FOREACH(m, &mp->pool_active[idx], alloc_node)
		if (!strcmp(m->alloc_name, memname))
			break;

	if (!m)		/* not found */
		return NULL;

	return mem_data(m, void*);
}

/*
 * mpool_getsizebyaddr() - Get size of allocated memory block by address
 *
 * @data = allocated memory from mpool_malloc()
 * return: usable size of the allocated memory block
 */
inline u_int
mpool_getsizebyaddr(void * __restrict data)
{
	if (mpool_chkaddr(data))
		return 0;

	return (((u_int*) data)[-2] * sizeof(u_int));
}

/*
 * mpool_chkaddr() - Check validity of given address
 *
 * @data = allocated memory from mpool_malloc()
 * return: -1 bad address, 1 corrupted address or 0 ok
 */
inline int
mpool_chkaddr(void * __restrict data)
{
	/* check address range */
	if (MEM_BADADDR(data))
		return -1;
	/* check sentinel */
	if (MEM_CORRUPT(data))
		return 1;
	/* data address is ok! */
	return 0;
}

/*
 * mpool_setquota() - Change maximum memory quota
 *
 * @mp = Memory pool
 * @maxmem = New max quota size
 * return: old maximum memory quota size
 */
inline u_long
mpool_setquota(mpool_t * __restrict mp, u_long maxmem)
{
	u_long ret;

	if (!mp)
		return 0;

	ret = mp->pool_quota.max;
	mp->pool_quota.max = maxmem;

	/* if the new max quota is less than the currently allocated memory, 
	 * try to purge cached memory blocks
	 */
	if (mp->pool_quota.max < mp->pool_quota.curr)
		mpool_purge(mp, 0);

	return ret;
}

/*
 * mpool_getquota() - Get memory quota
 *
 * @mp = Memory pool
 * @currmem = Return current memory
 * @maxmem = Return max quota size
 * return: none
 */
inline void
mpool_getquota(mpool_t * __restrict mp, u_long *currmem, u_long *maxmem)
{
	if (!mp)
		return;

	if (maxmem)
		*maxmem = mp->pool_quota.max;
	if (currmem)
		*currmem = mp->pool_quota.curr;
}

/* ----------------------------------------------------------- */

/*
 * mpool_statistics() - Dump statistics from memory pool buckets
 *
 * @mp = Memory pool
 * @cb = Export statistics to callback
 * return: none
 */
void
mpool_statistics(mpool_t * __restrict mp, mpool_stat_cb cb)
{
	struct tagAlloc *m;
	register int i, act, inact;

	if (!mp || !cb)
		return;

	for (i = act = inact = 0; i < MEM_BUCKETS; act = inact = 0, i++) {
		TAILQ_FOREACH(m, &mp->pool_active[i], alloc_node)
			act++;
		TAILQ_FOREACH(m, &mp->pool_inactive[i], alloc_node)
			inact++;

		cb(1 << (i + MEM_MIN_BUCKET), act, inact);
	}
}
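
/*
 * Example callback (a sketch; the mpool_stat_cb signature is assumed
 * from the call above to be void (*)(u_int, u_int, u_int)):
 *
 *	static void
 *	dump_bucket(u_int size, u_int act, u_int inact)
 *	{
 *		printf("bucket %6u bytes: %u active, %u inactive\n",
 *		    size, act, inact);
 *	}
 *
 *	mpool_statistics(mp, dump_bucket);
 */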