/* rsync: lib/pool_alloc.c */

#include "rsync.h"

#define POOL_DEF_EXTENT	(32 * 1024)

struct alloc_pool
{
	size_t size;		/* extent size */
	size_t quantum;		/* allocation quantum */
	struct pool_extent *extents;	/* top extent is "live" */
	void (*bomb)(const char *);	/* function to call if
					 * malloc fails */
	int flags;

	/* statistical data */
	unsigned long e_created;	/* extents created */
	unsigned long e_freed;		/* extents destroyed */
	int64 n_allocated;	/* calls to alloc */
	int64 n_freed;		/* calls to free */
	int64 b_allocated;	/* cum. bytes allocated */
	int64 b_freed;		/* cum. bytes freed */
};

struct pool_extent
{
	void *start;		/* starting address */
	size_t free;		/* free bytecount */
	size_t bound;		/* bytes bound by padding,
				 * overhead and freed */
	struct pool_extent *next;
};

struct align_test {
	void *foo;
	int64 bar;
};

#define MINALIGN offsetof(struct align_test, bar)

/* Temporarily cast a void* var into a char* var when adding an offset (to
 * keep some compilers from complaining about the pointer arithmetic). */
#define PTR_ADD(b,o)	( (void*) ((char*)(b) + (o)) )

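/* Extent layout notes:
 *
 * Each extent's data area is a single block of pool->size bytes obtained with
 * new_array().  Without POOL_APPEND, the struct pool_extent header lives in a
 * separately allocated block; with POOL_APPEND, extra room is allocated and
 * the header is stored just past the data area.  POOL_INTERN additionally
 * shrinks the data area so that data plus header fit within the originally
 * requested extent size (see pool_create), and implies POOL_APPEND. */
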
alloc_pool_t
pool_create(size_t size, size_t quantum, void (*bomb)(const char *), int flags)
{
	struct alloc_pool *pool;

	if (!(pool = new(struct alloc_pool)))
		return pool;
	memset(pool, 0, sizeof (struct alloc_pool));

	pool->size = size	/* round extent size to min alignment reqs */
	    ? (size + MINALIGN - 1) & ~(MINALIGN - 1)
	    : POOL_DEF_EXTENT;
	if (flags & POOL_INTERN) {
		pool->size -= sizeof (struct pool_extent);
		flags |= POOL_APPEND;
	}
	pool->quantum = quantum ? quantum : MINALIGN;
	pool->bomb = bomb;
	pool->flags = flags;

	return pool;
}

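#if 0
/* Illustrative usage sketch only (not part of the library): one plausible way
 * a caller might create a pool, allocate from it, and tear it down.  The
 * sizes, flags, and the example_* names below are hypothetical. */
static void example_bomb(const char *msg)
{
	fprintf(stderr, "pool allocation failed: %s\n", msg);
	exit(1);
}

static void example_pool_usage(void)
{
	alloc_pool_t pool;
	char *name;

	/* 32 KB extents, 16-byte quantum, zero-filled memory. */
	if (!(pool = pool_create(32 * 1024, 16, example_bomb, POOL_CLEAR)))
		return;

	/* Request 100 bytes; pool_alloc rounds this up to the quantum. */
	name = pool_alloc(pool, 100, "example_pool_usage");
	memcpy(name, "example", 8);

	/* Free every extent (and the pool struct itself) in one call. */
	pool_destroy(pool);
}
#endif
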
void
pool_destroy(alloc_pool_t p)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	struct pool_extent *cur, *next;

	if (!pool)
		return;

	for (cur = pool->extents; cur; cur = next) {
		next = cur->next;
		free(cur->start);
		if (!(pool->flags & POOL_APPEND))
			free(cur);
	}
	free(pool);
}

void *
pool_alloc(alloc_pool_t p, size_t len, const char *bomb_msg)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	if (!pool)
		return NULL;

	if (!len)
		len = pool->quantum;
	else if (pool->quantum > 1 && len % pool->quantum)
		len += pool->quantum - len % pool->quantum;

	if (len > pool->size)
		goto bomb_out;

	if (!pool->extents || len > pool->extents->free) {
		void *start;
		size_t free;
		size_t bound;
		size_t skew;
		size_t asize;
		struct pool_extent *ext;

		free = pool->size;
		bound = 0;

		asize = pool->size;
		if (pool->flags & POOL_APPEND)
			asize += sizeof (struct pool_extent);

		if (!(start = new_array(char, asize)))
			goto bomb_out;

		if (pool->flags & POOL_CLEAR)
			memset(start, 0, free);

		if (pool->flags & POOL_APPEND)
			ext = PTR_ADD(start, free);
		else if (!(ext = new(struct pool_extent)))
			goto bomb_out;
		if (pool->flags & POOL_QALIGN && pool->quantum > 1
		 && (skew = (size_t)PTR_ADD(start, free) % pool->quantum)) {
			bound += skew;
			free -= skew;
		}
		ext->start = start;
		ext->free = free;
		ext->bound = bound;
		ext->next = pool->extents;
		pool->extents = ext;

		pool->e_created++;
	}

	pool->n_allocated++;
	pool->b_allocated += len;

	pool->extents->free -= len;

	return PTR_ADD(pool->extents->start, pool->extents->free);

  bomb_out:
	if (pool->bomb)
		(*pool->bomb)(bomb_msg);
	return NULL;
}

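/* Worked example of the rounding in pool_alloc(): with a quantum of 16, a
 * request for 21 bytes is padded to 32 (21 + 16 - 21 % 16), and a request of
 * 0 bytes consumes one full quantum.  The rounded length is carved from the
 * top of the live extent, so successive allocations move downward in memory. */
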
/* This function lets you declare that you are done using some memory in the
 * pool.  If you free all the memory in one of a pool's extents, that extent
 * will be freed. */
void
pool_free(alloc_pool_t p, size_t len, void *addr)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur, *prev;

	if (!pool)
		return;

	if (!len)
		len = pool->quantum;
	else if (pool->quantum > 1 && len % pool->quantum)
		len += pool->quantum - len % pool->quantum;

	pool->n_freed++;
	pool->b_freed += len;

	for (prev = NULL, cur = pool->extents; cur; prev = cur, cur = cur->next) {
		if (addr >= cur->start
		 && addr < PTR_ADD(cur->start, pool->size))
			break;
	}
	if (!cur)
		return;

	if (!prev) {
		/* The "live" extent is kept ready for more allocations. */
		if (cur->free + cur->bound + len >= pool->size) {
			size_t skew;

			if (pool->flags & POOL_CLEAR) {
				memset(PTR_ADD(cur->start, cur->free), 0,
				       pool->size - cur->free);
			}
			cur->free = pool->size;
			cur->bound = 0;
			if (pool->flags & POOL_QALIGN && pool->quantum > 1
			 && (skew = (size_t)PTR_ADD(cur->start, cur->free) % pool->quantum)) {
				cur->bound += skew;
				cur->free -= skew;
			}
		} else if (addr == PTR_ADD(cur->start, cur->free)) {
			if (pool->flags & POOL_CLEAR)
				memset(addr, 0, len);
			cur->free += len;
		} else
			cur->bound += len;
	} else {
		cur->bound += len;

		if (cur->free + cur->bound >= pool->size) {
			prev->next = cur->next;
			free(cur->start);
			if (!(pool->flags & POOL_APPEND))
				free(cur);
			pool->e_freed++;
		} else if (prev != pool->extents) {
			/* Move the extent to be the first non-live extent. */
			prev->next = cur->next;
			cur->next = pool->extents->next;
			pool->extents->next = cur;
		}
	}
}

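#if 0
/* Illustrative sketch only (hypothetical names): pool_free() reclaims space
 * in the live extent immediately when allocations are released in reverse
 * (most-recent-first) order, since the freed address then matches the
 * extent's current "free" offset; out-of-order frees merely accumulate in
 * "bound" until the whole extent can be discarded. */
static void example_lifo_free(alloc_pool_t pool)
{
	void *a = pool_alloc(pool, 64, "example a");
	void *b = pool_alloc(pool, 64, "example b");

	pool_free(pool, 64, b);	/* b was allocated last, so its space is reusable at once */
	pool_free(pool, 64, a);	/* now a's space is reclaimed too */
}
#endif
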
/* This allows you to declare that the given address marks the edge of some
 * pool memory that is no longer needed.  Any extents that hold only data
 * older than the boundary address are freed.  NOTE: You MUST NOT USE BOTH
 * pool_free() and pool_free_old() on the same pool!! */
void
pool_free_old(alloc_pool_t p, void *addr)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur, *prev, *next;

	if (!pool || !addr)
		return;

	for (prev = NULL, cur = pool->extents; cur; prev = cur, cur = cur->next) {
		if (addr >= cur->start
		 && addr < PTR_ADD(cur->start, pool->size))
			break;
	}
	if (!cur)
		return;

	if (addr == PTR_ADD(cur->start, cur->free)) {
		if (prev) {
			prev->next = NULL;
			next = cur;
		} else {
			size_t skew;

			/* The most recent live extent can just be reset. */
			if (pool->flags & POOL_CLEAR)
				memset(addr, 0, pool->size - cur->free);
			cur->free = pool->size;
			cur->bound = 0;
			if (pool->flags & POOL_QALIGN && pool->quantum > 1
			 && (skew = (size_t)PTR_ADD(cur->start, cur->free) % pool->quantum)) {
				cur->bound += skew;
				cur->free -= skew;
			}
			next = cur->next;
			cur->next = NULL;
		}
	} else {
		next = cur->next;
		cur->next = NULL;
	}

	while ((cur = next) != NULL) {
		next = cur->next;
		free(cur->start);
		if (!(pool->flags & POOL_APPEND))
			free(cur);
		pool->e_freed++;
	}
}

/* If the current extent doesn't have "len" free space in it, mark it as full
 * so that the next alloc will start a new extent.  If len is (size_t)-1, this
 * bump will always occur.  The function returns a boundary address that can
 * be used with pool_free_old(), or NULL if nothing has been allocated. */
void *
pool_boundary(alloc_pool_t p, size_t len)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur;

	if (!pool || !pool->extents)
		return NULL;

	cur = pool->extents;

	if (cur->free < len) {
		cur->bound += cur->free;
		cur->free = 0;
	}

	return PTR_ADD(cur->start, cur->free);
}

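#if 0
/* Illustrative sketch only (hypothetical names): the intended pairing of
 * pool_boundary() and pool_free_old().  A boundary marker is taken before a
 * batch of allocations; once everything older than the marker is no longer
 * needed, a single pool_free_old() call releases the extents that hold only
 * that older data. */
static void example_boundary_usage(alloc_pool_t pool)
{
	void *marker, *item;

	marker = pool_boundary(pool, 0);	/* remember where the old data ends */

	item = pool_alloc(pool, 128, "example item");
	/* ... use item and any other newer allocations ... */

	pool_free_old(pool, marker);		/* discard everything older than the marker */
}
#endif
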
#define FDPRINT(label, value) \
	do { \
		int len = snprintf(buf, sizeof buf, label, value); \
		if (write(fd, buf, len) != len) \
			ret = -1; \
	} while (0)

#define FDEXTSTAT(ext) \
	do { \
		int len = snprintf(buf, sizeof buf, "  %12ld  %5ld\n", \
				   (long)ext->free, (long)ext->bound); \
		if (write(fd, buf, len) != len) \
			ret = -1; \
	} while (0)

int
pool_stats(alloc_pool_t p, int fd, int summarize)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	struct pool_extent *cur;
	char buf[BUFSIZ];
	int ret = 0;

	if (!pool)
		return ret;

	FDPRINT("  Extent size:       %12ld\n", (long)pool->size);
	FDPRINT("  Alloc quantum:     %12ld\n", (long)pool->quantum);
	FDPRINT("  Extents created:   %12lu\n", pool->e_created);
	FDPRINT("  Extents freed:     %12lu\n", pool->e_freed);
	FDPRINT("  Alloc count:       %12.0f\n", (double)pool->n_allocated);
	FDPRINT("  Free count:        %12.0f\n", (double)pool->n_freed);
	FDPRINT("  Bytes allocated:   %12.0f\n", (double)pool->b_allocated);
	FDPRINT("  Bytes freed:       %12.0f\n", (double)pool->b_freed);

	if (summarize)
		return ret;

	if (!pool->extents)
		return ret;

	if (write(fd, "\n", 1) != 1)
		ret = -1;

	for (cur = pool->extents; cur; cur = cur->next)
		FDEXTSTAT(cur);

	return ret;
}
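
/* Usage note (an assumption about typical callers, not rsync-specific code):
 * pool_stats() writes its report with write() to the given file descriptor,
 * so a caller could dump statistics to stderr with something like
 * pool_stats(pool, STDERR_FILENO, 0), where a zero "summarize" argument
 * requests the per-extent detail lines in addition to the summary. */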