Annotation of embedaddon/ntp/ElectricFence/efence.c, revision 1.1.1.1
1.1 misho 1: /*
2: * Electric Fence - Red-Zone memory allocator.
3: * Bruce Perens, 1988, 1993
4: *
5: * This is a special version of malloc() and company for debugging software
6: * that is suspected of overrunning or underrunning the boundaries of a
7: * malloc buffer, or touching free memory.
8: *
9: * It arranges for each malloc buffer to be followed (or preceded)
10: * in the address space by an inaccessable virtual memory page,
11: * and for free memory to be inaccessable. If software touches the
12: * inaccessable page, it will get an immediate segmentation
13: * fault. It is then trivial to uncover the offending code using a debugger.
14: *
15: * An advantage of this product over most malloc debuggers is that this one
16: * detects reading out of bounds as well as writing, and this one stops on
17: * the exact instruction that causes the error, rather than waiting until the
18: * next boundary check.
19: *
20: * There is one product that debugs malloc buffer overruns
21: * better than Electric Fence: "Purify" from Purify Systems, and that's only
22: * a small part of what Purify does. I'm not affiliated with Purify, I just
23: * respect a job well done.
24: *
25: * This version of malloc() should not be linked into production software,
26: * since it tremendously increases the time and memory overhead of malloc().
27: * Each malloc buffer will consume a minimum of two virtual memory pages,
28: * this is 16 kilobytes on many systems. On some systems it will be necessary
29: * to increase the amount of swap space in order to debug large programs that
30: * perform lots of allocation, because of the per-buffer overhead.
31: */
32: #include "efence.h"
33: #include <stdlib.h>
34: #include <unistd.h>
35: #include <memory.h>
36: #include <string.h>
37:
38: #ifdef malloc
39: #undef malloc
40: #endif
41:
42: #ifdef calloc
43: #undef calloc
44: #endif
45:
46: static const char version[] = "\n Electric Fence 2.0.5"
47: " Copyright (C) 1987-1995 Bruce Perens.\n";
48:
49: /*
50: * MEMORY_CREATION_SIZE is the amount of memory to get from the operating
51: * system at one time. We'll break that memory down into smaller pieces for
52: * malloc buffers. One megabyte is probably a good value.
53: */
54: #define MEMORY_CREATION_SIZE 1024 * 1024
55:
/*
 * Enum Mode indicates the status of a malloc buffer.
 * Slots cycle NOT_IN_USE -> ALLOCATED -> FREE (or PROTECTED, when
 * EF_PROTECT_FREE is set, in which case the memory is never reused).
 * NOTE(review): the _Mode tag is in the implementation-reserved
 * namespace; kept unchanged for source compatibility.
 */
enum _Mode {
	NOT_IN_USE = 0,	/* Available to represent a malloc buffer. */
	FREE,		/* A free buffer. */
	ALLOCATED,	/* A buffer that is in use. */
	PROTECTED,	/* A freed buffer that can not be allocated again. */
	INTERNAL_USE	/* A buffer used internally by malloc(). */
};
typedef enum _Mode	Mode;
67:
/*
 * Struct Slot contains all of the information about a malloc buffer except
 * for the contents of its memory.
 */
struct _Slot {
	void *		userAddress;	/* Address returned to the caller. */
	void *		internalAddress; /* Start of the underlying page range
					  * (includes the dead page). */
	size_t		userSize;	/* Bytes the caller asked for, after
					 * any alignment padding. */
	size_t		internalSize;	/* Page-rounded size of the whole
					 * range, dead page included. */
	Mode		mode;		/* Current state of this buffer. */
};
typedef struct _Slot	Slot;
80:
/*
 * EF_ALIGNMENT is a global variable used to control the default alignment
 * of buffers returned by malloc(), calloc(), and realloc(). It is all-caps
 * so that its name matches the name of the environment variable that is used
 * to set it. This gives the programmer one less name to remember.
 * If the value is -1, it will be set from the environment or sizeof(int)
 * at run time.
 */
int		EF_ALIGNMENT = -1;

/*
 * EF_PROTECT_FREE is a global variable used to control the disposition of
 * memory that is released using free(). It is all-caps so that its name
 * matches the name of the environment variable that is used to set it.
 * If its value is non-zero, memory released by free is made
 * inaccessable and never allocated again. Any software that touches free
 * memory will then get a segmentation fault. If its value is zero, freed
 * memory will be available for reallocation, but will still be inaccessable
 * until it is reallocated.
 * If the value is -1, it will be set from the environment or to 0 at run-time.
 */
int		EF_PROTECT_FREE = -1;

/*
 * EF_PROTECT_BELOW is used to modify the behavior of the allocator. When
 * its value is non-zero, the allocator will place an inaccessable page
 * immediately _before_ the malloc buffer in the address space, instead
 * of _after_ it. Use this to detect malloc buffer under-runs, rather than
 * over-runs. It won't detect both at the same time, so you should test your
 * software twice, once with this value clear, and once with it set.
 * If the value is -1, it will be set from the environment or to zero at
 * run-time
 */
int		EF_PROTECT_BELOW = -1;

/*
 * EF_ALLOW_MALLOC_0 is set if Electric Fence is to allow malloc(0). I
 * trap malloc(0) by default because it is a common source of bugs.
 */
int		EF_ALLOW_MALLOC_0 = -1;

/*
 * allocationList points to the array of slot structures used to manage the
 * malloc arena. It doubles as the "is the allocator initialized yet" flag:
 * malloc(), memalign() and free() test it against zero before doing work.
 */
static Slot *		allocationList = 0;

/*
 * allocationListSize is the size of the allocation list, in bytes. This
 * will always be a multiple of the page size.
 */
static size_t		allocationListSize = 0;

/*
 * slotCount is the number of Slot structures in allocationList.
 */
static size_t		slotCount = 0;

/*
 * unUsedSlots is the number of Slot structures that are currently available
 * to represent new malloc buffers. When this number gets too low, we will
 * create new slots.
 */
static size_t		unUsedSlots = 0;

/*
 * slotsPerPage is the number of slot structures that fit in a virtual
 * memory page.
 */
static size_t		slotsPerPage = 0;

/*
 * internalUse is set when allocating and freeing the allocator-internal
 * data structures.
 */
static int		internalUse = 0;

/*
 * noAllocationListProtection is set to tell malloc() and free() not to
 * manipulate the protection of the allocation list. This is only set in
 * realloc(), which does it to save on slow system calls, and in
 * allocateMoreSlots(), which does it because it changes the allocation list.
 */
static int		noAllocationListProtection = 0;

/*
 * bytesPerPage is set at run-time to the number of bytes per virtual-memory
 * page, as returned by Page_Size().
 */
static size_t		bytesPerPage = 0;
171:
/*
 * Report a "shouldn't happen" inconsistency inside the allocator and
 * terminate via EF_Abort(). Never returns.
 */
static void
internalError(void)
{
	EF_Abort("Internal error in allocator.");
}
181:
182: /*
183: * initialize sets up the memory allocation arena and the run-time
184: * configuration information.
185: */
186: static void
187: initialize(void)
188: {
189: size_t size = MEMORY_CREATION_SIZE;
190: size_t slack;
191: char * string;
192: Slot * slot;
193:
194: EF_Print(version);
195:
196: /*
197: * Import the user's environment specification of the default
198: * alignment for malloc(). We want that alignment to be under
199: * user control, since smaller alignment lets us catch more bugs,
200: * however some software will break if malloc() returns a buffer
201: * that is not word-aligned.
202: *
203: * I would like
204: * alignment to be zero so that we could catch all one-byte
205: * overruns, however if malloc() is asked to allocate an odd-size
206: * buffer and returns an address that is not word-aligned, or whose
207: * size is not a multiple of the word size, software breaks.
208: * This was the case with the Sun string-handling routines,
209: * which can do word fetches up to three bytes beyond the end of a
210: * string. I handle this problem in part by providing
211: * byte-reference-only versions of the string library functions, but
212: * there are other functions that break, too. Some in X Windows, one
213: * in Sam Leffler's TIFF library, and doubtless many others.
214: */
215: if ( EF_ALIGNMENT == -1 ) {
216: if ( (string = getenv("EF_ALIGNMENT")) != 0 )
217: EF_ALIGNMENT = (size_t)atoi(string);
218: else
219: EF_ALIGNMENT = sizeof(int);
220: }
221:
222: /*
223: * See if the user wants to protect the address space below a buffer,
224: * rather than that above a buffer.
225: */
226: if ( EF_PROTECT_BELOW == -1 ) {
227: if ( (string = getenv("EF_PROTECT_BELOW")) != 0 )
228: EF_PROTECT_BELOW = (atoi(string) != 0);
229: else
230: EF_PROTECT_BELOW = 0;
231: }
232:
233: /*
234: * See if the user wants to protect memory that has been freed until
235: * the program exits, rather than until it is re-allocated.
236: */
237: if ( EF_PROTECT_FREE == -1 ) {
238: if ( (string = getenv("EF_PROTECT_FREE")) != 0 )
239: EF_PROTECT_FREE = (atoi(string) != 0);
240: else
241: EF_PROTECT_FREE = 0;
242: }
243:
244: /*
245: * See if the user wants to allow malloc(0).
246: */
247: if ( EF_ALLOW_MALLOC_0 == -1 ) {
248: if ( (string = getenv("EF_ALLOW_MALLOC_0")) != 0 )
249: EF_ALLOW_MALLOC_0 = (atoi(string) != 0);
250: else
251: EF_ALLOW_MALLOC_0 = 0;
252: }
253:
254: /*
255: * Get the run-time configuration of the virtual memory page size.
256: */
257: bytesPerPage = Page_Size();
258:
259: /*
260: * Figure out how many Slot structures to allocate at one time.
261: */
262: slotCount = slotsPerPage = bytesPerPage / sizeof(Slot);
263: allocationListSize = bytesPerPage;
264:
265: if ( allocationListSize > size )
266: size = allocationListSize;
267:
268: if ( (slack = size % bytesPerPage) != 0 )
269: size += bytesPerPage - slack;
270:
271: /*
272: * Allocate memory, and break it up into two malloc buffers. The
273: * first buffer will be used for Slot structures, the second will
274: * be marked free.
275: */
276: slot = allocationList = (Slot *)Page_Create(size);
277: memset((char *)allocationList, 0, allocationListSize);
278:
279: slot[0].internalSize = slot[0].userSize = allocationListSize;
280: slot[0].internalAddress = slot[0].userAddress = allocationList;
281: slot[0].mode = INTERNAL_USE;
282: if ( size > allocationListSize ) {
283: slot[1].internalAddress = slot[1].userAddress
284: = ((char *)slot[0].internalAddress) + slot[0].internalSize;
285: slot[1].internalSize
286: = slot[1].userSize = size - slot[0].internalSize;
287: slot[1].mode = FREE;
288: }
289:
290: /*
291: * Deny access to the free page, so that we will detect any software
292: * that treads upon free memory.
293: */
294: Page_DenyAccess(slot[1].internalAddress, slot[1].internalSize);
295:
296: /*
297: * Account for the two slot structures that we've used.
298: */
299: unUsedSlots = slotCount - 2;
300: }
301:
302: /*
303: * allocateMoreSlots is called when there are only enough slot structures
304: * left to support the allocation of a single malloc buffer.
305: */
306: static void
307: allocateMoreSlots(void)
308: {
309: size_t newSize = allocationListSize + bytesPerPage;
310: void * newAllocation;
311: void * oldAllocation = allocationList;
312:
313: Page_AllowAccess(allocationList, allocationListSize);
314: noAllocationListProtection = 1;
315: internalUse = 1;
316:
317: newAllocation = malloc(newSize);
318: memcpy(newAllocation, allocationList, allocationListSize);
319: memset(&(((char *)newAllocation)[allocationListSize]), 0, bytesPerPage);
320:
321: allocationList = (Slot *)newAllocation;
322: allocationListSize = newSize;
323: slotCount += slotsPerPage;
324: unUsedSlots += slotsPerPage;
325:
326: free(oldAllocation);
327:
328: /*
329: * Keep access to the allocation list open at this point, because
330: * I am returning to memalign(), which needs that access.
331: */
332: noAllocationListProtection = 0;
333: internalUse = 0;
334: }
335:
336: /*
337: * This is the memory allocator. When asked to allocate a buffer, allocate
338: * it in such a way that the end of the buffer is followed by an inaccessable
339: * memory page. If software overruns that buffer, it will touch the bad page
340: * and get an immediate segmentation fault. It's then easy to zero in on the
341: * offending code with a debugger.
342: *
343: * There are a few complications. If the user asks for an odd-sized buffer,
344: * we would have to have that buffer start on an odd address if the byte after
345: * the end of the buffer was to be on the inaccessable page. Unfortunately,
346: * there is lots of software that asks for odd-sized buffers and then
347: * requires that the returned address be word-aligned, or the size of the
348: * buffer be a multiple of the word size. An example are the string-processing
349: * functions on Sun systems, which do word references to the string memory
350: * and may refer to memory up to three bytes beyond the end of the string.
351: * For this reason, I take the alignment requests to memalign() and valloc()
352: * seriously, and grant them even though they cost extra memory.
353: *
354: * Electric Fence wastes lots of memory. I do a best-fit allocator here
355: * so that it won't waste even more. It's slow, but thrashing because your
356: * working set is too big for a system's RAM is even slower.
357: */
/*
 * memalign is the core allocator: malloc(), calloc(), realloc() and
 * valloc() all funnel through here.
 *
 * alignment: required alignment of the returned address (ignored when
 *	EF_PROTECT_BELOW is set, since the address is then page-aligned).
 * userSize: number of bytes the caller asked for; may be padded up to a
 *	multiple of the alignment so the byte after the buffer lands on
 *	the dead page.
 * Returns the address of a buffer whose overrunning (or, with
 * EF_PROTECT_BELOW, underrunning) access faults immediately.
 * Aborts via EF_Abort() on malloc(0) (unless EF_ALLOW_MALLOC_0) and on
 * internal inconsistencies.
 */
extern C_LINKAGE void *
memalign(size_t alignment, size_t userSize)
{
	register Slot *	slot;
	register size_t	count;
	Slot *	fullSlot = 0;
	Slot *	emptySlots[2];
	size_t	internalSize;
	size_t	slack;
	char *	address;

	/* Lazy initialization on the very first allocation. */
	if ( allocationList == 0 )
		initialize();

	if ( userSize == 0 && !EF_ALLOW_MALLOC_0 )
		EF_Abort("Allocating 0 bytes, probably a bug.");

	/*
	 * If EF_PROTECT_BELOW is set, all addresses returned by malloc()
	 * and company will be page-aligned, so no padding is needed here.
	 * Otherwise, pad userSize so that the end of the buffer meets the
	 * alignment requirement as well as the start.
	 */
	if ( !EF_PROTECT_BELOW && alignment > 1 ) {
		if ( (slack = userSize % alignment) != 0 )
			userSize += alignment - slack;
	}

	/*
	 * The internal size of the buffer is rounded up to the next page-size
	 * boundary, and then we add another page's worth of memory for the
	 * dead page.
	 */
	internalSize = userSize + bytesPerPage;
	if ( (slack = internalSize % bytesPerPage) != 0 )
		internalSize += bytesPerPage - slack;

	/*
	 * These will hold the addresses of two empty Slot structures, that
	 * can be used to hold information for any memory I create, and any
	 * memory that I mark free.
	 */
	emptySlots[0] = 0;
	emptySlots[1] = 0;

	/*
	 * The internal memory used by the allocator is currently
	 * inaccessable, so that errant programs won't scrawl on the
	 * allocator's arena. I'll un-protect it here so that I can make
	 * a new allocation. I'll re-protect it before I return.
	 * (noAllocationListProtection is set by realloc() and
	 * allocateMoreSlots(), which manage the protection themselves.)
	 */
	if ( !noAllocationListProtection )
		Page_AllowAccess(allocationList, allocationListSize);

	/*
	 * If I'm running out of empty slots, create some more before
	 * I don't have enough slots left to make an allocation.
	 * NOTE(review): the threshold 7 presumably leaves headroom for the
	 * slots this allocation and allocateMoreSlots()' own internal
	 * malloc() may consume (each allocation can use up to two) —
	 * TODO confirm the exact worst case.
	 */
	if ( !internalUse && unUsedSlots < 7 ) {
		allocateMoreSlots();
	}

	/*
	 * Iterate through all of the slot structures. Attempt to find a slot
	 * containing free memory of the exact right size (best fit). Accept
	 * a slot with more memory than we want, if the exact right size is
	 * not available. Find two slot structures that are not in use. We
	 * will need one if we split a buffer into free and allocated parts,
	 * and the second if we have to create new memory and mark it as free.
	 */
	for ( slot = allocationList, count = slotCount ; count > 0; count-- ) {
		if ( slot->mode == FREE
		 && slot->internalSize >= internalSize ) {
			/* Keep the smallest free buffer that still fits. */
			if ( !fullSlot
			 ||slot->internalSize < fullSlot->internalSize){
				fullSlot = slot;
				if ( slot->internalSize == internalSize
				 && emptySlots[0] )
					break;	/* All done, */
			}
		}
		else if ( slot->mode == NOT_IN_USE ) {
			if ( !emptySlots[0] )
				emptySlots[0] = slot;
			else if ( !emptySlots[1] )
				emptySlots[1] = slot;
			else if ( fullSlot
			 && fullSlot->internalSize == internalSize )
				break;	/* All done. */
		}
		slot++;
	}
	/* allocateMoreSlots() should have guaranteed a spare slot. */
	if ( !emptySlots[0] )
		internalError();

	if ( !fullSlot ) {
		/*
		 * I get here if I haven't been able to find a free buffer
		 * with all of the memory I need. I'll have to create more
		 * memory. I'll mark it all as free, and then split it into
		 * free and allocated portions later.
		 */
		size_t	chunkSize = MEMORY_CREATION_SIZE;

		if ( !emptySlots[1] )
			internalError();

		if ( chunkSize < internalSize )
			chunkSize = internalSize;

		if ( (slack = chunkSize % bytesPerPage) != 0 )
			chunkSize += bytesPerPage - slack;

		/* Use up one of the empty slots to make the full slot. */
		fullSlot = emptySlots[0];
		emptySlots[0] = emptySlots[1];
		fullSlot->internalAddress = Page_Create(chunkSize);
		fullSlot->internalSize = chunkSize;
		fullSlot->mode = FREE;
		unUsedSlots--;
	}

	/*
	 * If I'm allocating memory for the allocator's own data structures,
	 * mark it INTERNAL_USE so that no errant software will be able to
	 * free it.
	 */
	if ( internalUse )
		fullSlot->mode = INTERNAL_USE;
	else
		fullSlot->mode = ALLOCATED;

	/*
	 * If the buffer I've found is larger than I need, split it into
	 * an allocated buffer with the exact amount of memory I need, and
	 * a free buffer containing the surplus memory.
	 */
	if ( fullSlot->internalSize > internalSize ) {
		emptySlots[0]->internalSize
		 = fullSlot->internalSize - internalSize;
		emptySlots[0]->internalAddress
		 = ((char *)fullSlot->internalAddress) + internalSize;
		emptySlots[0]->mode = FREE;
		fullSlot->internalSize = internalSize;
		unUsedSlots--;
	}

	if ( !EF_PROTECT_BELOW ) {
		/*
		 * Arrange the buffer so that it is followed by an inaccessable
		 * memory page. A buffer overrun that touches that page will
		 * cause a segmentation fault.
		 */
		address = (char *)fullSlot->internalAddress;

		/* Set up the "live" page. (size_t: only false when equal.) */
		if ( internalSize - bytesPerPage > 0 )
			 Page_AllowAccess(
			  fullSlot->internalAddress
			 ,internalSize - bytesPerPage);

		address += internalSize - bytesPerPage;

		/* Set up the "dead" page at the end of the buffer. */
		if ( EF_PROTECT_FREE )
			Page_Delete(address, bytesPerPage);
		else
			Page_DenyAccess(address, bytesPerPage);

		/*
		 * Figure out what address to give the user: the buffer ends
		 * flush against the dead page.
		 */
		address -= userSize;
	}
	else {	/* EF_PROTECT_BELOW != 0 */
		/*
		 * Arrange the buffer so that it is preceded by an inaccessable
		 * memory page. A buffer underrun that touches that page will
		 * cause a segmentation fault.
		 */
		address = (char *)fullSlot->internalAddress;

		/* Set up the "dead" page at the start of the buffer. */
		if ( EF_PROTECT_FREE )
			Page_Delete(address, bytesPerPage);
		else
			Page_DenyAccess(address, bytesPerPage);

		address += bytesPerPage;

		/* Set up the "live" page; the user address begins here. */
		if ( internalSize - bytesPerPage > 0 )
			Page_AllowAccess(address, internalSize - bytesPerPage);
	}

	fullSlot->userAddress = address;
	fullSlot->userSize = userSize;

	/*
	 * Make the pool's internal memory inaccessable, so that the program
	 * being debugged can't stomp on it.
	 */
	if ( !internalUse )
		Page_DenyAccess(allocationList, allocationListSize);

	return address;
}
563:
564: /*
565: * Find the slot structure for a user address.
566: */
567: static Slot *
568: slotForUserAddress(void * address)
569: {
570: register Slot * slot = allocationList;
571: register size_t count = slotCount;
572:
573: for ( ; count > 0; count-- ) {
574: if ( slot->userAddress == address )
575: return slot;
576: slot++;
577: }
578:
579: return 0;
580: }
581:
582: /*
583: * Find the slot structure for an internal address.
584: */
585: static Slot *
586: slotForInternalAddress(void * address)
587: {
588: register Slot * slot = allocationList;
589: register size_t count = slotCount;
590:
591: for ( ; count > 0; count-- ) {
592: if ( slot->internalAddress == address )
593: return slot;
594: slot++;
595: }
596: return 0;
597: }
598:
599: /*
600: * Given the internal address of a buffer, find the buffer immediately
601: * before that buffer in the address space. This is used by free() to
602: * coalesce two free buffers into one.
603: */
604: static Slot *
605: slotForInternalAddressPreviousTo(void * address)
606: {
607: register Slot * slot = allocationList;
608: register size_t count = slotCount;
609:
610: for ( ; count > 0; count-- ) {
611: if ( ((char *)slot->internalAddress)
612: + slot->internalSize == address )
613: return slot;
614: slot++;
615: }
616: return 0;
617: }
618:
/*
 * free releases a buffer obtained from malloc() and company.
 * The slot is marked FREE (or PROTECTED when EF_PROTECT_FREE is set),
 * coalesced with any free neighbors, and its pages are made inaccessable
 * so stale pointers fault on touch.
 * Aborts on addresses not from malloc() and on double frees.
 * NOTE(review): "%a" appears to be an address conversion implemented by
 * EF_Abort's own formatter — confirm in the EF print code.
 */
extern C_LINKAGE void
free(void * address)
{
	Slot *	slot;
	Slot *	previousSlot = 0;
	Slot *	nextSlot = 0;

	/* free(NULL) is a no-op, as in the standard allocator. */
	if ( address == 0 )
		return;

	if ( allocationList == 0 )
		EF_Abort("free() called before first malloc().");

	/*
	 * Un-protect the allocation list, unless realloc() or
	 * allocateMoreSlots() already did so.
	 */
	if ( !noAllocationListProtection )
		Page_AllowAccess(allocationList, allocationListSize);

	slot = slotForUserAddress(address);

	if ( !slot )
		EF_Abort("free(%a): address not from malloc().", address);

	if ( slot->mode != ALLOCATED ) {
		/* The allocator may free its own INTERNAL_USE buffers. */
		if ( internalUse && slot->mode == INTERNAL_USE )
			/* Do nothing. */;
		else {
			/* Catches double free and free of FREE/PROTECTED. */
			EF_Abort(
			 "free(%a): freeing free memory."
			,address);
		}
	}

	if ( EF_PROTECT_FREE )
		slot->mode = PROTECTED;
	else
		slot->mode = FREE;

	/* Locate the address-adjacent buffers for coalescing. */
	previousSlot = slotForInternalAddressPreviousTo(slot->internalAddress);
	nextSlot = slotForInternalAddress(
	 ((char *)slot->internalAddress) + slot->internalSize);

	if ( previousSlot
	 && (previousSlot->mode == FREE || previousSlot->mode == PROTECTED) ) {
		/* Coalesce previous slot with this one. */
		previousSlot->internalSize += slot->internalSize;
		if ( EF_PROTECT_FREE )
			previousSlot->mode = PROTECTED;

		slot->internalAddress = slot->userAddress = 0;
		slot->internalSize = slot->userSize = 0;
		slot->mode = NOT_IN_USE;
		/* Continue working with the merged (previous) slot. */
		slot = previousSlot;
		unUsedSlots++;
	}
	if ( nextSlot
	 && (nextSlot->mode == FREE || nextSlot->mode == PROTECTED) ) {
		/* Coalesce next slot with this one. */
		slot->internalSize += nextSlot->internalSize;
		nextSlot->internalAddress = nextSlot->userAddress = 0;
		nextSlot->internalSize = nextSlot->userSize = 0;
		nextSlot->mode = NOT_IN_USE;
		unUsedSlots++;
	}

	/* A free slot's user range is its whole internal range. */
	slot->userAddress = slot->internalAddress;
	slot->userSize = slot->internalSize;

	/*
	 * Free memory is _always_ set to deny access. When EF_PROTECT_FREE
	 * is true, free memory is never reallocated, so it remains access
	 * denied for the life of the process. When EF_PROTECT_FREE is false,
	 * the memory may be re-allocated, at which time access to it will be
	 * allowed again.
	 *
	 * Some operating systems allow munmap() with single-page resolution,
	 * and allow you to un-map portions of a region, rather than the
	 * entire region that was mapped with mmap(). On those operating
	 * systems, we can release protected free pages with Page_Delete(),
	 * in the hope that the swap space attached to those pages will be
	 * released as well.
	 */
	if ( EF_PROTECT_FREE )
		Page_Delete(slot->internalAddress, slot->internalSize);
	else
		Page_DenyAccess(slot->internalAddress, slot->internalSize);

	/* Re-protect the allocation list, unless a caller owns it. */
	if ( !noAllocationListProtection )
		Page_DenyAccess(allocationList, allocationListSize);
}
707:
/*
 * realloc resizes a buffer by allocating a new one, copying the surviving
 * bytes, and freeing the old buffer (so the old address always becomes
 * inaccessable — deliberate, to catch stale-pointer use).
 * Note two deviations from the standard realloc():
 *  - realloc(0, size) behaves like malloc(size), but realloc(p, 0) goes
 *    through malloc(0), which aborts unless EF_ALLOW_MALLOC_0 is set;
 *  - any bytes beyond the old size are zero-filled, which standard
 *    realloc() does not guarantee.
 * It sets noAllocationListProtection around the slot lookup and free()
 * to avoid repeated protect/unprotect system calls.
 */
extern C_LINKAGE void *
realloc(void * oldBuffer, size_t newSize)
{
	void *	newBuffer = malloc(newSize);

	if ( oldBuffer ) {
		size_t	size;
		Slot *	slot;

		/* Take charge of allocation-list protection ourselves. */
		Page_AllowAccess(allocationList, allocationListSize);
		noAllocationListProtection = 1;

		slot = slotForUserAddress(oldBuffer);

		if ( slot == 0 )
			EF_Abort(
			 "realloc(%a, %d): address not from malloc()."
			,oldBuffer
			,newSize);

		/* Copy only the bytes that survive the resize. */
		if ( newSize < (size = slot->userSize) )
			size = newSize;

		if ( size > 0 )
			memcpy(newBuffer, oldBuffer, size);

		free(oldBuffer);
		noAllocationListProtection = 0;
		Page_DenyAccess(allocationList, allocationListSize);

		/* Zero-fill any growth beyond the old contents. */
		if ( size < newSize )
			memset(&(((char *)newBuffer)[size]), 0, newSize - size);

		/* Internal memory was re-protected in free() */
	}

	return newBuffer;
}
746:
747: extern C_LINKAGE void *
748: malloc(size_t size)
749: {
750: if ( allocationList == 0 )
751: initialize(); /* This sets EF_ALIGNMENT */
752:
753: return memalign(EF_ALIGNMENT, size);
754: }
755:
756: extern C_LINKAGE void *
757: calloc(size_t nelem, size_t elsize)
758: {
759: size_t size = nelem * elsize;
760: void * allocation = malloc(size);
761:
762: memset(allocation, 0, size);
763: return allocation;
764: }
765:
766: /*
767: * This will catch more bugs if you remove the page alignment, but it
768: * will break some software.
769: */
770: extern C_LINKAGE void *
771: valloc (size_t size)
772: {
773: return memalign(bytesPerPage, size);
774: }
775:
776: #ifdef __hpux
/*
 * HP-UX 8/9.01 strcat reads a word past source when doing unaligned copies!
 * Work around it here. The bug report has been filed with HP.
 * (strcpy is used deliberately instead of a word-oriented copy.)
 */
char *strcat(char *dest, const char *src)
{
	size_t	destLength = strlen(dest);

	(void) strcpy(dest + destLength, src);
	return dest;
}
786: #endif
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>