Diff of version 1.1.1.1 (2012/02/21 23:05:52) against version 1.1.1.4 (2014/06/15 19:46:05).
Unchanged context lines are shown once; lines only in 1.1.1.1 are prefixed with "-", lines only in 1.1.1.4 with "+".

Line 1
/*
 *    Stack-less Just-In-Time compiler
 *
- *    Copyright 2009-2010 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *    Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
Line 52
The unused blocks are stored in a chain list pointed to by free_blocks. This
list is used to find a suitable memory area when the allocator is called.

When a block is freed, the new free block is merged with its adjacent free
blocks whenever possible.
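The structures behind this description are defined earlier in the file and are outside this diff. As orientation only, they can be assumed to look roughly like the sketch below; the field names follow the checks visible in the hunks further down (header.size, prev_size, the free_blocks chain), but the exact layout and the macro definitions are an assumption, not part of this change.

/* Illustrative sketch only -- not part of this diff. */
struct block_header {
    sljit_uw size;       /* 0: this block is free, 1: chunk terminator, n: size of a used block */
    sljit_uw prev_size;  /* 0: first block of its chunk, n: size of the preceding block */
};

struct free_block {
    struct block_header header;
    struct free_block *next;   /* doubly linked chain list rooted at free_blocks */
    struct free_block *prev;
    sljit_uw size;
};

/* Neighbouring blocks are reached by byte offsets, which is what makes the
   forward/backward merging in sljit_free_exec cheap. */
#define AS_BLOCK_HEADER(base, offset) \
    ((struct block_header*)(((sljit_ub*)(base)) + (offset)))
#define AS_FREE_BLOCK(base, offset) \
    ((struct free_block*)(((sljit_ub*)(base)) + (offset)))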

Line 83

static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
-    return VirtualAlloc(0, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
+    return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
}

static SLJIT_INLINE void free_chunk(void* chunk, sljit_uw size)

Line 94    static SLJIT_INLINE void free_chunk(void* chunk, sljit

#else

#include <sys/mman.h>

static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
-    void* retval = mmap(0, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0);
+    void* retval;
+
+#ifdef MAP_ANON
+    retval = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0);
+#else
+    if (dev_zero < 0) {
+        if (open_dev_zero())
+            return NULL;
+    }
+    retval = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, dev_zero, 0);
+#endif
+
    return (retval != MAP_FAILED) ? retval : NULL;
}
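The new #else branch relies on a dev_zero file descriptor and an open_dev_zero() helper that are not part of this hunk; on systems without MAP_ANON, anonymous-style memory is obtained by mapping /dev/zero instead. A minimal sketch of what such a helper is assumed to do (the shipped one lives elsewhere in the library and may differ):

/* Assumed support code for the #else branch above -- not part of this diff. */
#include <fcntl.h>

static int dev_zero = -1;

static int open_dev_zero(void)
{
    dev_zero = open("/dev/zero", O_RDWR);
    /* Non-zero means failure, matching the "if (open_dev_zero()) return NULL;" caller. */
    return dev_zero < 0;
}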

Line 202 (1.1.1.1) / Line 211 (1.1.1.4)    SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit

    chunk_size = (size + sizeof(struct block_header) + CHUNK_SIZE - 1) & CHUNK_MASK;
    header = (struct block_header*)alloc_chunk(chunk_size);
-    PTR_FAIL_IF(!header);
+    if (!header) {
+        allocator_release_lock();
+        return NULL;
+    }

    chunk_size -= sizeof(struct block_header);
    total_size += chunk_size;
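CHUNK_SIZE and CHUNK_MASK are defined outside this hunk; assuming the conventional definitions (CHUNK_SIZE a power of two and CHUNK_MASK equal to ~(CHUNK_SIZE - 1)), the expression rounds the request plus one block_header up to a whole number of chunks. A worked example with assumed values:

/* Assumed values: CHUNK_SIZE = 0x10000 (64 KB), sizeof(struct block_header) = 16. */
/* size = 100000:                                                                   */
/*   100000 + 16 + 0xffff = 165551 (0x286af)                                        */
/*   0x286af & ~0xffff    = 0x20000 = 131072, i.e. two whole 64 KB chunks.          */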

Line 237 (1.1.1.1) / Line 249 (1.1.1.4)    SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* pt

    struct free_block* free_block;

    allocator_grab_lock();
-    header = AS_BLOCK_HEADER(ptr, -(sljit_w)sizeof(struct block_header));
+    header = AS_BLOCK_HEADER(ptr, -(sljit_sw)sizeof(struct block_header));
    allocated_size -= header->size;

    /* Connecting free blocks together if possible. */

    /* If header->prev_size == 0, free_block will be equal to header.
       In this case, free_block->header.size will be > 0. */
-    free_block = AS_FREE_BLOCK(header, -(sljit_w)header->prev_size);
+    free_block = AS_FREE_BLOCK(header, -(sljit_sw)header->prev_size);
    if (SLJIT_UNLIKELY(!free_block->header.size)) {
        free_block->size += header->size;
        header = AS_BLOCK_HEADER(free_block, free_block->size);

Line 263 (1.1.1.1) / Line 275 (1.1.1.4)    SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* pt

        header->prev_size = free_block->size;
    }

    /* The whole chunk is free. */
    if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) {
        /* If this block is freed, we still have (allocated_size / 2) free space. */
        if (total_size - free_block->size > (allocated_size * 3 / 2)) {
            total_size -= free_block->size;
            sljit_remove_free_block(free_block);
            free_chunk(free_block, free_block->size + sizeof(struct block_header));
        }
    }

    allocator_release_lock();
}
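The 3/2 factor in the test above is what the "(allocated_size / 2) free space" comment refers to: a completely free chunk is only returned to the system if the remaining mapped memory still exceeds one and a half times the memory currently handed out, so at least allocated_size / 2 of already-mapped space stays cached for future requests. With illustrative numbers:

/* Illustrative numbers only. Suppose allocated_size = 64 KB of live code,
   total_size = 256 KB of mapped chunks, and free_block->size = 128 KB.
     total_size - free_block->size = 128 KB
     allocated_size * 3 / 2        =  96 KB
   128 KB > 96 KB, so the chunk is unmapped; 128 KB of mapped memory remains,
   of which 128 KB - 64 KB = 64 KB is still free -- above the allocated_size / 2
   (32 KB) floor the comment describes. */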

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
{
    struct free_block* free_block;
    struct free_block* next_free_block;

    allocator_grab_lock();

    free_block = free_blocks;
    while (free_block) {
        next_free_block = free_block->next;
        if (!free_block->header.prev_size &&
            AS_BLOCK_HEADER(free_block, free_block->size)->size == 1) {
            total_size -= free_block->size;
            sljit_remove_free_block(free_block);
            free_chunk(free_block, free_block->size + sizeof(struct block_header));
        }
        free_block = next_free_block;
    }

    SLJIT_ASSERT((total_size && free_blocks) || (!total_size && !free_blocks));
    allocator_release_lock();
}
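Taken together, the three entry points shown or referenced in this diff form the allocator's public surface: sljit_malloc_exec() hands out executable memory, sljit_free_exec() returns it, and sljit_free_unused_memory_exec() releases fully free chunks back to the system. A hedged usage sketch follows; the sljitLir.h include path and the instruction-cache handling a real JIT would need are assumptions, not shown in this file.

/* Hedged usage sketch -- not part of this diff. */
#include <string.h>
#include "sljitLir.h"

/* Copy generated machine code into an executable block. */
static void* publish_code(const void* buf, sljit_uw len)
{
    void* code = sljit_malloc_exec(len);
    if (!code)
        return NULL;            /* allocation (or chunk mapping) failed */
    memcpy(code, buf, len);
    /* A real JIT would flush the instruction cache here on non-x86 targets. */
    return code;
}

/* Release a block and let the allocator trim fully free chunks. */
static void retire_code(void* code)
{
    sljit_free_exec(code);
    sljit_free_unused_memory_exec();
}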