/*
 * Fragmented extraction (interior lines missing) — platform setup for the
 * memory pool.  Visible pieces: selection of the sysconf() page-size name
 * (_SC_PAGE_SIZE vs _SC_PAGESIZE -> MHD_SC_PAGESIZE); MAP_ANON ->
 * MAP_ANONYMOUS compatibility alias; MAP_FAILED fallback definitions;
 * ALIGN_SIZE as two pointer widths with ROUND_TO_ALIGN() rounding a size
 * up to that alignment; a compile-time default page size; and the start of
 * MHD_init_mem_pools_() querying the runtime page size via sysconf().
 *
 * NOTE(review): in the "#elif defined(PAGESIZE)" branch the macro expands
 * to PAGE_SIZE, which is not defined on that branch — this looks like a
 * copy/paste defect; presumably it should expand to PAGESIZE.  TODO:
 * confirm against the full file before changing.
 */
40 #if defined(_SC_PAGE_SIZE) 41 #define MHD_SC_PAGESIZE _SC_PAGE_SIZE 42 #elif defined(_SC_PAGESIZE) 43 #define MHD_SC_PAGESIZE _SC_PAGESIZE 48 #if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS) 49 #define MAP_ANONYMOUS MAP_ANON 52 #define MAP_FAILED NULL 53 #elif ! defined(MAP_FAILED) 54 #define MAP_FAILED ((void*) -1) 60 #define ALIGN_SIZE (2 * sizeof(void*)) 65 #define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \ 66 / (ALIGN_SIZE) *(ALIGN_SIZE)) 68 #if defined(PAGE_SIZE) 69 #define MHD_DEF_PAGE_SIZE_ PAGE_SIZE 70 #elif defined(PAGESIZE) 71 #define MHD_DEF_PAGE_SIZE_ PAGE_SIZE 73 #define MHD_DEF_PAGE_SIZE_ (4096) 87 #ifdef MHD_SC_PAGESIZE 89 result = sysconf (MHD_SC_PAGESIZE);
/*
 * Fragment of MHD_pool_create(max) — allocates the pool control structure
 * with malloc(), then obtains the backing memory by one of three routes
 * visible below:
 *   - POSIX with MAP_ANONYMOUS: anonymous private mmap()
 *     (PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS);
 *   - _WIN32: VirtualAlloc() with MEM_COMMIT|MEM_RESERVE;
 *   - fallback: plain malloc(alloc_size), with is_mmap set to false.
 * When the mmap/VirtualAlloc path succeeds, is_mmap is set to true.
 * Both pool->end and pool->size are initialised to alloc_size.
 * The "max <= 32 * 1024" test appears to steer small pools away from the
 * page-granular mmap path — condition is truncated here; TODO confirm.
 * Interior lines (error handling, alloc_size computation) are missing
 * from this extraction.
 */
146 struct MemoryPool *pool;
149 pool = malloc (
sizeof (
struct MemoryPool));
152 #if defined(MAP_ANONYMOUS) || defined(_WIN32) 153 if ( (max <= 32 * 1024) ||
163 #if defined(MAP_ANONYMOUS) && ! defined(_WIN32) 164 pool->memory = mmap (
NULL,
166 PROT_READ | PROT_WRITE,
167 MAP_PRIVATE | MAP_ANONYMOUS,
170 #elif defined(_WIN32) 171 pool->memory = VirtualAlloc (
NULL,
173 MEM_COMMIT | MEM_RESERVE,
183 pool->memory = malloc (alloc_size);
184 if (
NULL == pool->memory)
189 pool->is_mmap =
false;
191 #if defined(MAP_ANONYMOUS) || defined(_WIN32) 194 pool->is_mmap =
true;
198 pool->end = alloc_size;
199 pool->size = alloc_size;
/*
 * Fragment of MHD_pool_destroy() — asserts the pool invariant
 * (size >= end - pos), then releases the backing memory with the
 * platform-matched counterpart of the allocation in MHD_pool_create():
 * munmap() on POSIX mmap pools, VirtualFree() on _WIN32.  The malloc
 * fallback path (free()) and the freeing of the pool struct itself are
 * in lines missing from this extraction.
 */
216 mhd_assert (pool->size >= pool->end - pool->pos);
220 #if defined(MAP_ANONYMOUS) && ! defined(_WIN32) 221 munmap (pool->memory,
223 #elif defined(_WIN32) 224 VirtualFree (pool->memory,
/*
 * Fragment of MHD_pool_get_free() — reports the number of bytes still
 * available between the front cursor (pool->pos) and the back cursor
 * (pool->end), after asserting the size >= end - pos invariant.
 */
244 mhd_assert (pool->size >= pool->end - pool->pos);
245 return (pool->end - pool->pos);
/*
 * Fragment of MHD_pool_allocate() — bump allocator over [pos, end):
 *   - (0 == asize) && (0 != size): the ROUND_TO_ALIGN of a nonzero size
 *     wrapped to zero, i.e. overflow — presumably rejected (the taken
 *     branch is missing here; TODO confirm).
 *   - pos + asize > end, or pos + asize < pos (unsigned wraparound):
 *     not enough room — rejected.
 *   - from_end allocations are carved from the back (end - asize),
 *     others from the front (at pos); cursor updates are in lines
 *     missing from this extraction.
 */
269 mhd_assert (pool->size >= pool->end - pool->pos);
271 if ( (0 == asize) && (0 != size) )
273 if ( (pool->pos + asize > pool->end) ||
274 (pool->pos + asize < pool->pos))
278 ret = &pool->memory[pool->end - asize];
283 ret = &pool->memory[pool->pos];
/*
 * Fragment of MHD_pool_reallocate() — resizes a front-area block:
 *   - Asserts old (when non-NULL) lies inside the pool's memory and
 *     before the current front cursor.
 *   - Shrinking (old_size > new_size): the now-unused tail
 *     [old + new_size, old + old_size) is zeroed with memset().
 *   - In-place resize: a candidate new front cursor new_apos is checked
 *     against the back cursor (new_apos > end) and against unsigned
 *     wraparound (new_apos < pos); if acceptable, pos is advanced —
 *     presumably only when old is the most recent front allocation
 *     (the guard is in missing lines; TODO confirm).
 *   - Otherwise a fresh block is taken at the front (pos) after the
 *     overflow check ((0 == asize) with nonzero request) and the
 *     free-space check (asize > end - pos); old contents are copied
 *     with memcpy() and the abandoned old block is zeroed.
 */
317 mhd_assert (pool->size >= pool->end - pool->pos);
320 mhd_assert (old ==
NULL || pool->memory + pool->size >= (uint8_t*) old
323 mhd_assert (old ==
NULL || pool->memory + pool->pos > (uint8_t*) old);
327 const size_t old_offset = (uint8_t*) old - pool->memory;
328 const bool shrinking = (old_size > new_size);
332 memset ((uint8_t*) old + new_size, 0, old_size - new_size);
339 if ( (new_apos > pool->end) ||
340 (new_apos < pool->pos) )
344 pool->pos = new_apos;
352 if ( ( (0 == asize) &&
354 (asize > pool->end - pool->pos) )
357 new_blc = pool->memory + pool->pos;
363 memcpy (new_blc, old, old_size);
365 memset (old, 0, old_size);
/*
 * Fragment of MHD_pool_reset() — recycles the pool while preserving one
 * surviving block:
 *   - Asserts keep (when non-NULL) lies inside the pool's memory.
 *   - If keep is non-NULL and not already at the pool base, its
 *     copy_bytes are moved to the start with memmove() (regions may
 *     overlap, hence memmove rather than memcpy).
 *   - The remainder (size - copy_bytes) is scrubbed: on _WIN32 the tail
 *     pages are handed back via VirtualFree() and re-obtained with
 *     VirtualAlloc() at the same address (recommit_addr) — letting the
 *     OS supply zeroed pages instead of memset()-ing them; the bytes not
 *     covered by that recommit are cleared with memset().  If the
 *     re-commit lands elsewhere the code presumably falls back to plain
 *     zeroing — the branch is in missing lines; TODO confirm.
 *   - Finally the back cursor is restored: pool->end = pool->size.
 */
391 mhd_assert (pool->size >= pool->end - pool->pos);
395 mhd_assert (keep ==
NULL || pool->memory + pool->size >= (uint8_t*) keep
397 if ( (
NULL != keep) &&
398 (keep != pool->memory) )
401 memmove (pool->memory,
406 if (pool->size > copy_bytes)
410 to_zero = pool->size - copy_bytes;
415 uint8_t *recommit_addr;
418 recommit_addr = pool->memory + pool->size - to_recommit;
422 if (VirtualFree (recommit_addr,
426 to_zero -= to_recommit;
428 if (recommit_addr != VirtualAlloc (recommit_addr,
436 memset (&pool->memory[copy_bytes],
441 pool->end = pool->size;
void MHD_init_mem_pools_(void)
size_t MHD_pool_get_free(struct MemoryPool *pool)
void * MHD_pool_reset(struct MemoryPool *pool, void *keep, size_t copy_bytes, size_t new_size)
static size_t MHD_sys_page_size_
void * MHD_pool_allocate(struct MemoryPool *pool, size_t size, int from_end)
struct MemoryPool * MHD_pool_create(size_t max)
#define ROUND_TO_ALIGN(n)
void * MHD_pool_reallocate(struct MemoryPool *pool, void *old, size_t old_size, size_t new_size)
void MHD_pool_destroy(struct MemoryPool *pool)
#define MHD_DEF_PAGE_SIZE_