  // free delayed frees from other threads (but skip contended ones)
  _mi_heap_delayed_free_partial(heap);

  // find (or allocate) a page of the right size
  mi_page_t* page = mi_find_page(heap, size, huge_alignment);
  if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
    mi_heap_collect(heap, true /* force */);
    page = mi_find_page(heap, size, huge_alignment);
  }

  if mi_unlikely(page == NULL) { // out of memory
    const size_t req_size = size - MI_PADDING_SIZE;  // correct for padding_size in case of an overflow on `size`
    _mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size);
    return NULL;
  }

  // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
  if mi_unlikely(zero && page->xblock_size == 0) {
    // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
    void* p = _mi_page_malloc(heap, page, size, false);
    mi_assert_internal(p != NULL);
    _mi_memzero_aligned(p, mi_page_usable_block_size(page));
    return p;
  }
  else {
    return _mi_page_malloc(heap, page, size, zero);
  }
}
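// Context sketch, not verbatim mimalloc source: _mi_malloc_generic above is only the
// slow path. The common fast path in _mi_page_malloc pops the head of the page-local
// free list and falls back to the generic path when that list is empty; the assertions,
// statistics, and padding handling of the real function are omitted here, and the
// trailing 0 (no huge alignment) is an assumption for the ordinary case.
static inline void* mi_page_malloc_fast_path_sketch(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) {
  mi_block_t* const block = page->free;
  if mi_unlikely(block == NULL) {
    return _mi_malloc_generic(heap, size, zero, 0);  // no free blocks: take the slow path shown above
  }
  // pop the block from the page-local free list
  page->free = mi_block_next(page, block);
  page->used++;
  return block;
}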
  // allow use of the block internally
  // note: when tracking we need to avoid ever touching the MI_PADDING since
  // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc/track.h`)
  mi_track_mem_undefined(block, mi_page_usable_block_size(page));

  // zero the block? note: we need to zero the full block size (issue #63)
  if mi_unlikely(zero) {
    mi_assert_internal(page->xblock_size != 0);  // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
    mi_assert_internal(page->xblock_size >= MI_PADDING_SIZE);
    if (page->free_is_zero) {
      block->next = 0;
      mi_track_mem_defined(block, page->xblock_size - MI_PADDING_SIZE);
    }
    else {
      _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
    }
  }

  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
  if (!zero && !mi_page_is_huge(page)) {
    memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
  }
  #elif (MI_SECURE!=0)
  if (!zero) { block->next = 0; } // don't leak internal data
  #endif
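// For reference, mi_page_usable_block_size used above is, in the mimalloc sources,
// essentially the page's block size minus the fixed debug/secure padding; a sketch:
static inline size_t mi_page_usable_block_size_sketch(const mi_page_t* page) {
  return mi_page_block_size(page) - MI_PADDING_SIZE;
}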
static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
{
  // search through the pages in "next fit" order
  #if MI_STAT
  size_t count = 0;
  #endif
  mi_page_t* page = pq->first;
  while (page != NULL)
  {
    mi_page_t* next = page->next; // remember next
    #if MI_STAT
    count++;
    #endif

    // 0. collect freed blocks by us and other threads
    _mi_page_free_collect(page, false);

    // 1. if the page contains free blocks, we are done
    if (mi_page_immediate_available(page)) {
      break;  // pick this one
    }

    // 2. Try to extend
    if (page->capacity < page->reserved) {
      mi_page_extend_free(heap, page, heap->tld);
      mi_assert_internal(mi_page_immediate_available(page));
      break;
    }

    // 3. If the page is completely full, move it to the `mi_pages_full`
    // queue so we don't visit long-lived pages too often.
    mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
    mi_page_to_full(page, pq);
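
    // The remainder of the loop and of the function is paraphrased below as a sketch
    // (not verbatim mimalloc source): advance to the remembered `next` page, and once
    // the queue is exhausted allocate a fresh page for this size class. mi_page_fresh
    // is a name taken from the mimalloc sources; the statistics and retire handling of
    // the real function are omitted, and the exact control flow is partly assumed.
    page = next;   // continue with the remembered next page
  }

  if (page == NULL) {
    page = mi_page_fresh(heap, pq);   // no usable page in the queue: allocate a fresh one
    if (page == NULL && first_try) {
      // allocation failed, or a reclaimed page raced with us: search the queue once more
      page = mi_page_queue_find_free_ex(heap, pq, false);
    }
  }
  return page;
}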
  // collect the thread free list
  if (force || mi_page_thread_free(page) != NULL) {  // quick test to avoid an atomic operation
    _mi_page_thread_free_collect(page);
  }

  // and the local free list
  if (page->local_free != NULL) {
    if mi_likely(page->free == NULL) {
      // usual case
      page->free = page->local_free;
      page->local_free = NULL;
      page->free_is_zero = false;
    }
    else if (force) {
      // append -- only on shutdown (force) as this is a linear operation
      mi_block_t* tail = page->local_free;
      mi_block_t* next;
      while ((next = mi_block_next(page, tail)) != NULL) {
        tail = next;
      }
      mi_block_set_next(page, tail, page->free);
      page->free = page->local_free;
      page->local_free = NULL;
      page->free_is_zero = false;
    }
  }
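// Illustration (self-contained, not mimalloc source): _mi_page_thread_free_collect
// above must detach the whole cross-thread free list atomically, since other threads
// keep pushing freed blocks onto it concurrently. A minimal C11 sketch of that
// detach/push pattern; mimalloc's real version additionally keeps delayed-free state
// in the low bits of the same word, so it uses a CAS loop for the detach as well.
#include <stdatomic.h>
#include <stddef.h>

typedef struct node_s { struct node_s* next; } node_t;

// take ownership of the entire list in one atomic step, leaving it empty
static node_t* detach_all(_Atomic(node_t*)* list) {
  return atomic_exchange_explicit(list, NULL, memory_order_acq_rel);
}

// concurrent producers push freed nodes with a CAS loop
static void push(_Atomic(node_t*)* list, node_t* n) {
  node_t* head = atomic_load_explicit(list, memory_order_relaxed);
  do {
    n->next = head;
  } while (!atomic_compare_exchange_weak_explicit(list, &head, n,
                                                  memory_order_release,
                                                  memory_order_relaxed));
}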
  if mi_likely(is_local) {                      // thread-local free?
    if mi_likely(page->flags.full_aligned == 0) // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
    {
      mi_block_t* const block = (mi_block_t*)p;
      if mi_unlikely(mi_check_is_double_free(page, block)) return;
      mi_check_padding(page, block);
      mi_stat_free(page, block);
      #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
      memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
      #endif
      mi_track_free_size(p, mi_page_usable_size_of(page, block)); // faster than mi_usable_size as we already know the page and that p is unaligned
      mi_block_set_next(page, block, page->local_free);
      page->local_free = block;
      if mi_unlikely(--page->used == 0) {   // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
        _mi_page_retire(page);
      }
    }
    else {
      // page is full or contains (inner) aligned blocks; use generic path
      _mi_free_generic(segment, page, true, p);
    }
  }
  else {
    // not thread-local; use generic path
    _mi_free_generic(segment, page, false, p);
  }
}
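// The single test on page->flags.full_aligned above works because, roughly as in the
// mimalloc sources, the "in full queue" and "has aligned blocks" bits share one byte,
// so both exceptional cases are ruled out with a single compare-to-zero branch.
// A sketch of that layout (field names follow the mimalloc sources; exact widths are
// an assumption):
#include <stdint.h>

typedef union mi_page_flags_sketch_s {
  uint8_t full_aligned;        // both flags viewed as a single byte
  struct {
    uint8_t in_full : 1;       // page is in the full queue
    uint8_t has_aligned : 1;   // page contains blocks with an adjusted (aligned) start
  } x;
} mi_page_flags_sketch_t;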
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  bool is_double_free = false;
  mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
  if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&  // quick check: aligned pointer?
      (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
  {
    // Suspicious: the decoded value in the block is in the same page (or NULL) -- maybe a double free?
    // (continue in separate function to improve code generation)
    is_double_free = mi_check_is_double_freex(page, block);
  }
  return is_double_free;
}
#else
// double-free checking disabled (e.g. when the free list is not encoded)
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page);
  MI_UNUSED(block);
  return false;
}
#endif

static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
  // The decoded value is in the same page (or NULL).
  // Walk the free lists to verify positively if it is already freed
  if (mi_list_contains(page, page->free, block) ||
      mi_list_contains(page, page->local_free, block) ||
      mi_list_contains(page, mi_page_thread_free(page), block))
  {
    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
    return true;
  }
  return false;
}
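// mi_list_contains, used above, is not shown in this excerpt; in the mimalloc sources
// it is essentially the linear walk sketched here, following the (decoded) next
// pointers of a free list.
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
  while (list != NULL) {
    if (elem == list) return true;
    list = mi_block_next(page, list);
  }
  return false;
}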