/* Memory map support */
int n_mmaps;
int n_mmaps_max;
int max_n_mmaps;
/* the mmap_threshold is dynamic, until the user sets
   it manually, at which point we need to disable any
   dynamic behavior. */
int no_dyn_threshold;
/* Statistics */
INTERNAL_SIZE_T mmapped_mem;
/*INTERNAL_SIZE_T  sbrked_mem;*/
/*INTERNAL_SIZE_T  max_sbrked_mem;*/
INTERNAL_SIZE_T max_mmapped_mem;
INTERNAL_SIZE_T max_total_mem;  /* only kept for NO_THREADS */
/* First address handed out by MORECORE/sbrk.  */
char *sbrk_base;
};
/* Memory map support */
int n_mmaps;
int n_mmaps_max;
int max_n_mmaps;
/* the mmap_threshold is dynamic, until the user sets
   it manually, at which point we need to disable any
   dynamic behavior. */
int no_dyn_threshold;
/* First address handed out by MORECORE/sbrk.  */
char *sbrk_base;
#if USE_TCACHE
/* Maximum number of buckets to use.  */
size_t tcache_bins;
size_t tcache_max_bytes;
/* Maximum number of chunks in each bucket.  */
size_t tcache_count;
/* Maximum number of chunks to remove from the unsorted list, which
   aren't used to prefill the cache.  */
size_t tcache_unsorted_limit;
#endif
};
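To give a feel for the range these fields cover, here is a small sketch that redoes the tidx2usize arithmetic under assumed 64-bit defaults (tcache_bins = 64, tcache_count = 7 per bin); the constants are restated here purely for illustration, not taken verbatim from any particular build:

#include <stdio.h>
#include <stddef.h>

/* Assumed 64-bit values, restated only for illustration. */
#define SIZE_SZ           8
#define MALLOC_ALIGNMENT  16
#define MINSIZE           32
#define TCACHE_MAX_BINS   64

/* Usable size covered by tcache bin `idx` (same arithmetic as tidx2usize). */
static size_t tidx2usize (size_t idx)
{
  return idx * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ;
}

int main (void)
{
  /* With tcache_bins = 64, the largest request served from tcache has a
     usable size of 1032 bytes, i.e. a chunk size of 0x410 on 64-bit. */
  printf ("max tcache usable size: %zu\n", tidx2usize (TCACHE_MAX_BINS - 1));
  return 0;
}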
/* A heap is a single contiguous memory region holding (coalesceable) malloc_chunks. It is allocated with mmap() and always starts at an address aligned to HEAP_MAX_SIZE. */
typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
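Because every heap starts at a HEAP_MAX_SIZE-aligned address, the heap_info (and from it the owning arena via ar_ptr) for any chunk in a non-main arena can be recovered by masking off the low bits of its address; glibc does this with the heap_for_ptr macro in arena.c. A minimal sketch of the arithmetic, assuming the common 64-bit value of 64 MB for HEAP_MAX_SIZE:

#include <stdint.h>
#include <stdio.h>

/* Illustrative value only: HEAP_MAX_SIZE is commonly 64 MB on 64-bit builds. */
#define HEAP_MAX_SIZE (64UL * 1024 * 1024)

/* Round a chunk address down to the start of its heap, which is where the
   heap_info header lives (the same idea as heap_for_ptr). */
static uintptr_t heap_base_for (uintptr_t chunk_addr)
{
  return chunk_addr & ~(uintptr_t) (HEAP_MAX_SIZE - 1);
}

int main (void)
{
  uintptr_t chunk = 0x7f5de8a23450UL;   /* hypothetical chunk address */
  printf ("heap_info for %#lx sits at %#lx\n",
          (unsigned long) chunk, (unsigned long) heap_base_for (chunk));
  return 0;
}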
/* Linked list */
// Points to the next arena.
struct malloc_state *next;
/* Linked list for free arenas.  Access to this field is serialized
   by free_list_lock in arena.c.  */
// Pointer to the next free arena.
struct malloc_state *next_free;
/* Number of threads attached to this arena.  0 if the arena is on
   the free list.  Access to this field is serialized by
   free_list_lock in arena.c.  */
// In multithreaded memory management, this records how many threads are
// currently attached to this particular arena.
INTERNAL_SIZE_T attached_threads;
/* Memory allocated from the system in this arena.  */
INTERNAL_SIZE_T system_mem;
INTERNAL_SIZE_T max_system_mem;
};
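As a mental model of how these link fields tie arenas together, the sketch below builds a tiny mock of the circular `next` list anchored at main_arena and walks it (mock types and values, not glibc's own structures):

#include <stdio.h>
#include <stddef.h>

/* Mock arena type: only the fields relevant to the lists above. */
struct arena
{
  const char   *name;
  struct arena *next;               /* circular list of all arenas */
  size_t        attached_threads;   /* 0 => arena sits on the free list */
};

int main (void)
{
  struct arena main_a = { "main_arena", NULL, 1 };
  struct arena a1     = { "arena 1",    NULL, 2 };
  struct arena a2     = { "arena 2",    NULL, 0 };
  main_a.next = &a1;  a1.next = &a2;  a2.next = &main_a;

  /* Walk the circular `next` list starting at main_arena, the way the
     allocator iterates over every arena. */
  struct arena *p = &main_a;
  do
    {
      printf ("%s: %zu attached thread(s)\n", p->name, p->attached_threads);
      p = p->next;
    }
  while (p != &main_a);
  return 0;
}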
2.27
__libc_lock_define (, mutex);
/* Flags (formerly in max_fast).  */
int flags;

/* Set if the fastbin chunks contain recently inserted free blocks.  */
/* Note this is a bool but not all targets support atomics on booleans.  */
int have_fastchunks;
/* There are several instances of this struct ("arenas") in this malloc. If you are adapting this malloc in a way that does NOT use a static or mmapped malloc_state, you MUST explicitly zero-fill it before using. This malloc relies on the property that malloc_state is initialized to all zeroes (as is true of C statics). */
INTERNAL_SIZE_T      prev_size;   /* Size of previous chunk (if free).  */
INTERNAL_SIZE_T      size;        /* Size in bytes, including overhead. */

struct malloc_chunk* fd;          /* double links -- used only if free. */
struct malloc_chunk* bk;

/* Only used for large blocks: pointer to next larger size.  */
struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
struct malloc_chunk* bk_nextsize;
};
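To connect this header layout with the pointer an application actually sees: malloc() returns an address just past prev_size and size, i.e. where fd would live once the chunk is freed. glibc expresses this shift with its chunk2mem/mem2chunk macros; the helpers below restate the same +/- 2*SIZE_SZ adjustment as a standalone sketch (the _sketch names are ours):

#include <stddef.h>

typedef size_t INTERNAL_SIZE_T;            /* as in the struct above */
#define SIZE_SZ (sizeof (INTERNAL_SIZE_T))

/* User pointer = chunk pointer + the two size fields, and back again. */
static void *chunk2mem_sketch (void *chunk) { return (char *) chunk + 2 * SIZE_SZ; }
static void *mem2chunk_sketch (void *mem)   { return (char *) mem   - 2 * SIZE_SZ; }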
if (ar_ptr != NULL)
  __libc_lock_unlock (ar_ptr->mutex);
/* In a low memory situation, we may not be able to allocate memory - in which case, we just keep trying later. However, we typically do this very early, so either there is sufficient memory, or there isn't enough memory to do non-trivial allocations anyway. */
/* maintain large bins in sorted order */
if (fwd != bck)
  {
    /* Or with inuse bit to speed comparisons */
    size |= PREV_INUSE;
    /* if smaller than smallest, bypass loop below */
    assert ((bck->bk->size & NON_MAIN_ARENA) == 0);
    if ((unsigned long) (size) < (unsigned long) (bck->bk->size))
      {
        fwd = bck;
        bck = bck->bk;
#define MAX_ITERS       10000
          if (++iters >= MAX_ITERS)
            break;
        }
/* If a large request, scan through the chunks of current bin in sorted order to find smallest that fits. Use the skip list for this. */
if (!in_smallbin_range (nb))
  {
    bin = bin_at (av, idx);

    /* skip scan if empty or largest chunk is too small */
    if ((victim = first (bin)) != bin
        && (unsigned long) (victim->size) >= (unsigned long) (nb))
      {
        victim = victim->bk_nextsize;
        while (((unsigned long) (size = chunksize (victim)) < (unsigned long) (nb)))
          victim = victim->bk_nextsize;

        /* Avoid removing the first entry for a size so that the skip
           list does not have to be rerouted.  */
        if (victim != last (bin) && victim->size == victim->fd->size)
          victim = victim->fd;
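To make that walk concrete, here is a hypothetical bin state (sizes made up) and which chunk the search hands back:

/* The fd list is kept in descending size order and only the first chunk of
   each size carries fd_nextsize/bk_nextsize links:

       fd list:        bin -> 0x500 -> 0x450 -> 0x450' -> 0x400 -> bin
       nextsize list:         0x500 <-> 0x450 <-> 0x400   (one node per size)

   For nb = 0x440 the reverse nextsize walk stops at the 0x450 group, and
   because 0x450' is a same-size duplicate it is the chunk actually taken,
   so the skip list (the nextsize pointers of 0x450) never has to be
   rerouted.  */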
static void malloc_consolidate (mstate av)
{
  mfastbinptr*    fb;                 /* current fastbin being consolidated */
  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
  mchunkptr       p;                  /* current chunk being consolidated */
  mchunkptr       nextp;              /* next chunk to consolidate */
  mchunkptr       unsorted_bin;       /* bin header */
  mchunkptr       first_unsorted;     /* chunk to link to */
  /* These have same use as in free() */
  mchunkptr       nextchunk;
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T nextsize;
  INTERNAL_SIZE_T prevsize;
  int             nextinuse;
  mchunkptr       bck;
  mchunkptr       fwd;
  if (get_max_fast () != 0)
    {
      // global_max_fast is non-zero, meaning ptmalloc has already been
      // initialized; clear the fastbin flag in the arena's flags.
    /* Setup fencepost and free the old top chunk with a multiple of
       MALLOC_ALIGNMENT in size. */
    /* The fencepost takes at least MINSIZE bytes, because it might
       become the top chunk again later.  Note that a footer is set
       up, too, although the chunk is marked in use. */
    old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
    set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
    if (old_size >= MINSIZE)
      {
        set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
        set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
        set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
        _int_free (av, old_top, 1);
      }
    else
      {
        set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
        set_foot (old_top, (old_size + 2 * SIZE_SZ));
      }
  }
else if (!tried_mmap)
  /* We can at least try to use to mmap memory.  */
  goto try_mmap;
}
else     /* av == main_arena */
{
  /* Request enough space for nb + pad + overhead */
  size = nb + mp_.top_pad + MINSIZE;
  /* If contiguous, we can subtract out existing space that we hope to
     combine with the new space.  We add it back later only if we don't
     actually get contiguous space.  */
  if (contiguous (av))
    size -= old_size;
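  /* Worked example with hypothetical numbers: for a normalized request
     nb = 0x21000, mp_.top_pad = 0x20000 and an old top of old_size = 0x5000
     in a contiguous arena:

         size = 0x21000 + 0x20000 + 0x20 (MINSIZE, 64-bit) = 0x41020
         size -= 0x5000                                    -> 0x3c020

     so only 0x3c020 bytes are requested from MORECORE, counting on the
     existing top chunk to be merged with the new space.  */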
  /* Don't try to call MORECORE if argument is so big as to appear
     negative.  Note that since mmap takes size_t arg, it may succeed
     below even if we cannot call MORECORE.  */
  if (size > 0)
    {
      brk = (char *) (MORECORE (size));
      LIBC_PROBE (memory_sbrk_more, 2, brk, size);
    }
  if (brk != (char *) (MORECORE_FAILURE))
    {
      /* Call the `morecore' hook if necessary.  */
      void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
      if (__builtin_expect (hook != NULL, 0))
        (*hook)();
    }
  else
    {
      /* If have mmap, try using it as a backup when MORECORE fails or
         cannot be used.  This is worth doing on systems that have "holes" in
         address space, so sbrk cannot extend to give contiguous space, but
         space is available elsewhere.  Note that we ignore mmap max count
         and threshold limits, since the space will not be used as a
         segregated mmap region.  */

      /* Cannot merge with old top, so add its size back in */
      if (contiguous (av))
        size = ALIGN_UP (size + old_size, pagesize);
      /* If we are relying on mmap as backup, then use larger units */
      if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
        size = MMAP_AS_MORECORE_SIZE;
      /* Don't try if size wraps around 0 */
      if ((unsigned long) (size) > (unsigned long) (nb))
        {
          char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
          if (mbrk != MAP_FAILED)
            {
              /* We do not need, and cannot use, another sbrk call to find end */
              brk = mbrk;
              snd_brk = brk + size;
              /* Record that we no longer have a contiguous sbrk region.
                 After the first time mmap is used as backup, we do not
                 ever rely on contiguous space since this could incorrectly
                 bridge regions.  */
              set_noncontiguous (av);
            }
        }
    }
  if (brk != (char *) (MORECORE_FAILURE))
    {
      if (mp_.sbrk_base == 0)
        mp_.sbrk_base = brk;
      av->system_mem += size;
      /* If MORECORE extends previous space, we can likewise extend top size.  */
      if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
        set_head (old_top, (size + old_size) | PREV_INUSE);
      /* Otherwise, make adjustments:

         * If the first time through or noncontiguous, we need to call sbrk
           just to find out where the end of memory lies.

         * We need to ensure that all returned chunks from malloc will meet
           MALLOC_ALIGNMENT

         * If there was an intervening foreign sbrk, we need to adjust sbrk
           request size to account for fact that we will not be able to
           combine new space with existing space in old_top.

         * Almost all systems internally allocate whole pages at a time, in
           which case we might as well use the whole last page of request.
           So we allocate enough more memory to hit a page boundary now,
           which in turn causes future contiguous calls to page-align.  */
      else
        {
          front_misalign = 0;
          end_misalign = 0;
          correction = 0;
          aligned_brk = brk;
          /* handle contiguous cases */
          if (contiguous (av))
            {
              /* Count foreign sbrk as system_mem.  */
              if (old_size)
                av->system_mem += brk - old_end;
/* Guarantee alignment of first new chunk made from this space */
              front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
              if (front_misalign > 0)
                {
                  /* Skip over some bytes to arrive at an aligned position.
                     We don't need to specially mark these wasted front bytes.
                     They will never be accessed anyway because prev_inuse of
                     av->top (and any chunk created from its start) is always
                     true after initialization.  */
                  correction = MALLOC_ALIGNMENT - front_misalign;
                  aligned_brk += correction;
                }
              /* If this isn't adjacent to existing space, then we will not be
                 able to merge with old_top space, so must add to 2nd request.  */
              correction += old_size;
              /* Extend the end address to hit a page boundary */
              end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
              correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
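              /* Worked example with hypothetical numbers (64-bit, pagesize
                 0x1000, MALLOC_ALIGNMENT 0x10): if brk = 0x555555560008, then
                 chunk2mem (brk) = 0x555555560018, so

                     front_misalign = 0x18 & 0xf = 0x8
                     correction     = 0x10 - 0x8 = 0x8
                     aligned_brk    = brk + 0x8  = 0x555555560010

                 after which old_size is added and the end address is rounded
                 up to the next page boundary, so the MORECORE call below also
                 absorbs the partial last page.  */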
              snd_brk = (char *) (MORECORE (correction));

              /* If can't allocate correction, try to at least find out current
                 brk.  It might be enough to proceed without failing.

                 Note that if second sbrk did NOT fail, we assume that space
                 is contiguous with first sbrk.  This is a safe assumption
                 unless program is multithreaded but doesn't use locks and a
                 foreign sbrk occurred between our first and second calls.  */
              if (snd_brk == (char *) (MORECORE_FAILURE))
                {
                  correction = 0;
                  snd_brk = (char *) (MORECORE (0));
                }
              else
                {
                  /* Call the `morecore' hook if necessary.  */
                  void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
                  if (__builtin_expect (hook != NULL, 0))
                    (*hook)();
                }
            }
          /* handle non-contiguous cases */
          else
            {
              if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
                /* MORECORE/mmap must correctly align */
                assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
              else
                {
                  front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
                  if (front_misalign > 0)
                    {
                      /* Skip over some bytes to arrive at an aligned position.
                         We don't need to specially mark these wasted front bytes.
                         They will never be accessed anyway because prev_inuse of
                         av->top (and any chunk created from its start) is always
                         true after initialization.  */
                      aligned_brk += MALLOC_ALIGNMENT - front_misalign;
                    }
                }
              /* Find out current end of memory */
              if (snd_brk == (char *) (MORECORE_FAILURE))
                {
                  snd_brk = (char *) (MORECORE (0));
                }
            }
          /* Adjust top based on results of second sbrk */
          if (snd_brk != (char *) (MORECORE_FAILURE))
            {
              av->top = (mchunkptr) aligned_brk;
              set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
              av->system_mem += correction;
              /* If not the first time through, we either have a
                 gap due to foreign sbrk or a non-contiguous region.  Insert a
                 double fencepost at old_top to prevent consolidation with space
                 we don't own.  These fenceposts are artificial chunks that are
                 marked as inuse and are in any case too small to use.  We need
                 two to make sizes and alignments work out.  */
              if (old_size != 0)
                {
                  /* Shrink old_top to insert fenceposts, keeping size a
                     multiple of MALLOC_ALIGNMENT.  We know there is at least
                     enough space in old_top to do this.  */
                  old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
                  set_head (old_top, old_size | PREV_INUSE);
                  /* Note that the following assignments completely overwrite
                     old_top when old_size was previously MINSIZE.  This is
                     intentional.  We need the fencepost, even if old_top
                     otherwise gets lost.  */
                  chunk_at_offset (old_top, old_size)->size = (2 * SIZE_SZ) | PREV_INUSE;
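                  /* Resulting layout of the double fencepost (illustrative):

                       old_top                          old_top + old_size
                       |<-- shrunken old top ---------->|<- fencepost #1 ->|<- fencepost #2 ->|
                           (old_size | PREV_INUSE)         2*SIZE_SZ, inuse   2*SIZE_SZ, inuse

                     Both fenceposts stay marked in use and are too small to
                     ever be handed out, so later coalescing can never walk
                     past old_top into memory this arena no longer owns.  */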
  /* Little security check which won't hurt performance: the
     allocator never wrapps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  // Check whether oldp + oldsize would wrap past the top of the address space.
  if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
      || __builtin_expect (misaligned_chunk (oldp), 0))
    {
      malloc_printerr (check_action, "realloc(): invalid pointer",
                       oldmem, ar_ptr);
      return NULL;
    }

  // Check whether even the smallest chunk satisfying this request would
  // exceed the address limit.
  checked_request2size (bytes, nb);
  // Memory obtained via mmap is handled separately.
  if (chunk_is_mmapped (oldp))
    {
      void *newmem;
#if HAVE_MREMAP
      // For memory obtained via mmap, realloc is implemented with the
      // mremap system call.  mremap resizes (and possibly moves) the
      // mapping while preserving the old data.
      newp = mremap_chunk (oldp, nb);
      if (newp)
        return chunk2mem (newp);
#endif
      /* Note the extra SIZE_SZ overhead. */
      if (oldsize - SIZE_SZ >= nb)
        return oldmem;                         /* do nothing */
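For readers unfamiliar with the underlying primitive, here is a small standalone Linux example of the mremap(2) system call that mremap_chunk builds on (not glibc-internal code; the sizes are arbitrary):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

int main (void)
{
  size_t old_len = 4096, new_len = 8192;
  char *p = mmap (NULL, old_len, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 1;
  strcpy (p, "payload survives the remap");

  /* Grow the mapping in place if possible; MREMAP_MAYMOVE lets the kernel
     relocate it otherwise.  Either way the old contents are preserved. */
  char *q = mremap (p, old_len, new_len, MREMAP_MAYMOVE);
  if (q == MAP_FAILED)
    return 1;
  printf ("%s (moved: %s)\n", q, q == p ? "no" : "yes");
  munmap (q, new_len);
  return 0;
}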