  /* Do nothing if the chunk is a faked mmapped chunk in the dumped
     main arena.  We never free this memory.  */
  if (DUMPED_MAIN_ARENA_CHUNK (p))
    return;
  uintptr_t block = (uintptr_t) p - prev_size (p);   // start of the mmapped block (prev_size bytes in front of p)
  size_t total_size = prev_size (p) + size;          // total size of the mapping (leading slack plus this chunk)
  /* Unfortunately we have to do the compilers job by hand here.  Normally
     we would test BLOCK and TOTAL-SIZE separately for compliance with the
     page size.  But gcc does not recognize the optimization possibility
     (in the moment at least) so we combine the two values into one before
     the bit test.  */
  if (__builtin_expect (((block | total_size) & (GLRO (dl_pagesize) - 1)) != 0, 0))
    malloc_printerr ("munmap_chunk(): invalid pointer");
  /* If munmap failed the process virtual memory address space
     is in a bad shape.  Just leave the block hanging around, the
     process will terminate shortly anyway since not much can be
     done.  */
  __munmap ((char *) block, total_size);   // system call
}
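On a default configuration, requests above mp_.mmap_threshold (initially 128 KiB) are served by a dedicated mmap(), and free() on such a pointer takes exactly this munmap_chunk() path instead of touching any bin. A minimal sketch to observe it (the 1 MiB size and the strace invocation are just illustrative choices, not anything taken from the source above):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* Well above the default mmap threshold, so glibc normally
       satisfies this with its own mmap() rather than from the heap.  */
    void *big = malloc(1024 * 1024);
    printf("big   = %p\n", big);

    /* Small request: comes from the main heap instead.  */
    void *small = malloc(24);
    printf("small = %p\n", small);

    /* Freeing the large block should go through munmap_chunk() and hand
       the pages back to the kernel; running under `strace -e mmap,munmap`
       is one way to watch the matching pair of syscalls.  */
    free(big);
    free(small);
    return 0;
}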
_int_free
The chunk is first offered to the tcache; if that is not possible it is pushed onto the matching fast bin; failing that, it is consolidated with neighbouring free chunks and placed in the unsorted bin.
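Before walking through the source, a minimal user-level sketch of that ordering (assuming a glibc with tcache, 2.26 or later, and default tuning; the 32-byte size is an arbitrary small value): the chunk freed last is the first one handed back for the same request size, because _int_free left it at the head of its tcache bin (or fast bin on older glibc).

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *a = malloc(32);          /* small request: tcache/fast-bin sized */
    void *b = malloc(32);          /* second chunk, just to keep the heap non-trivial */
    printf("a=%p b=%p\n", a, b);

    free(a);                       /* lands in the tcache (or a fast bin on pre-2.26 glibc) */

    void *c = malloc(32);          /* same size class: taken from the head of that bin */
    printf("c=%p\n", c);
    assert(c == a);                /* LIFO reuse of the just-freed chunk */

    free(b);
    free(c);
    return 0;
}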
static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr *fb;             /* associated fastbin */
  mchunkptr nextchunk;         /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize;    /* its size */
  int nextinuse;               /* true if nextchunk is used */
  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  mchunkptr bck;               /* misc temp for linking */
  mchunkptr fwd;               /* misc temp for linking */
size = chunksize(p);
  /* Little security check which won't hurt performance: the
     allocator never wrapps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    malloc_printerr ("free(): invalid pointer");
  /* We know that each chunk is at least MINSIZE bytes in size or a
     multiple of MALLOC_ALIGNMENT.  */
  // sanity-check the size field
  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
    malloc_printerr ("free(): invalid size");
  /* If eligible, place chunk on a fastbin so it can be found
     and used quickly in malloc.  */
  // if p's size is no larger than the fast-bin maximum, put p into a fast bin
  if ((unsigned long) (size) <= (unsigned long) (get_max_fast ())
#if TRIM_FASTBINS
      /* If TRIM_FASTBINS set, don't place chunks
         bordering top into fastbins  */
      // if TRIM_FASTBINS is set, chunks adjacent to the top chunk are not fast-binned
      && (chunk_at_offset (p, size) != av->top)
#endif
      ) {
    // check that the size of the next chunk is sane
    if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ, 0)
        || __builtin_expect (chunksize (chunk_at_offset (p, size)) >= av->system_mem, 0)) {
      bool fail = true;
      /* We might not have a lock at this point and concurrent modifications
         of system_mem might result in a false positive.  Redo the test after
         getting the lock.  */
      // the result above may be a false positive when obtained without the lock,
      // because system_mem can be modified concurrently; repeat the test under the lock
      if (!have_lock) {
        __libc_lock_lock (av->mutex);
        fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
                || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
        __libc_lock_unlock (av->mutex);
      }
      if (fail)
        malloc_printerr ("free(): invalid next size (fast)");
    }

    // wipe p's user data (when perturbation is enabled)
    free_perturb (chunk2mem (p), size - 2 * SIZE_SZ);
    atomic_store_relaxed (&av->have_fastchunks, true);
    unsigned int idx = fastbin_index (size);
    fb = &fastbin (av, idx);

    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
    mchunkptr old = *fb, old2;
    if (SINGLE_THREAD_P) {
      /* Check that the top of the bin is not the record we are going to
         add (i.e., double free).  */
      // make sure the chunk at the head of the fast bin is not the chunk being
      // freed, to catch a double free
      if (__builtin_expect (old == p, 0))
        malloc_printerr ("double free or corruption (fasttop)");
      // push p onto the fast-bin list
      p->fd = old;
      *fb = p;
    }
    else
      do {
        /* Check that the top of the bin is not the record we are going to
           add (i.e., double free).  */
        if (__builtin_expect (old == p, 0))
          malloc_printerr ("double free or corruption (fasttop)");
        // push p onto the fast-bin list
        p->fd = old2 = old;
      }
      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
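    // Note: the multi-threaded branch is a lock-free push; the CAS retries until the
    // bin head still equals the value captured in old2.  Both branches compare p only
    // against the current head of the bin, so freeing the same chunk twice with another
    // free of that size in between (free(a); free(b); free(a)) is not caught here.
    // That gap is the classic "fastbin dup" primitive.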
    /* Check that size of fastbin chunk at the top is the same as
       size of the chunk that we are adding.  We can dereference OLD
       only if we have the lock, otherwise it might have already been
       allocated again.  */
    // with the lock held, verify that the chunk was linked into the fast bin
    // whose index matches its size
    if (have_lock && old != NULL
        && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
      malloc_printerr ("invalid fastbin entry (free)");
  }
  /* Consolidate other non-mmapped chunks as they arrive.  */
  // if p does not qualify for a fast bin and was not mmapped, merge it with
  // neighbouring free chunks and put the result into the unsorted bin
  else if (!chunk_is_mmapped (p)) {
    /* If we're single-threaded, don't lock the arena.  */
    if (SINGLE_THREAD_P)
      have_lock = true;
if (!have_lock) __libc_lock_lock(av->mutex);
nextchunk = chunk_at_offset(p, size);
    /* Lightweight tests: check whether the block is already the
       top block.  */
    // security checks: is p the top chunk?
    if (__glibc_unlikely (p == av->top))
      malloc_printerr ("double free or corruption (top)");
    /* Or whether the next chunk is beyond the boundaries of the arena.  */
    // is the chunk after p already outside the arena?
    if (__builtin_expect (contiguous (av)
                          && (char *) nextchunk
                             >= ((char *) av->top + chunksize (av->top)), 0))
      malloc_printerr ("double free or corruption (out)");
    /* Or whether the block is actually not marked used.  */
    // is p in fact not marked as in use?
    if (__glibc_unlikely (!prev_inuse (nextchunk)))
      malloc_printerr ("double free or corruption (!prev)");
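    // Note: for a chunk too large for the tcache and fast bins, these checks are what
    // stop a plain double free.  The first free either merges p into the top chunk
    // (a second free then fails the "(top)" test) or clears PREV_INUSE in the chunk
    // that follows (a second free then fails the "(!prev)" test).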
    nextsize = chunksize (nextchunk);
    if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
        || __builtin_expect (nextsize >= av->system_mem, 0))
      malloc_printerr ("free(): invalid next size (normal)");

    free_perturb (chunk2mem (p), size - 2 * SIZE_SZ);

    /* consolidate backward */
    // if the previous chunk is free, merge it with p
    if (!prev_inuse (p)) {
      prevsize = prev_size (p);
      size += prevsize;
      p = chunk_at_offset (p, -((long) prevsize));
      unlink (av, p, bck, fwd);
    }

    if (nextchunk != av->top) {
      /* get and clear inuse bit */
      nextinuse = inuse_bit_at_offset (nextchunk, nextsize);

      /* consolidate forward */
      // if the next chunk is free, merge it into p as well
      if (!nextinuse) {
        unlink (av, nextchunk, bck, fwd);
        size += nextsize;
      } else
        clear_inuse_bit_at_offset (nextchunk, 0);

      /* Place the chunk in unsorted chunk list. Chunks are
         not placed into regular bins until after they have
         been given one chance to be used in malloc.  */
      // link p into the unsorted bin
      bck = unsorted_chunks (av);
      fwd = bck->fd;
      if (__glibc_unlikely (fwd->bk != bck))
        malloc_printerr ("free(): corrupted unsorted chunks");
      p->fd = fwd;
      p->bk = bck;
      // if p's size is in the large-bin range, clear the fd_nextsize/bk_nextsize pointers
      if (!in_smallbin_range (size)) {
        p->fd_nextsize = NULL;
        p->bk_nextsize = NULL;
      }
      bck->fd = p;
      fwd->bk = p;

      set_head (p, size | PREV_INUSE);
      set_foot (p, size);

      check_free_chunk (av, p);
    }
    /* If the chunk borders the current high end of memory,
       consolidate into top  */
    // if p borders the top chunk, merge it into top
    else {
      size += nextsize;
      set_head (p, size | PREV_INUSE);
      av->top = p;
      check_chunk (av, p);
    }
    /* If freeing a large space, consolidate possibly-surrounding
       chunks.  Then, if the total unused topmost memory exceeds trim
       threshold, ask malloc_trim to reduce top.

       Unless max_fast is 0, we don't know if there are fastbins
       bordering top, so we cannot tell for sure whether threshold
       has been reached unless fastbins are consolidated.  But we
       don't want to consolidate on each free.  As a compromise,
       consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
       is reached.  */
    // if the merged chunk is large enough, consolidate the fast bins and
    // consider shrinking the top chunk
    if ((unsigned long) (size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
      if (atomic_load_relaxed (&av->have_fastchunks))
        malloc_consolidate (av);   // merge the fast bins

      // in the main arena, shrink the top chunk once it exceeds the trim threshold
      if (av == &main_arena) {
#ifndef MORECORE_CANNOT_TRIM
        if ((unsigned long) (chunksize (av->top)) >=
            (unsigned long) (mp_.trim_threshold))
          systrim (mp_.top_pad, av);
#endif
      }
      // in a non-main arena, call heap_trim to shrink the heap
      else {
        /* Always try heap_trim(), even if the top chunk is not
           large, because the corresponding heap might go away.  */
        heap_info *heap = heap_for_ptr (top (av));
        assert (heap->ar_ptr == av);
        heap_trim (heap, mp_.top_pad);
      }
    }

    if (!have_lock)
      __libc_lock_unlock (av->mutex);
  }
  /* If the chunk was allocated via mmap, release via munmap().  */
  // a chunk that was obtained with mmap is released with munmap_chunk
  else {
    munmap_chunk (p);
  }
}
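The non-fast-bin path above can be observed from user code. A minimal sketch (the 0x500 request size is chosen only because it exceeds the default tcache and fast-bin limits, and the guard allocation exists purely to keep the freed chunk away from the top chunk; exact behaviour depends on the glibc version and tuning):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* Too large for tcache and fast bins, so free() takes the
       consolidation path of _int_free().  */
    char *a = malloc(0x500);
    /* Guard allocation: without it, `a` would border the top chunk and the
       free below would simply be merged into top instead of being binned.  */
    char *guard = malloc(0x20);
    printf("a = %p, guard = %p\n", (void *) a, (void *) guard);

    free(a);                  /* a now sits in the unsorted bin */

    char *b = malloc(0x500);  /* served from the unsorted bin */
    printf("b = %p  (on a typical glibc this equals a)\n", (void *) b);

    free(b);
    free(guard);
    return 0;
}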
/*
  ------------------------- malloc_consolidate -------------------------

  malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/
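Besides the FASTBIN_CONSOLIDATION_THRESHOLD branch of _int_free() shown above, malloc_consolidate() is also reached from _int_malloc() when a request in the large-bin range arrives while av->have_fastchunks is set. A minimal sketch of that trigger (the sizes and the 7-entry tcache fill are assumptions about default glibc 2.26+ tuning; on a tcache-less glibc the filling loop is simply unnecessary):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* Fill the 0x30 tcache bin (7 entries by default) so the later frees of
       this size actually reach the fast bins.  */
    void *tc[7];
    for (int i = 0; i < 7; i++)
        tc[i] = malloc(0x28);

    void *a = malloc(0x28);      /* two adjacent chunks destined for a fast bin */
    void *b = malloc(0x28);
    void *guard = malloc(0x28);  /* keeps a and b away from the top chunk */

    for (int i = 0; i < 7; i++)
        free(tc[i]);

    free(a);                     /* both land in the 0x30 fast bin */
    free(b);

    /* A large request makes _int_malloc() call malloc_consolidate():
       a and b are merged into one free chunk and moved out of the fast bin.  */
    void *big = malloc(0x500);

    /* The merged region starting at a can now back a larger request.  */
    void *c = malloc(0x50);
    printf("a = %p, c = %p  (typically equal after consolidation)\n", a, c);

    free(big);
    free(c);
    free(guard);
    return 0;
}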
static void
malloc_consolidate (mstate av)
{
  mfastbinptr *fb;             /* current fastbin being consolidated */
  mfastbinptr *maxfb;          /* last fastbin (for loop control) */
  mchunkptr p;                 /* current chunk being consolidated */
  mchunkptr nextp;             /* next chunk to consolidate */
  mchunkptr unsorted_bin;      /* bin header */
  mchunkptr first_unsorted;    /* chunk to link to */
  /* These have same use as in free() */
  mchunkptr nextchunk;
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T nextsize;
  INTERNAL_SIZE_T prevsize;
  int nextinuse;
  mchunkptr bck;
  mchunkptr fwd;
/* If max_fast is 0, we know that av hasn't yet been initialized, in which case do so below */
  if (get_max_fast () != 0)   // av has already been initialized
    {
      // clear the have_fastchunks flag, since all fast bins are about to be merged
      atomic_store_relaxed (&av->have_fastchunks, false);
unsorted_bin = unsorted_chunks(av);
      /* Remove each chunk from fast bin and consolidate it, placing it
         then in unsorted bin. Among other reasons for doing this,
         placing in unsorted bin avoids needing to calculate actual bins
         until malloc is sure that chunks aren't immediately going to be
         reused anyway.  */
      maxfb = &fastbin (av, NFASTBINS - 1);   // the fast bin with the largest size
      fb = &fastbin (av, 0);                  // the fast bin with the smallest size
      do {
        // detach the whole list from this fast bin and leave the bin empty
        p = atomic_exchange_acq (fb, NULL);
        if (p != 0) {
          do {
            check_inuse_chunk (av, p);
            nextp = p->fd;
            /* Slightly streamlined version of consolidation code in free() */
            size = chunksize (p);
            nextchunk = chunk_at_offset (p, size);
            nextsize = chunksize (nextchunk);
            // if the previous chunk is not in use, merge it with p
            if (!prev_inuse (p)) {
              prevsize = prev_size (p);
              size += prevsize;
              p = chunk_at_offset (p, -((long) prevsize));
              unlink (av, p, bck, fwd);
            }