
glibc 2.31 free

지지리 2021. 3. 1. 19:58

Written too hastily; needs revision - 210301

 

▶ __libc_free

void
__libc_free (void *mem) // mem = the user pointer (chunk address without the header)
{
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  void (*hook) (void *, const void *)
    = atomic_forced_read (__free_hook); 
  if (__builtin_expect (hook != NULL, 0)) // if a hook is registered
    {
      (*hook)(mem, RETURN_ADDRESS (0)); 
      // hook(mem, caller) -- RETURN_ADDRESS (0) is this frame's return
      // address, i.e. where free() was called from; malloc passes the
      // same value as the second argument to its hook
      return;
    }

  if (mem == 0)                              /* free(0) has no effect */
    return; // free(NULL) simply returns

  p = mem2chunk (mem); // p = address of the chunk header (mem - 0x10)

  if (chunk_is_mmapped (p))                       /* release mmapped memory. */
  // if the IS_MMAPPED (0x2) bit of the size field is set
    {
      /* See if the dynamic brk/mmap threshold needs adjusting.
	 Dumped fake mmapped chunks do not affect the threshold.  */
      if (!mp_.no_dyn_threshold
          && chunksize_nomask (p) > mp_.mmap_threshold
          && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
	  && !DUMPED_MAIN_ARENA_CHUNK (p))
        {
          mp_.mmap_threshold = chunksize (p);
          mp_.trim_threshold = 2 * mp_.mmap_threshold;
          LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
                      mp_.mmap_threshold, mp_.trim_threshold);
        }
      munmap_chunk (p);
      return;
    }

  MAYBE_INIT_TCACHE (); // create this thread's tcache struct if it doesn't exist yet

  ar_ptr = arena_for_chunk (p); // ar_ptr = the arena that manages chunk p
  _int_free (ar_ptr, p, 0); 
  // _int_free(mstate av, mchunkptr p, int have_lock)
  // have_lock = 0: the caller does not already hold av->mutex
}
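
Since the hook check runs before anything else, it is worth seeing it in action. A minimal sketch (my_free_hook is my own name; __free_hook is the real glibc symbol, deprecated but still a plain writable function pointer in 2.31), showing that RETURN_ADDRESS (0) arrives as the hook's second argument:

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>   /* declares the (deprecated) __free_hook */

static void
my_free_hook (void *mem, const void *caller)
{
  __free_hook = NULL;                 /* unhook to avoid recursion */
  printf ("free(%p) called from %p\n", mem, caller);
  free (mem);                         /* forward to the real free */
  __free_hook = my_free_hook;         /* rehook */
}

int
main (void)
{
  __free_hook = my_free_hook;
  void *p = malloc (0x20);
  free (p);                           /* routed through my_free_hook */
  return 0;
}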

▶ _int_free

static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr *fb;             /* associated fastbin */
  mchunkptr nextchunk;         /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize;    /* its size */
  int nextinuse;               /* true if nextchunk is used */
  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  mchunkptr bck;               /* misc temp for linking */
  mchunkptr fwd;               /* misc temp for linking */

  size = chunksize (p); // size = the size field of the chunk being freed, flag bits stripped

  /* Little security check which won't hurt performance: the
     allocator never wrapps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    malloc_printerr ("free(): invalid pointer");
    // error if p > -size (p + size would wrap past the end of the
    // address space) or if p is misaligned
    
  /* We know that each chunk is at least MINSIZE bytes in size or a
     multiple of MALLOC_ALIGNMENT.  */
  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
    malloc_printerr ("free(): invalid size");
    // error if size is below the minimum chunk size, or if
    // aligned_OK fails: it checks (size & 0xf) == 0 on x86-64.
    // chunksize() already stripped the low flag bits (0x7), so this
    // fires exactly when the 0x8 bit is set.

  check_inuse_chunk(av, p);
  // assert-only macro; a no-op unless glibc is built with MALLOC_DEBUG

#if USE_TCACHE
  {
    size_t tc_idx = csize2tidx (size); // tc_idx = tcache bin index for this size
    if (tcache != NULL && tc_idx < mp_.tcache_bins)
    // if this thread's tcache exists and the size fits a tcache bin
      {
	/* Check to see if it's already in the tcache.  */
	tcache_entry *e = (tcache_entry *) chunk2mem (p);
	// e = the user pointer for p (chunk address + header)

	/* This test succeeds on double free.  However, we don't 100%
	   trust it (it also matches random payload data at a 1 in
	   2^<size_t> chance), so verify it's not an unlikely
	   coincidence before aborting.  */
	if (__glibc_unlikely (e->key == tcache))
	// if e->key == tcache (either already freed, or in-use data that
	// happens to match the tcache pointer)
	  {
	    tcache_entry *tmp;
	    LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
        
	    for (tmp = tcache->entries[tc_idx];
		 tmp;
		 tmp = tmp->next)
	      if (tmp == e)
		malloc_printerr ("free(): double free detected in tcache 2");
		// abort only if both hold:
		// 1. e is actually found in this tcache bin, and
		// 2. e->key == tcache
		// (see the short demo after _int_free)
        
	    /* If we get here, it was a coincidence.  We've wasted a
	       few cycles, but don't abort.  */
	  }

	if (tcache->counts[tc_idx] < mp_.tcache_count)
	// if the bin at tc_idx is not full (default limit: 7 chunks)
	  {
	    tcache_put (p, tc_idx); // push p onto the tcache; covered below
	    return; // free is done
	  }
      }
  }
#endif

  /*
    If eligible, place chunk on a fastbin so it can be found
    and used quickly in malloc.
  */

  if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
  // if size is within the fastbin range (default max: 0x80)

#if TRIM_FASTBINS 
// TRIM_FASTBINS defaults to 0. If enabled, chunks bordering the top
// chunk skip the fastbin path.
      /*
	If TRIM_FASTBINS set, don't place chunks
	bordering top into fastbins
      */
      && (chunk_at_offset(p, size) != av->top)
#endif
      ) {

    if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
			  <= 2 * SIZE_SZ, 0)
	|| __builtin_expect (chunksize (chunk_at_offset (p, size))
			     >= av->system_mem, 0))
      {
      // error if the size of the physically next chunk is <= 0x10
      // (2 * SIZE_SZ) or >= av->system_mem
	bool fail = true; // tentatively treat it as a failure
    
	/* We might not have a lock at this point and concurrent modifications
	   of system_mem might result in a false positive.  Redo the test after
	   getting the lock.  */
	if (!have_lock) // the caller doesn't hold the arena lock: retest under it
	  {
	    __libc_lock_lock (av->mutex);
	    fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
		    || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
	    __libc_lock_unlock (av->mutex);
	  }

	if (fail)
	  malloc_printerr ("free(): invalid next size (fast)");
	  // abort if the check (re)confirmed the bad next size
      }

    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
    // if perturb_byte (M_PERTURB) is set, fill the user data
    // (size - 0x10 bytes) with it

    atomic_store_relaxed (&av->have_fastchunks, true); 
    // mark the arena as having fastbin chunks
    // (checked later by malloc_consolidate)
    
    unsigned int idx = fastbin_index(size); 
    // idx = fastbin index for this size
    fb = &fastbin (av, idx);
    // fb = head pointer of the fastbin list for this size

    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
    mchunkptr old = *fb, old2;
    // old = current head of this fastbin (most recently freed chunk)

    if (SINGLE_THREAD_P)
      {
	/* Check that the top of the bin is not the record we are going to
	   add (i.e., double free).  */
	if (__builtin_expect (old == p, 0))
	  malloc_printerr ("double free or corruption (fasttop)");
	// error if the current fastbin head is the chunk being freed.
	// Only the head is checked, so freeing p, then another chunk,
	// then p again is not caught here.

	p->fd = old; // p->fd = previous fastbin head
	*fb = p; // p becomes the new head of the fastbin
      }
    else // multithreaded: same push, but via an atomic compare-and-exchange
      do
	{
	  /* Check that the top of the bin is not the record we are going to
	     add (i.e., double free).  */
	  if (__builtin_expect (old == p, 0))
	    malloc_printerr ("double free or corruption (fasttop)");
	  p->fd = old2 = old;
	}
      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
	     != old2);

    /* Check that size of fastbin chunk at the top is the same as
       size of the chunk that we are adding.  We can dereference OLD
       only if we have the lock, otherwise it might have already been
       allocated again.  */
    if (have_lock && old != NULL
	&& __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
      malloc_printerr ("invalid fastbin entry (free)");
  }

  /*
    Consolidate other non-mmapped chunks as they arrive.
  */

  else if (!chunk_is_mmapped(p)) {
  // if the IS_MMAPPED bit is not set (a normal, non-fastbin chunk)

    /* If we're single-threaded, don't lock the arena.  */
    if (SINGLE_THREAD_P)
      have_lock = true; // single-threaded: no lock needed, act as if held

    if (!have_lock) // otherwise take the arena lock now
      __libc_lock_lock (av->mutex);

    nextchunk = chunk_at_offset(p, size);
    // nextchunk = the chunk physically adjacent after p

    /* Lightweight tests: check whether the block is already the
       top block.  */
    if (__glibc_unlikely (p == av->top))
      malloc_printerr ("double free or corruption (top)");
      // error if the chunk being freed is the top chunk
      
    /* Or whether the next chunk is beyond the boundaries of the arena.  */
    if (__builtin_expect (contiguous (av)
			  && (char *) nextchunk
			  >= ((char *) av->top + chunksize(av->top)), 0))
	malloc_printerr ("double free or corruption (out)");
    // contiguous(av): the arena occupies one contiguous memory region
    // error if nextchunk lies past the end of the top chunk,
    // i.e. outside the arena
    
    /* Or whether the block is actually not marked used.  */
    if (__glibc_unlikely (!prev_inuse(nextchunk)))
      malloc_printerr ("double free or corruption (!prev)");
      // error if nextchunk's prev_inuse bit is 0
      // (p is already marked as free)

    nextsize = chunksize(nextchunk); // nextsize = size of the chunk after p
    if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
	|| __builtin_expect (nextsize >= av->system_mem, 0))
      malloc_printerr ("free(): invalid next size (normal)");
      // error if nextchunk's size is <= 0x10 or >= av->system_mem

    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);

    /* consolidate backward */
    if (!prev_inuse(p)) {
    // if p's prev_inuse bit is 0, the previous chunk is free: merge backward

      prevsize = prev_size (p);
      size += prevsize; // grow size by the previous chunk's size
      p = chunk_at_offset(p, -((long) prevsize));
      // p now points at the previous chunk's header

      if (__glibc_unlikely (chunksize(p) != prevsize))
        malloc_printerr ("corrupted size vs. prev_size while consolidating");
        // error if the previous chunk's own size field disagrees with
        // the prev_size we just read

      unlink_chunk (av, p); // take the previous chunk off its bin
    }

    if (nextchunk != av->top) {
    // if the next chunk is not the top chunk

      /* get and clear inuse bit */
      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
      // nextinuse = prev_inuse bit of the chunk after nextchunk

      /* consolidate forward */
      if (!nextinuse) {
      // that bit is 0, so nextchunk is itself free: merge forward
	unlink_chunk (av, nextchunk); // take nextchunk off its bin
	size += nextsize; // grow size by nextchunk's size
      } else
	clear_inuse_bit_at_offset(nextchunk, 0);
	// otherwise just clear nextchunk's prev_inuse bit (p is now free)

      /*
	Place the chunk in unsorted chunk list. Chunks are
	not placed into regular bins until after they have
	been given one chance to be used in malloc.
      */

      bck = unsorted_chunks(av); // bck = the unsorted bin header in the arena
      fwd = bck->fd; // fwd = first chunk in the unsorted bin
      if (__glibc_unlikely (fwd->bk != bck))
	malloc_printerr ("free(): corrupted unsorted chunks");
	// error if the first chunk's bk does not point back to the
	// bin header (inside main_arena)

      p->fd = fwd; // p->fd = old first chunk of the unsorted bin
      p->bk = bck; // p->bk = the unsorted bin header
      
      if (!in_smallbin_range(size))
      // largebin-sized: also clear fd_nextsize / bk_nextsize
	{
	  p->fd_nextsize = NULL;
	  p->bk_nextsize = NULL;
	}
      bck->fd = p;
      fwd->bk = p;
      // unsorted bin is now: header <-> p <-> fwd <-> ...

      set_head(p, size | PREV_INUSE); // write the merged size, prev_inuse set
      set_foot(p, size); // write size into the next chunk's prev_size field

      check_free_chunk(av, p);
    }

    /*
      If the chunk borders the current high end of memory,
      consolidate into top
    */

    else {
      size += nextsize; // merge into the top chunk
      set_head(p, size | PREV_INUSE); // keep prev_inuse set
      av->top = p; // p becomes the new top chunk
      check_chunk(av, p);
    }

    /*
      If freeing a large space, consolidate possibly-surrounding
      chunks. Then, if the total unused topmost memory exceeds trim
      threshold, ask malloc_trim to reduce top.

      Unless max_fast is 0, we don't know if there are fastbins
      bordering top, so we cannot tell for sure whether threshold
      has been reached unless fastbins are consolidated.  But we
      don't want to consolidate on each free.  As a compromise,
      consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
      is reached.
    */

    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
    // if the merged size is >= 0x10000
      if (atomic_load_relaxed (&av->have_fastchunks)) // if fastbin chunks exist
	malloc_consolidate(av); // consolidate every fastbin chunk
      if (av == &main_arena) {
#ifndef MORECORE_CANNOT_TRIM
	if ((unsigned long)(chunksize(av->top)) >=
	    (unsigned long)(mp_.trim_threshold))
	  systrim(mp_.top_pad, av);
#endif
      } else {
	/* Always try heap_trim(), even if the top chunk is not
	   large, because the corresponding heap might go away.  */
	heap_info *heap = heap_for_ptr(top(av));

	assert(heap->ar_ptr == av);
	heap_trim(heap, mp_.top_pad);
      }
    }

    if (!have_lock)
      __libc_lock_unlock (av->mutex); // release the lock if we took it above
  }
  /*
    If the chunk was allocated via mmap, release via munmap().
  */

  else {
    munmap_chunk (p);
  }
}
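
The tcache key check above is easy to observe. A minimal sketch (assuming glibc 2.31): freeing the same tcache-sized pointer twice in a row satisfies both conditions, so the program aborts with the exact message from that code path.

#include <stdlib.h>

int
main (void)
{
  void *p = malloc (0x20);   /* tcache-sized chunk */
  free (p);                  /* tcache_put writes e->key = tcache */
  free (p);                  /* e->key == tcache and e is found in the bin:
                                aborts with
                                "free(): double free detected in tcache 2" */
  return 0;
}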

▶ tcache_put

static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);

  /* Mark this chunk as "in the tcache" so the test in _int_free will
     detect a double free.  */
  e->key = tcache; // stamp key with the tcache address (double-free marker)

  e->next = tcache->entries[tc_idx];
  // e->next = previous head of this bin (next sits where fd would be
  // in a regular free chunk)
  tcache->entries[tc_idx] = e; // e becomes the new head of the bin
  ++(tcache->counts[tc_idx]); // bump this bin's count
}
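
For reference, the two structures tcache_put operates on, as defined in glibc 2.31's malloc/malloc.c (TCACHE_MAX_BINS is 64):

typedef struct tcache_entry
{
  struct tcache_entry *next;
  /* This field exists to detect double frees.  */
  struct tcache_perthread_struct *key;
} tcache_entry;

typedef struct tcache_perthread_struct
{
  uint16_t counts[TCACHE_MAX_BINS];
  tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;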