https://github.com/kraj/glibc/blob/master/malloc/malloc.c
glibc malloc.c (tcache가 도입된 버전) 중에서 tcache 관련 부분만 발췌해서 정리한 노트.
|
void * | | __libc_malloc (size_t bytes) | | { | | mstate ar_ptr; | | void *victim; | |
| | void *(*hook) (size_t, const void *) | | = atomic_forced_read (__malloc_hook); | | if (__builtin_expect (hook != NULL, 0)) | | return (*hook)(bytes, RETURN_ADDRESS (0)); | | #if USE_TCACHE | | /* int_free also calls request2size, be careful to not pad twice. */ | | size_t tbytes; | | checked_request2size (bytes, tbytes); | | size_t tc_idx = csize2tidx (tbytes); | |
| | MAYBE_INIT_TCACHE (); | |
| | DIAG_PUSH_NEEDS_COMMENT; | | if (tc_idx < mp_.tcache_bins | | /*&& tc_idx < TCACHE_MAX_BINS*/ /* to appease gcc */ | | && tcache | | && tcache->entries[tc_idx] != NULL) | | { | | return tcache_get (tc_idx); | | } | | DIAG_POP_NEEDS_COMMENT; | | #endif | |
| | if (SINGLE_THREAD_P) | | { | | victim = _int_malloc (&main_arena, bytes); | | assert (!victim || chunk_is_mmapped (mem2chunk (victim)) || | | &main_arena == arena_for_chunk (mem2chunk (victim))); | | return victim; | | } | |
| | arena_get (ar_ptr, bytes); | |
|
static void | | _int_free (mstate av, mchunkptr p, int have_lock) | | { | | INTERNAL_SIZE_T size; /* its size */ | | mfastbinptr *fb; /* associated fastbin */ | | mchunkptr nextchunk; /* next contiguous chunk */ | | INTERNAL_SIZE_T nextsize; /* its size */ | | int nextinuse; /* true if nextchunk is used */ | | INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */ | | mchunkptr bck; /* misc temp for linking */ | | mchunkptr fwd; /* misc temp for linking */ | |
| | size = chunksize (p); | |
| | /* Little security check which won't hurt performance: the | | allocator never wrapps around at the end of the address space. | | Therefore we can exclude some size values which might appear | | here by accident or by "design" from some intruder. */ | | if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0) | | || __builtin_expect (misaligned_chunk (p), 0)) | | malloc_printerr ("free(): invalid pointer"); | | /* We know that each chunk is at least MINSIZE bytes in size or a | | multiple of MALLOC_ALIGNMENT. */ | | if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size))) | | malloc_printerr ("free(): invalid size"); | |
| | check_inuse_chunk(av, p); | |
| | #if USE_TCACHE | | { | | size_t tc_idx = csize2tidx (size); | |
| | if (tcache | | && tc_idx < mp_.tcache_bins | | && tcache->counts[tc_idx] < mp_.tcache_count) | | { | | tcache_put (p, tc_idx); | | return; | | } | | } | | #endif |
|
static struct malloc_par mp_ = | | { | | .top_pad = DEFAULT_TOP_PAD, | | .n_mmaps_max = DEFAULT_MMAP_MAX, | | .mmap_threshold = DEFAULT_MMAP_THRESHOLD, | | .trim_threshold = DEFAULT_TRIM_THRESHOLD, | | #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8)) | | .arena_test = NARENAS_FROM_NCORES (1) | | #if USE_TCACHE | | , | | .tcache_count = TCACHE_FILL_COUNT, | | .tcache_bins = TCACHE_MAX_BINS, | | .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1), | | .tcache_unsorted_limit = 0 /* No limit. */ | | #endif | | };
|
/* Maximum number of chunks cached in a single tcache bin.  */
# define TCACHE_FILL_COUNT 7
하나의 tcache bin에는 최대 7개(TCACHE_FILL_COUNT)의 청크만 들어가고, 그걸 초과하면 free는 기존 경로(fastbin/unsorted 등)로 넘어간다.
static void | | tcache_init(void) | | { | | mstate ar_ptr; | | void *victim = 0; | | const size_t bytes = sizeof (tcache_perthread_struct); | |
| | if (tcache_shutting_down) | | return; | |
| | arena_get (ar_ptr, bytes); | | victim = _int_malloc (ar_ptr, bytes);
...........................................................
|
if (victim) |
| { | | tcache = (tcache_perthread_struct *) victim; | | memset (tcache, 0, sizeof (tcache_perthread_struct)); | | } | |
| |
|
|
|
tcache_perthread_struct 자체가 일반 힙 청크로 할당된다. arbitrary free가 가능하면 이 구조체를 free한 뒤 같은 크기로 다시 할당받아 counts/entries 배열 전체를 직접 조작할 수 있고, 이후의 malloc을 임의 주소로 보낼 수 있다.
/* Number of tcache bins (one per chunk-size class).  */
# define TCACHE_MAX_BINS 64

/* Singly-linked free-list node, overlaid on the user-data area of a
   freed chunk: 'next' lives exactly where user data used to be.  */
typedef struct tcache_entry
{
  struct tcache_entry *next;
} tcache_entry;

/* Per-thread cache: one fill count and one list head per bin.  Note the
   struct itself is heap-allocated (see tcache_init).  */
typedef struct tcache_perthread_struct
{
  char counts[TCACHE_MAX_BINS];
  tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;
|
/* Push a freed chunk onto the front of tcache bin TC_IDX.  There is no
   check that the chunk is not already in this list: freeing the same
   pointer twice makes the entry point at itself.  */
static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
  assert (tc_idx < TCACHE_MAX_BINS);
  e->next = tcache->entries[tc_idx];
  tcache->entries[tc_idx] = e;
  ++(tcache->counts[tc_idx]);
}
/* Caller must ensure that we know tc_idx is valid and there's
   available chunks to remove.  */
static __always_inline void *
tcache_get (size_t tc_idx)
{
  tcache_entry *e = tcache->entries[tc_idx];
  assert (tc_idx < TCACHE_MAX_BINS);
  assert (tcache->entries[tc_idx] > 0);
  /* The stored 'next' pointer becomes the new list head and E is
     returned as-is — no sanity check on either pointer value.  */
  tcache->entries[tc_idx] = e->next;
  --(tcache->counts[tc_idx]);
  return (void *) e;
}
구조가 fastbin과 같은 singly-linked LIFO라서 fastbin attack과 동일한 방식으로 익스플로잇할 수 있다. 게다가 할당 시점에 fastbin처럼 size 필드를 검사하지도 않으므로, next 자리에 stack, GOT, __malloc_hook 등의 주소를 써 두면 malloc이 그 주소를 그대로 돌려준다 (tcache poisoning).
/* When "x" is from chunksize(): map a chunk size to its tcache bin
   index — bin 0 holds MINSIZE-byte chunks, one bin per alignment step.  */
# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
|