/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain.  Use, modify, and
  redistribute this code without permission or acknowledgement in any
  way you wish.  Send questions, comments, complaints, performance
  data, etc to dl@cs.oswego.edu

  VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)

  Note: There may be an updated version of this malloc obtainable at
           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
  Check before installing!

  Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
*/

#include "malloc.h"


/* ------------------------- __malloc_trim -------------------------
   __malloc_trim is an inverse of sorts to __malloc_alloc.  It gives memory
   back to the system (via negative arguments to sbrk) if there is unused
   memory at the `high' end of the malloc pool. It is called automatically by
   free() when top space exceeds the trim threshold. It is also called by the
   public malloc_trim routine.  It returns 1 if it actually released any
   memory, else 0.
*/
static int __malloc_trim(size_t pad, mstate av)
{
    long  top_size;        /* Amount of top-most memory */
    long  extra;           /* Amount to release */
    long  released;        /* Amount actually released */
    char* current_brk;     /* address returned by pre-check sbrk call */
    char* new_brk;         /* address returned by post-check sbrk call */
    size_t pagesz;

    pagesz = av->pagesize;
    top_size = chunksize(av->top);

    /* Release in pagesize units, keeping at least one page */
    extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
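    /* Illustrative arithmetic only (hypothetical values): with pagesz = 4096,
       pad = 0, MINSIZE = 16 and top_size = 20480, this works out to
       extra = ((20480 - 0 - 16 + 4095) / 4096 - 1) * 4096 = 16384,
       leaving exactly one 4096-byte page in the top chunk. */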

    if (extra > 0) {

	/*
	   Only proceed if end of memory is where we last set it.
	   This avoids problems if there were foreign sbrk calls.
	   */
	current_brk = (char*)(MORECORE(0));
	if (current_brk == (char*)(av->top) + top_size) {

	    /*
	       Attempt to release memory. We ignore MORECORE return value,
	       and instead call again to find out where new end of memory is.
	       This avoids problems if first call releases less than we asked,
	       or if failure somehow altered brk value. (We could still
	       encounter problems if it altered brk in some very bad way,
	       but the only thing we can do is adjust anyway, which will cause
	       some downstream failure.)
	       */

	    MORECORE(-extra);
	    new_brk = (char*)(MORECORE(0));

	    if (new_brk != (char*)MORECORE_FAILURE) {
		released = (long)(current_brk - new_brk);

		if (released != 0) {
		    /* Success. Adjust top. */
		    av->sbrked_mem -= released;
		    set_head(av->top, (top_size - released) | PREV_INUSE);
		    check_malloc_state();
		    return 1;
		}
	    }
	}
    }
    return 0;
}

/* ------------------------- malloc_trim -------------------------
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool. You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program. However, it cannot guarantee to reduce memory. Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less). Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
  On systems that do not support "negative sbrks", it will always
  return 0.
*/
int malloc_trim(size_t pad)
{
  int r;
  __MALLOC_LOCK;
  mstate av = get_malloc_state();
  __malloc_consolidate(av);
  r = __malloc_trim(pad, av);
  __MALLOC_UNLOCK;
  return r;
}
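
/*
  Hypothetical usage sketch (not part of this library): after freeing a
  large block, an application can ask the allocator to return trailing
  free space to the kernel.  malloc_trim(0) reports 1 only when pages
  were actually released; a non-zero pad keeps that many bytes of
  trailing space around for future allocations instead.

      #include <stdlib.h>
      #include <string.h>
      #include <malloc.h>

      int shrink_after_big_free(void)
      {
          char *buf = malloc(8 * 1024 * 1024);
          if (buf == NULL)
              return 0;
          memset(buf, 1, 8 * 1024 * 1024);
          free(buf);
          return malloc_trim(0);
      }
*/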

/*
  Initialize a malloc_state struct.

  This is called only from within __malloc_consolidate, which needs to
  be called in the same contexts anyway.  It is never called directly
  outside of __malloc_consolidate because some optimizing compilers try
  to inline it at all call points, which turns out not to be an
  optimization at all. (Inlining it in __malloc_consolidate is fine though.)
*/
static void malloc_init_state(mstate av)
{
    int     i;
    mbinptr bin;

    /* Establish circular links for normal bins */
    for (i = 1; i < NBINS; ++i) {
	bin = bin_at(av,i);
	bin->fd = bin->bk = bin;
    }

    av->top_pad        = DEFAULT_TOP_PAD;
    av->n_mmaps_max    = DEFAULT_MMAP_MAX;
    av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    av->trim_threshold = DEFAULT_TRIM_THRESHOLD;

#if MORECORE_CONTIGUOUS
    set_contiguous(av);
#else
    set_noncontiguous(av);
#endif


    set_max_fast(av, DEFAULT_MXFAST);

    av->top            = initial_top(av);
    av->pagesize       = malloc_getpagesize;
}


/* ----------------------------------------------------------------------
 *
 * PUBLIC STUFF
 *
 * ----------------------------------------------------------------------*/


/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/
void attribute_hidden __malloc_consolidate(mstate av)
{
    mfastbinptr*    fb;                 /* current fastbin being consolidated */
    mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
    mchunkptr       p;                  /* current chunk being consolidated */
    mchunkptr       nextp;              /* next chunk to consolidate */
    mchunkptr       unsorted_bin;       /* bin header */
    mchunkptr       first_unsorted;     /* chunk to link to */

    /* These have same use as in free() */
    mchunkptr       nextchunk;
    size_t size;
    size_t nextsize;
    size_t prevsize;
    int             nextinuse;
    mchunkptr       bck;
    mchunkptr       fwd;

    /*
       If max_fast is 0, we know that av hasn't
       yet been initialized, in which case do so below
       */

    if (av->max_fast != 0) {
	clear_fastchunks(av);

	unsorted_bin = unsorted_chunks(av);

	/*
	   Remove each chunk from fast bin and consolidate it, placing it
	   then in unsorted bin. Among other reasons for doing this,
	   placing in unsorted bin avoids needing to calculate actual bins
	   until malloc is sure that chunks aren't immediately going to be
	   reused anyway.
	   */

	maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
	fb = &(av->fastbins[0]);
	do {
	    if ( (p = *fb) != 0) {
		*fb = 0;

		do {
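		    /* Fast-bin links are stored in protected (mangled) form;
		       CHECK_PTR guards against a corrupted link and REVEAL_PTR
		       recovers the plain pointer to the next chunk. */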
		    CHECK_PTR(p);
		    check_inuse_chunk(p);
		    nextp = REVEAL_PTR(&p->fd, p->fd);

		    /* Slightly streamlined version of consolidation code in free() */
		    size = p->size & ~PREV_INUSE;
		    nextchunk = chunk_at_offset(p, size);
		    nextsize = chunksize(nextchunk);

		    if (!prev_inuse(p)) {
			prevsize = p->prev_size;
			size += prevsize;
			p = chunk_at_offset(p, -((long) prevsize));
			unlink(p, bck, fwd);
		    }

		    if (nextchunk != av->top) {
			nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
			set_head(nextchunk, nextsize);

			if (!nextinuse) {
			    size += nextsize;
			    unlink(nextchunk, bck, fwd);
			}

			first_unsorted = unsorted_bin->fd;
			unsorted_bin->fd = p;
			first_unsorted->bk = p;

			set_head(p, size | PREV_INUSE);
			p->bk = unsorted_bin;
			p->fd = first_unsorted;
			set_foot(p, size);
		    }

		    else {
			size += nextsize;
			set_head(p, size | PREV_INUSE);
			av->top = p;
		    }

		} while ( (p = nextp) != 0);

	    }
	} while (fb++ != maxfb);
    }
    else {
	malloc_init_state(av);
	check_malloc_state();
    }
}


/* ------------------------------ free ------------------------------ */
void free(void* mem)
{
    mstate av;

    mchunkptr       p;           /* chunk corresponding to mem */
    size_t size;        /* its size */
    mfastbinptr*    fb;          /* associated fastbin */
    mchunkptr       nextchunk;   /* next contiguous chunk */
    size_t nextsize;    /* its size */
    int             nextinuse;   /* true if nextchunk is used */
    size_t prevsize;    /* size of previous contiguous chunk */
    mchunkptr       bck;         /* misc temp for linking */
    mchunkptr       fwd;         /* misc temp for linking */

    /* free(0) has no effect */
    if (mem == NULL)
	return;

    __MALLOC_LOCK;
    av = get_malloc_state();
    p = mem2chunk(mem);
    size = chunksize(p);

    check_inuse_chunk(p);

    /*
       If eligible, place chunk on a fastbin so it can be found
       and used quickly in malloc.
       */

    if ((unsigned long)(size) <= (unsigned long)(av->max_fast)

#if TRIM_FASTBINS
	    /* If TRIM_FASTBINS set, don't place chunks
	       bordering top into fastbins */
	    && (chunk_at_offset(p, size) != av->top)
#endif
       ) {

	set_fastchunks(av);
	fb = &(av->fastbins[fastbin_index(size)]);
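	/* Push p onto the head of its fastbin.  The fd link is stored in
	   protected (mangled) form via PROTECT_PTR, so it must go through
	   REVEAL_PTR before being followed again. */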
	p->fd = PROTECT_PTR(&p->fd, *fb);
	*fb = p;
    }

    /*
       Consolidate other non-mmapped chunks as they arrive.
       */

    else if (!chunk_is_mmapped(p)) {
	set_anychunks(av);

	nextchunk = chunk_at_offset(p, size);
	nextsize = chunksize(nextchunk);

	/* consolidate backward */
	if (!prev_inuse(p)) {
	    prevsize = p->prev_size;
	    size += prevsize;
	    p = chunk_at_offset(p, -((long) prevsize));
	    unlink(p, bck, fwd);
	}

	if (nextchunk != av->top) {
	    /* get and clear inuse bit */
	    nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
	    set_head(nextchunk, nextsize);

	    /* consolidate forward */
	    if (!nextinuse) {
		unlink(nextchunk, bck, fwd);
		size += nextsize;
	    }

	    /*
	       Place the chunk in unsorted chunk list. Chunks are
	       not placed into regular bins until after they have
	       been given one chance to be used in malloc.
	       */

	    bck = unsorted_chunks(av);
	    fwd = bck->fd;
	    p->bk = bck;
	    p->fd = fwd;
	    bck->fd = p;
	    fwd->bk = p;

	    set_head(p, size | PREV_INUSE);
	    set_foot(p, size);

	    check_free_chunk(p);
	}

	/*
	   If the chunk borders the current high end of memory,
	   consolidate into top
	   */

	else {
	    size += nextsize;
	    set_head(p, size | PREV_INUSE);
	    av->top = p;
	    check_chunk(p);
	}

	/*
	   If freeing a large space, consolidate possibly-surrounding
	   chunks. Then, if the total unused topmost memory exceeds trim
	   threshold, ask malloc_trim to reduce top.

	   Unless max_fast is 0, we don't know if there are fastbins
	   bordering top, so we cannot tell for sure whether threshold
	   has been reached unless fastbins are consolidated.  But we
	   don't want to consolidate on each free.  As a compromise,
	   consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
	   is reached.
	   */

	if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
	    if (have_fastchunks(av))
		__malloc_consolidate(av);

	    if ((unsigned long)(chunksize(av->top)) >=
		    (unsigned long)(av->trim_threshold))
		__malloc_trim(av->top_pad, av);
	}

    }
    /*
       If the chunk was allocated via mmap, release via munmap()
       Note that if HAVE_MMAP is false but chunk_is_mmapped is
       true, then user must have overwritten memory. There's nothing
       we can do to catch this error unless DEBUG is set, in which case
       check_inuse_chunk (above) will have triggered error.
       */

    else {
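	/* For an mmapped chunk, prev_size records how many leading bytes of
	   the mapping sit in front of the chunk, so the whole original
	   mapping can be handed back to munmap(). */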
	size_t offset = p->prev_size;
	av->n_mmaps--;
	av->mmapped_mem -= (size + offset);
	munmap((char*)p - offset, size + offset);
    }
    __MALLOC_UNLOCK;
}

/* glibc compatibility */
weak_alias(free, __libc_free)