/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain.  Use, modify, and
  redistribute this code without permission or acknowledgement in any
  way you wish.  Send questions, comments, complaints, performance
  data, etc to dl@cs.oswego.edu

  VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)

  Note: There may be an updated version of this malloc obtainable at
           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
  Check before installing!

  Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
*/

#include "malloc.h"


/* ------------------------------ realloc ------------------------------ */
void* realloc(void* oldmem, size_t bytes)
{
    mstate av;

    size_t  nb;              /* padded request size */

    mchunkptr        oldp;            /* chunk corresponding to oldmem */
    size_t  oldsize;         /* its size */

    mchunkptr        newp;            /* chunk to return */
    size_t  newsize;         /* its size */
    void*          newmem;          /* corresponding user mem */

    mchunkptr        next;            /* next contiguous chunk after oldp */

    mchunkptr        remainder;       /* extra space at end of newp */
    unsigned long     remainder_size;  /* its size */

    mchunkptr        bck;             /* misc temp for linking */
    mchunkptr        fwd;             /* misc temp for linking */

    unsigned long     copysize;        /* bytes to copy */
    unsigned int     ncopies;         /* size_t words to copy */
    size_t* s;               /* copy source */
    size_t* d;               /* copy destination */

    void *retval;

    /* Check for special cases.  */
    if (! oldmem)
	return malloc(bytes);
    if (! bytes) {
	free (oldmem);
	return NULL;
    }

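    /* Pad the request with chunk overhead and round it up to the
       allocator's alignment; the resulting internal size lands in nb.
       Oversized requests are rejected inside checked_request2size. */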
    checked_request2size(bytes, nb);
    __MALLOC_LOCK;
    av = get_malloc_state();

    oldp    = mem2chunk(oldmem);
    oldsize = chunksize(oldp);

    check_inuse_chunk(oldp);

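    /* Ordinary (non-mmapped) chunk: prefer resizing in place.  Reuse the
       chunk if it is already large enough, otherwise try to grow it into
       the top chunk or a free successor, and only fall back to
       allocate/copy/free as a last resort. */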
    if (!chunk_is_mmapped(oldp)) {

	if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
	    /* already big enough; split below */
	    newp = oldp;
	    newsize = oldsize;
	}

	else {
	    next = chunk_at_offset(oldp, oldsize);

	    /* Try to expand forward into top */
	    if (next == av->top &&
		    (unsigned long)(newsize = oldsize + chunksize(next)) >=
		    (unsigned long)(nb + MINSIZE)) {
		set_head_size(oldp, nb);
		av->top = chunk_at_offset(oldp, nb);
		set_head(av->top, (newsize - nb) | PREV_INUSE);
		retval = chunk2mem(oldp);
		goto DONE;
	    }

	    /* Try to expand forward into next chunk;  split off remainder below */
	    else if (next != av->top &&
		    !inuse(next) &&
		    (unsigned long)(newsize = oldsize + chunksize(next)) >=
		    (unsigned long)(nb)) {
		newp = oldp;
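		/* Detach the free successor from its bin list; any excess
		   beyond the request is split off further below. */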
		unlink(next, bck, fwd);
	    }

	    /* allocate, copy, free */
	    else {
		newmem = malloc(nb - MALLOC_ALIGN_MASK);
		if (newmem == 0) {
		    retval = 0; /* propagate failure */
		    goto DONE;
		}

		newp = mem2chunk(newmem);
		newsize = chunksize(newp);

		/*
		   Avoid copy if newp is next chunk after oldp.
		   */
		if (newp == next) {
		    newsize += oldsize;
		    newp = oldp;
		}
		else {
		    /*
		       Unroll copy of <= 36 bytes (72 if 8-byte sizes)
		       We know that contents have an odd number of
		       size_t-sized words; minimally 3.
		       */

		    copysize = oldsize - (sizeof(size_t));
		    s = (size_t*)(oldmem);
		    d = (size_t*)(newmem);
		    ncopies = copysize / sizeof(size_t);
		    assert(ncopies >= 3);

		    if (ncopies > 9)
			memcpy(d, s, copysize);

		    else {
			*(d+0) = *(s+0);
			*(d+1) = *(s+1);
			*(d+2) = *(s+2);
			if (ncopies > 4) {
			    *(d+3) = *(s+3);
			    *(d+4) = *(s+4);
			    if (ncopies > 6) {
				*(d+5) = *(s+5);
				*(d+6) = *(s+6);
				if (ncopies > 8) {
				    *(d+7) = *(s+7);
				    *(d+8) = *(s+8);
				}
			    }
			}
		    }

		    free(oldmem);
		    check_inuse_chunk(newp);
		    retval = chunk2mem(newp);
		    goto DONE;
		}
	    }
	}

	/* If possible, free extra space in old or extended chunk */

	assert((unsigned long)(newsize) >= (unsigned long)(nb));

	remainder_size = newsize - nb;

	if (remainder_size < MINSIZE) { /* not enough extra to split off */
	    set_head_size(newp, newsize);
	    set_inuse_bit_at_offset(newp, newsize);
	}
	else { /* split remainder */
	    remainder = chunk_at_offset(newp, nb);
	    set_head_size(newp, nb);
	    set_head(remainder, remainder_size | PREV_INUSE);
	    /* Mark remainder as inuse so free() won't complain */
	    set_inuse_bit_at_offset(remainder, remainder_size);
	    free(chunk2mem(remainder));
	}

	check_inuse_chunk(newp);
	retval = chunk2mem(newp);
	goto DONE;
    }

    /*
       Handle mmap cases
       */

    else {
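	/* For an mmapped chunk, prev_size holds the front padding added
	   when the mapping was created, so (char*)oldp - offset is the
	   start of the underlying mapping. */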
	size_t offset = oldp->prev_size;
	size_t pagemask = av->pagesize - 1;
	char *cp;
	unsigned long  sum;

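	/* Mmapped chunks occupy whole pages, so round the padded request
	   up to a page boundary. */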
	/* Note the extra (sizeof(size_t)) overhead */
	newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;

	/* don't need to remap if still within same page */
	if (oldsize == newsize - offset) {
	    retval = oldmem;
	    goto DONE;
	}

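	/* Ask the kernel to resize the mapping in place; the final
	   argument (1 == MREMAP_MAYMOVE) lets it relocate the mapping
	   when it cannot be grown where it is. */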
	cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);

	if (cp != (char*)MORECORE_FAILURE) {

	    newp = (mchunkptr)(cp + offset);
	    set_head(newp, (newsize - offset)|IS_MMAPPED);

	    assert(aligned_OK(chunk2mem(newp)));
	    assert((newp->prev_size == offset));

	    /* update statistics */
	    sum = av->mmapped_mem += newsize - oldsize;
	    if (sum > (unsigned long)(av->max_mmapped_mem))
		av->max_mmapped_mem = sum;
	    sum += av->sbrked_mem;
	    if (sum > (unsigned long)(av->max_total_mem))
		av->max_total_mem = sum;

	    retval = chunk2mem(newp);
	    goto DONE;
	}

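	/* mremap failed: keep the old mapping if it is already large
	   enough, otherwise allocate, copy, free.  If that allocation
	   fails, NULL is returned and oldmem is left intact. */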
	/* Note the extra (sizeof(size_t)) overhead. */
	if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
	    newmem = oldmem; /* do nothing */
	else {
	    /* Must alloc, copy, free. */
	    newmem = malloc(nb - MALLOC_ALIGN_MASK);
	    if (newmem != 0) {
		memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
		free(oldmem);
	    }
	}
	retval = newmem;
    }

 DONE:
    __MALLOC_UNLOCK;
    return retval;
}

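/*
   Caller-side sketch (illustrative only, not part of this file): since a
   failed realloc returns NULL and leaves the original block untouched,
   callers should store the result in a temporary before overwriting
   their pointer.  handle_out_of_memory() below is a hypothetical error
   handler, not something defined here.

       char *tmp = realloc(buf, newlen);
       if (tmp != NULL)
	   buf = tmp;
       else
	   handle_out_of_memory(buf);   // buf is still valid here
*/
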
/* glibc compatibility */
weak_alias(realloc, __libc_realloc)