#define JEMALLOC_LARGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
{
	assert(usize == s2u(usize));

	return (large_palloc(tsdn, arena, usize, CACHELINE, zero));
}

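/*
 * Allocate a large extent of usable size usize, aligned to alignment, and
 * insert it into the arena's large extent list. The extent is zero- or
 * junk-filled according to the zero argument and the opt_zero/opt_junk_alloc
 * options.
 */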
void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero)
{
	size_t ausize;
	extent_t *extent;
	bool is_zeroed;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	assert(!tsdn_null(tsdn) || arena != NULL);

	ausize = sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS))
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to extent_alloc(), so
	 * that it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	if (likely(!tsdn_null(tsdn)))
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
	    arena, usize, alignment, &is_zeroed)) == NULL)
		return (NULL);

	/* Insert extent into large. */
	malloc_mutex_lock(tsdn, &arena->large_mtx);
	ql_elm_new(extent, ql_link);
	ql_tail_insert(&arena->large, extent, ql_link);
	malloc_mutex_unlock(tsdn, &arena->large_mtx);
	if (config_prof && arena_prof_accum(tsdn, arena, usize))
		prof_idump(tsdn);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed) {
			memset(extent_addr_get(extent), 0,
			    extent_usize_get(extent));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
		    extent_usize_get(extent));
	}

	arena_decay_tick(tsdn, arena);
	return (extent_addr_get(extent));
}

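/*
 * When JEMALLOC_JET is defined, large_dalloc_junk() and
 * large_dalloc_maybe_junk() are routed through function pointers so that tests
 * can interpose on junk filling.
 */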
#ifdef JEMALLOC_JET
#undef large_dalloc_junk
#define large_dalloc_junk JEMALLOC_N(n_large_dalloc_junk)
#endif
void
large_dalloc_junk(void *ptr, size_t usize)
{
	memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
#ifdef JEMALLOC_JET
#undef large_dalloc_junk
#define large_dalloc_junk JEMALLOC_N(large_dalloc_junk)
large_dalloc_junk_t *large_dalloc_junk = JEMALLOC_N(n_large_dalloc_junk);
#endif

#ifdef JEMALLOC_JET
#undef large_dalloc_maybe_junk
#define large_dalloc_maybe_junk JEMALLOC_N(n_large_dalloc_maybe_junk)
#endif
void
large_dalloc_maybe_junk(void *ptr, size_t usize)
{
	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the extent isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && extent_in_dss(ptr)))
			large_dalloc_junk(ptr, usize);
	}
}
#ifdef JEMALLOC_JET
#undef large_dalloc_maybe_junk
#define large_dalloc_maybe_junk JEMALLOC_N(large_dalloc_maybe_junk)
large_dalloc_maybe_junk_t *large_dalloc_maybe_junk =
    JEMALLOC_N(n_large_dalloc_maybe_junk);
#endif

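/*
 * Shrink a large allocation in place by splitting off the excess pages and
 * returning them to the arena's extent cache. Returns true on failure, e.g. if
 * the extent hooks do not support splitting.
 */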
static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
{
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t diff = extent_size_get(extent) - (usize + large_pad);

	assert(oldusize > usize);

	if (extent_hooks->split == NULL)
		return (true);

	/* Split excess pages. */
	if (diff != 0) {
		extent_t *trail = extent_split_wrapper(tsdn, arena,
		    &extent_hooks, extent, usize + large_pad, usize, diff,
		    diff);
		if (trail == NULL)
			return (true);

		if (config_fill && unlikely(opt_junk_free)) {
			large_dalloc_maybe_junk(extent_addr_get(trail),
			    extent_usize_get(trail));
		}

		arena_extent_cache_dalloc(tsdn, arena, &extent_hooks, trail);
	}

	arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);

	return (false);
}

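/*
 * Grow a large allocation in place by allocating a trailing extent, preferably
 * from the extent cache, and merging it into the existing one. Returns true on
 * failure.
 */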
static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
    bool zero)
{
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	bool is_zeroed_trail = false;
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t trailsize = usize - extent_usize_get(extent);
	extent_t *trail;

	if (extent_hooks->merge == NULL)
		return (true);

	if ((trail = arena_extent_cache_alloc(tsdn, arena, &extent_hooks,
	    extent_past_get(extent), trailsize, CACHELINE, &is_zeroed_trail)) ==
	    NULL) {
		bool commit = true;
		if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
		    extent_past_get(extent), trailsize, 0, CACHELINE,
		    &is_zeroed_trail, &commit, false)) == NULL)
			return (true);
	}

	if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
		extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
		return (true);
	}

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (config_cache_oblivious) {
			/*
			 * Zero the trailing bytes of the original allocation's
			 * last page, since they are in an indeterminate state.
			 * There will always be trailing bytes, because ptr's
			 * offset from the beginning of the extent is a multiple
			 * of CACHELINE in [0 .. PAGE).
			 */
			void *zbase = (void *)
			    ((uintptr_t)extent_addr_get(extent) + oldusize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}
		if (!is_zeroed_trail) {
			memset((void *)((uintptr_t)extent_addr_get(extent) +
			    oldusize), 0, usize - oldusize);
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
		    JEMALLOC_ALLOC_JUNK, usize - oldusize);
	}

	arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);

	return (false);
}

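/*
 * Try to resize the allocation in place so that its usable size falls within
 * [usize_min, usize_max]. Returns false if no move is needed; returns true if
 * the caller must allocate new space and copy.
 */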
bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero)
{
	assert(s2u(extent_usize_get(extent)) == extent_usize_get(extent));
	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize_max >=
	    LARGE_MINCLASS);

	if (usize_max > extent_usize_get(extent)) {
		/* Attempt to expand the allocation in-place. */
		if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return (false);
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && usize_min >
		    extent_usize_get(extent) &&
		    large_ralloc_no_move_expand(tsdn, extent, usize_min,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return (false);
		}
	}

	/*
	 * Avoid moving the allocation if the existing extent size accommodates
	 * the new size.
	 */
	if (extent_usize_get(extent) >= usize_min && extent_usize_get(extent) <=
	    usize_max) {
		arena_decay_tick(tsdn, extent_arena_get(extent));
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (extent_usize_get(extent) > usize_max) {
		if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return (false);
		}
	}
	return (true);
}

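/* Allocate new space for large_ralloc(), honoring alignment if necessary. */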
static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero)
{
	if (alignment <= CACHELINE)
		return (large_malloc(tsdn, arena, usize, zero));
	return (large_palloc(tsdn, arena, usize, alignment, zero));
}

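/*
 * Reallocate a large allocation: first attempt an in-place resize; if that
 * fails, allocate new space, copy, and free the old extent.
 */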
void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize >=
	    LARGE_MINCLASS);

	/* Try to avoid moving the allocation. */
	if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero))
		return (extent_addr_get(extent));

	/*
	 * usize and old size are different enough that we need to use a
	 * different size class. In that case, fall back to allocating new
	 * space and copying.
	 */
	ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, zero);
	if (ret == NULL)
		return (NULL);

	copysize = (usize < extent_usize_get(extent)) ? usize :
	    extent_usize_get(extent);
	memcpy(ret, extent_addr_get(extent), copysize);
	isdalloct(tsdn, extent, extent_addr_get(extent),
	    extent_usize_get(extent), tcache, true);
	return (ret);
}

/*
 * junked_locked indicates whether the extent's data have been junk-filled, and
 * whether the arena's lock is currently held. The arena's large_mtx is
 * independent of these considerations.
 */
static void
large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked)
{
	arena_t *arena;

	arena = extent_arena_get(extent);
	malloc_mutex_lock(tsdn, &arena->large_mtx);
	ql_remove(&arena->large, extent, ql_link);
	malloc_mutex_unlock(tsdn, &arena->large_mtx);
	if (!junked_locked) {
		large_dalloc_maybe_junk(extent_addr_get(extent),
		    extent_usize_get(extent));
	}
	arena_extent_dalloc_large(tsdn, arena, extent, junked_locked);

	if (!junked_locked)
		arena_decay_tick(tsdn, arena);
}

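/*
 * Public deallocation entry points: large_dalloc_junked_locked() is used when
 * the data have already been junk-filled and the arena lock is held;
 * large_dalloc() is the ordinary path.
 */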
void
large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent)
{
	large_dalloc_impl(tsdn, extent, true);
}

void
large_dalloc(tsdn_t *tsdn, extent_t *extent)
{
	large_dalloc_impl(tsdn, extent, false);
}

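/* Usable size query and profiling context accessors for large extents. */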
size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent)
{
	return (extent_usize_get(extent));
}

prof_tctx_t *
large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent)
{
	return (extent_prof_tctx_get(extent));
}

void
large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx)
{
	extent_prof_tctx_set(extent, tctx);
}

void
large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent)
{
	large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}