// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2022, Linaro Limited.
 */

#define PROTOTYPES

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_OPTION
#define TestProg 20000		/* Generate built-in test program
				   if defined. The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif

#ifdef __LP64__
#define SizeQuant 16
#endif
#ifdef __ILP32__
#define SizeQuant 8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size. This
				   MUST be a power of two. */
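				/*
				 * Example: with SizeQuant == 16 (LP64),
				 * a 1-byte request still consumes a full
				 * 16-byte quantum (roughly, the payload
				 * plus the bhead header rounded up to a
				 * multiple of SizeQuant).
				 */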

#ifdef BGET_ENABLE_OPTION
#define BufDump     1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid    1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData    1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats    1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe    1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit     1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request. This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl       1		/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control. */
#endif

#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData    1
#define BufValid    1
#define FreeWipe    1
#endif

#ifdef CFG_WITH_STATS
#define BufStats    1
#endif

#include <compiler.h>
#include <config.h>
#include <malloc.h>
#include <memtag.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib_ext.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */

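/*
 * DEFINE_CTX() initializes the poolset's free list as an empty circular
 * doubly linked list: the flink/blink pointers of the list head both point
 * back at the head itself, which is how bget() recognizes an empty list.
 */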
#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
				     {&name.poolset.freelist,	\
				      &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif

static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
	print_kernel_stack();
#endif
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
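/*
 * BGET stores the negated size in bsize for allocated buffers and the
 * positive size for free ones, hence the negation below to recover the
 * allocated size.
 */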
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;		/* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/* Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}

static void *maybe_tag_buf(uint8_t *buf, size_t hdr_size, size_t requested_size)
{
	if (!buf)
		return NULL;

	COMPILE_TIME_ASSERT(MEMTAG_GRANULE_SIZE <= SizeQuant);

	if (MEMTAG_IS_ENABLED) {
		size_t sz = ROUNDUP(requested_size, MEMTAG_GRANULE_SIZE);

		/*
		 * Allocated buffer can be larger than requested when
		 * allocating with memalign(), but we should never tag more
		 * than allocated.
		 */
		assert(bget_buf_size(buf) >= sz + hdr_size);
		return memtag_set_random_tags(buf, sz + hdr_size);
	}

#if defined(__KERNEL__)
	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		asan_tag_access(buf, buf + hdr_size + requested_size);
#endif
	return buf;
}

static void *maybe_untag_buf(void *buf)
{
	if (!buf)
		return NULL;

	if (MEMTAG_IS_ENABLED) {
		size_t sz = 0;

		memtag_assert_tag(buf); /* Trying to catch double free early */
		sz = bget_buf_size(memtag_strip_tag(buf));
		return memtag_set_tags(buf, sz, 0);
	}

#if defined(__KERNEL__)
	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		asan_tag_heap_free(buf, (uint8_t *)buf + bget_buf_size(buf));
#endif
	return buf;
}

static void *strip_tag(void *buf)
{
	if (MEMTAG_IS_ENABLED)
		return memtag_strip_tag(buf);
	return buf;
}

static void tag_asan_free(void *buf __maybe_unused, size_t len __maybe_unused)
{
#if defined(__KERNEL__)
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
#endif
}

#ifdef BufStats

static void *raw_malloc_return_hook(void *p, size_t hdr_size,
				    size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}

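	/*
	 * bget() never hands out less than one SizeQuant of payload, so
	 * tag at least that much even for smaller requests.
	 */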
	return maybe_tag_buf(p, hdr_size, MAX(SizeQuant, requested_size));
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_get_stats(ctx, stats);
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void *raw_malloc_return_hook(void *p, size_t hdr_size,
				    size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);

	return maybe_tag_buf(p, hdr_size, MAX(SizeQuant, requested_size));
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
			       size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp) \
	for (bpool_foreach_iterator_init((ctx),(iterator)); \
	     bpool_foreach((ctx),(iterator), (bp));)
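
/*
 * Illustrative use (a sketch, mirroring gen_mdbg_check() below):
 *
 *	struct bpool_iterator itr = { };
 *	void *b = NULL;
 *
 *	BPOOL_FOREACH(ctx, &itr, &b)
 *		IMSG("allocated buffer at %p", b);
 */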

void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		   size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	if (!alignment || !IS_POWER_OF_TWO(alignment))
		return NULL;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding the header */
	if (ADD_OVERFLOW(pl_size, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(alignment, hdr_size, s, &ctx->poolset);
out:
	return raw_malloc_return_hook(ptr, hdr_size, pl_size, ctx);
}

void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
		 struct malloc_ctx *ctx)
{
	/*
	 * Note that we're feeding SizeQ as alignment, this is the smallest
	 * alignment that bget() can use.
	 */
	return raw_memalign(hdr_size, ftr_size, SizeQ, pl_size, ctx);
}

void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(maybe_untag_buf(ptr), &ctx->poolset, wipe);
}

void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		 size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding hdr_size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(0, hdr_size, s, &ctx->poolset);
out:
	return raw_malloc_return_hook(ptr, hdr_size, pl_nmemb * pl_size, ctx);
}

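/*
 * Note: raw_realloc() never resizes in place. It always allocates a new
 * buffer, copies the old payload and releases the old buffer. If the new
 * allocation fails, the old buffer is left untouched, matching realloc()
 * semantics.
 */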
void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		  size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools(ctx);

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bget(0, 0, s, &ctx->poolset);

	if (p && ptr) {
		void *old_ptr = maybe_untag_buf(ptr);
		bufsize old_sz = bget_buf_size(old_ptr);

		if (old_sz < s) {
			memcpy(p, old_ptr, old_sz);
#ifndef __KERNEL__
			/* User space reallocations are always zeroed */
			memset((uint8_t *)p + old_sz, 0, s - old_sz);
#endif
		} else {
			memcpy(p, old_ptr, s);
		}

		brel(old_ptr, &ctx->poolset, false /*!wipe*/);
	}
out:
	return raw_malloc_return_hook(p, hdr_size, pl_size, ctx);
}

#ifdef ENABLE_MDBG

struct mdbg_hdr {
	const char *fname;
	uint16_t line;
	uint32_t pl_size;
	uint32_t magic;
#if defined(ARM64)
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec
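
/*
 * Layout of an mdbg allocation (an informal sketch derived from the
 * functions below):
 *
 *	[ struct mdbg_hdr | payload (pl_size) | pad | uint32_t footer ]
 *	                  ^
 *	                  pointer returned to the caller
 *
 * The footer is placed at the first 32-bit boundary at or after the end
 * of the payload; the header and footer magics catch buffer overruns and
 * double frees.
 */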

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}
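
/*
 * For example, pl_size = 5 gives ftr_pad = 3, so mdbg_get_ftr_size()
 * returns 7: three bytes of padding followed by the 4-byte footer magic.
 */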

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return strip_tag(footer);
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
			    int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	/*
	 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
	 */
	COMPILE_TIME_ASSERT((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, ctx, wipe);
	}
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	gen_mdbg_free(&malloc_ctx, ptr, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(nmemb * size), nmemb, size,
			 ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx,
				       const char *fname, int lineno,
				       void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
			      int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(ctx);

	p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
	malloc_unlock(ctx, exceptions);
	return p;
}

#define realloc_unlocked(ctx, ptr, size) \
	gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname,
			       int lineno, size_t alignment, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d\n",
			     hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}

void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		    size_t size)
{
	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}

#if __STDC_VERSION__ >= 201112L
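/* C11 requires the aligned_alloc() size to be a multiple of alignment */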
void *mdbg_aligned_alloc(const char *fname, int lineno, size_t alignment,
			 size_t size)
{
	if (size % alignment)
		return NULL;

	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}
#endif /* __STDC_VERSION__ */

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}

/*
 * Since malloc debug is enabled, malloc() and friends are redirected by
 * macros to mdbg_malloc() etc.
 * We still want to export the standard entry points in case they are
 * referenced by the application, either directly or via external libraries.
 */
#undef malloc
void *malloc(size_t size)
{
	return mdbg_malloc(__FILE__, __LINE__, size);
}

#undef calloc
void *calloc(size_t nmemb, size_t size)
{
	return mdbg_calloc(__FILE__, __LINE__, nmemb, size);
}

#undef realloc
void *realloc(void *ptr, size_t size)
{
	return mdbg_realloc(__FILE__, __LINE__, ptr, size);
}

#else /* ENABLE_MDBG */

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_malloc(0, 0, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	raw_free(ptr, &malloc_ctx, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
			      size_t size)
{
	return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = realloc_unlocked(&malloc_ctx, ptr, size);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

void *memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

#if __STDC_VERSION__ >= 201112L
void *aligned_alloc(size_t alignment, size_t size)
{
	if (size % alignment)
		return NULL;

	return memalign(alignment, size);
}
#endif /* __STDC_VERSION__ */

static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif

void free(void *ptr)
{
	free_helper(ptr, false);
}

void free_wipe(void *ptr)
{
	free_helper(ptr, true);
}

static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_add_pool(ctx, buf, len);
	malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	uint32_t exceptions = malloc_lock(ctx);
	bool ret = false;

	ret = raw_malloc_buffer_is_within_alloced(ctx, buf, len);
	malloc_unlock(ctx, exceptions);

	return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	ret = raw_malloc_buffer_overlaps_heap(ctx, buf, len);
	malloc_unlock(ctx, exceptions);
	return ret;
}

size_t raw_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_ctx);
}

void raw_malloc_init_ctx(struct malloc_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
	ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
}

void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead);
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	void *p = NULL;
	size_t l = 0;

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);

	if (start > end || (end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	/* First pool requires a bigger size */
	if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) {
		DMSG("Skipping too small initial pool");
		return;
	}

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
}
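
/*
 * Usage sketch (assuming a caller-provided backing buffer): pool start and
 * end are rounded to SizeQuant, so up to SizeQuant - 1 bytes can be lost at
 * either end of an unaligned buffer.
 *
 *	static uint8_t heap[64 * 1024];    (hypothetical backing store)
 *
 *	malloc_add_pool(heap, sizeof(heap));
 */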

bool raw_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
				     void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t)strip_tag(buf);
	uintptr_t buf_end = buf_start + len;
	size_t n = 0;

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf;
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end)
			return true;	/* Wrapping buffers, shouldn't happen */

		if ((buf_start >= pool_start && buf_start < pool_end) ||
		    (buf_end >= pool_start && buf_end < pool_end))
			return true;
	}

	return false;
}

bool raw_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
					 void *buf, size_t len)
{
	struct bpool_iterator itr = { };
	void *b = NULL;
	uint8_t *start_buf = strip_tag(buf);
	uint8_t *end_buf = start_buf + len;

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		return false;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b = NULL;
		uint8_t *end_b = NULL;
		size_t s = 0;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;
		if (start_buf >= start_b && end_buf <= end_b)
			return true;
	}

	return false;
}

#ifdef CFG_WITH_STATS
void raw_malloc_get_stats(struct malloc_ctx *ctx, struct malloc_stats *stats)
{
	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
}
#endif

void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}

#ifdef CFG_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_malloc(0, 0, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = realloc_unlocked(&nex_malloc_ctx, ptr, size);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	raw_free(ptr, &nex_malloc_ctx, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#else /* ENABLE_MDBG */

void *nex_mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size);
}

void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size);
}

void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size);
}

void *nex_mdbg_memalign(const char *fname, int lineno, size_t alignment,
			size_t size)
{
	return gen_mdbg_memalign(&nex_malloc_ctx, fname, lineno, alignment,
				 size);
}

void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#endif /* ENABLE_MDBG */

void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

#ifdef BufStats

void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif

#endif