1 /****************************************************************************
2 *
3 * Copyright (C) 2007-2009, 2013-2014 Gregory Nutt. All rights reserved.
4 * Author: Gregory Nutt <gnutt@nuttx.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * 3. Neither the name NuttX nor the names of its contributors may be
17 * used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 ****************************************************************************/
34
35 #ifndef __INCLUDE_NUTTX_MM_MM_H
36 #define __INCLUDE_NUTTX_MM_MM_H
37
38 /****************************************************************************
39 * Included Files
40 ****************************************************************************/
41 #include <csi_config.h>
42
43 #include <sys/types.h>
44 #include <stdbool.h>
45 #include <stdlib.h>
46 #include <stdint.h>
47
48 /****************************************************************************
49 * Pre-processor Definitions
50 ****************************************************************************/
/* Track the maximum heap usage watermark by default */

#ifndef CONFIG_MM_MAX_USED
#define CONFIG_MM_MAX_USED 1
#endif

/* NOTE(review): <stdbool.h> is included above and already provides
 * 'true'/'false'; redefining them as object-like macros risks
 * redefinition warnings or mismatches with the bool type — confirm
 * these macros are still required in this environment.
 */

#define true 1
#define false 0
#define OK 0    /* Generic success return value (NuttX convention) */
58 /* Configuration ************************************************************/
59 /* If the MCU has a small (16-bit) address capability, then we will use
60 * a smaller chunk header that contains 16-bit size/offset information.
61 * We will also use the smaller header on MCUs with wider addresses if
62 * CONFIG_MM_SMALL is selected. This configuration is common with MCUs
63 * that have a large FLASH space, but only a tiny internal SRAM.
64 */
65
66 #ifdef CONFIG_SMALL_MEMORY
67 /* If the MCU has a small addressing capability, then for the smaller
68 * chunk header.
69 */
70
71 # undef CONFIG_MM_SMALL
72 # define CONFIG_MM_SMALL 1
73 #endif
74
75 /* Terminology:
76 *
77 * - Flat Build: In the flat build (CONFIG_BUILD_FLAT=y), there is only a
78 * single heap access with the standard allocations (malloc/free). This
79 * heap is referred to as the user heap. The kernel logic must
80 * initialize this single heap at boot time.
81 * - Protected build: In the protected build (CONFIG_BUILD_PROTECTED=y)
82 * where an MPU is used to protect a region of otherwise flat memory,
83 * there will be two allocators: One that allocates protected (kernel)
84 * memory and one that allocates unprotected (user) memory. These are
85 * referred to as the kernel and user heaps, respectively. Both must be
86 * initialized by the kernel logic at boot time.
87 * - Kernel Build: If the architecture has an MMU, then it may support the
88 * kernel build (CONFIG_BUILD_KERNEL=y). In this configuration, there
89 * is one kernel heap but multiple user heaps: One per task group.
90 * However, in this case, the kernel need only be concerned about
91 * initializing the single kernel heap here. User heaps will be created
92 * as tasks are created.
93 *
94 * These special definitions are provided:
95 *
96 * MM_KERNEL_USRHEAP_INIT
97 * Special kernel interfaces to the kernel user-heap are required
98 * for heap initialization.
99 * CONFIG_MM_KERNEL_HEAP
 *     The configuration requires a kernel heap that must be initialized
101 * at boot-up.
102 */
103
104 #undef MM_KERNEL_USRHEAP_INIT
105 #if defined(CONFIG_BUILD_PROTECTED) && defined(__KERNEL__)
106 # define MM_KERNEL_USRHEAP_INIT 1
107 #elif !defined(CONFIG_BUILD_KERNEL)
108 # define MM_KERNEL_USRHEAP_INIT 1
109 #endif
110
111 /* The kernel heap is never accessible from user code */
112
113 #ifndef __KERNEL__
114 # undef CONFIG_MM_KERNEL_HEAP
115 #endif
116
117 /* Chunk Header Definitions *************************************************/
118 /* These definitions define the characteristics of allocator
119 *
120 * MM_MIN_SHIFT is used to define MM_MIN_CHUNK.
121 * MM_MIN_CHUNK - is the smallest physical chunk that can be allocated. It
 *   must be at least as large as sizeof(struct mm_freenode_s).  Larger values
123 * may improve performance slightly, but will waste memory due to
124 * quantization losses.
125 *
126 * MM_MAX_SHIFT is used to define MM_MAX_CHUNK
127 * MM_MAX_CHUNK is the largest, contiguous chunk of memory that can be
128 * allocated. It can range from 16-bytes to 4Gb. Larger values of
129 * MM_MAX_SHIFT can cause larger data structure sizes and, perhaps,
130 * minor performance losses.
131 */
132
133 #if defined(CONFIG_MM_SMALL) && UINTPTR_MAX <= UINT32_MAX
134 /* Two byte offsets; Pointers may be 2 or 4 bytes;
135 * sizeof(struct mm_freenode_s) is 8 or 12 bytes.
136 * REVISIT: We could do better on machines with 16-bit addressing.
137 */
138
139 # define MM_MIN_SHIFT 4 /* 16 bytes */
140 # define MM_MAX_SHIFT 15 /* 32 Kb */
141
142 #elif defined(CONFIG_HAVE_LONG_LONG)
143 /* Four byte offsets; Pointers may be 4 or 8 bytes
144 * sizeof(struct mm_freenode_s) is 16 or 24 bytes.
145 */
146
147 # if UINTPTR_MAX <= UINT32_MAX
148 # define MM_MIN_SHIFT 4 /* 16 bytes */
149 # elif UINTPTR_MAX <= UINT64_MAX
150 # define MM_MIN_SHIFT 5 /* 32 bytes */
151 # endif
152 # define MM_MAX_SHIFT 22 /* 4 Mb */
153
154 #else
155 /* Four byte offsets; Pointers must be 4 bytes.
156 * sizeof(struct mm_freenode_s) is 16 bytes.
157 */
158
159 # define MM_MIN_SHIFT 4 /* 16 bytes */
160 # define MM_MAX_SHIFT 22 /* 4 Mb */
161 #endif
162
163 /* All other definitions derive from these two */
164
165 #define MM_MIN_CHUNK (1 << MM_MIN_SHIFT)
166 #define MM_MAX_CHUNK (1 << MM_MAX_SHIFT)
167 #define MM_NNODES (MM_MAX_SHIFT - MM_MIN_SHIFT + 1)
168
169 #define MM_GRAN_MASK (MM_MIN_CHUNK-1)
170 #define MM_ALIGN_UP(a) (((a) + MM_GRAN_MASK) & ~MM_GRAN_MASK)
171 #define MM_ALIGN_DOWN(a) ((a) & ~MM_GRAN_MASK)
172
173 /* An allocated chunk is distinguished from a free chunk by bit 31 (or 15)
174 * of the 'preceding' chunk size. If set, then this is an allocated chunk.
175 */
176
177 #ifdef CONFIG_MM_SMALL
178 # define MM_ALLOC_BIT 0x8000
179 #else
180 # define MM_ALLOC_BIT 0x80000000
181 #endif
/* Test whether the chunk described by node (n) is allocated: allocated
 * chunks have MM_ALLOC_BIT (the MSB) set in their 'preceding' size field.
 *
 * BUGFIX: the previous definition had an unbalanced extra closing
 * parenthesis, so any use of this macro failed to compile.  It also
 * tested the sign bit of an (int) cast, which is wrong for the 16-bit
 * CONFIG_MM_SMALL case; testing MM_ALLOC_BIT directly is correct for
 * both header sizes.
 */

#define MM_IS_ALLOCATED(n) \
  ((((struct mm_allocnode_s *)(n))->preceding & MM_ALLOC_BIT) != 0)
184
185 /****************************************************************************
186 * Public Types
187 ****************************************************************************/
188
/* Heap statistics in the style of the standard mallinfo() interface,
 * filled in by mm_mallinfo().
 */

struct mallinfo
{
  int arena;    /* This is the total size of memory allocated
                 * for use by malloc in bytes. */
  int ordblks;  /* This is the number of free (not in use) chunks */
  int mxordblk; /* Size of the largest free (not in use) chunk */
  int uordblks; /* This is the total size of memory occupied by
                 * chunks handed out by malloc. */
  int fordblks; /* This is the total size of memory occupied
                 * by free (not in use) chunks. */
};
200
201 /* Determines the size of the chunk size/offset type */
202
203 #ifdef CONFIG_MM_SMALL
204 typedef uint16_t mmsize_t;
205 # define MMSIZE_MAX 0xffff
206 #else
207 typedef size_t mmsize_t;
208 # define MMSIZE_MAX SIZE_MAX
209 #endif
210
211 /* This describes an allocated chunk. An allocated chunk is
212 * distinguished from a free chunk by bit 15/31 of the 'preceding' chunk
213 * size. If set, then this is an allocated chunk.
214 */
215
struct mm_allocnode_s
{
  mmsize_t size;      /* Size of this chunk */
  mmsize_t preceding; /* Size of the preceding chunk; the MSB
                       * (MM_ALLOC_BIT) is set when this chunk
                       * is allocated */
};
221
222 /* What is the size of the allocnode? */
223
224 #ifdef CONFIG_MM_SMALL
225 # define SIZEOF_MM_ALLOCNODE 4
226 #else
227 # define SIZEOF_MM_ALLOCNODE 8
228 #endif
229
230 #define CHECK_ALLOCNODE_SIZE \
231 DEBUGASSERT(sizeof(struct mm_allocnode_s) == SIZEOF_MM_ALLOCNODE)
232
233 /* This describes a free chunk */
234
struct mm_freenode_s
{
  mmsize_t size;               /* Size of this chunk */
  mmsize_t preceding;          /* Size of the preceding chunk */
  struct mm_freenode_s *flink; /* Supports a doubly linked list */
  struct mm_freenode_s *blink; /* Backward link of the free list */
};
242
243 /* What is the size of the freenode? */
244
245 #define MM_PTR_SIZE sizeof(struct mm_freenode_s *)
246 #define SIZEOF_MM_FREENODE (SIZEOF_MM_ALLOCNODE + 2*MM_PTR_SIZE)
247
248 #define CHECK_FREENODE_SIZE \
249 DEBUGASSERT(sizeof(struct mm_freenode_s) == SIZEOF_MM_FREENODE)
250
251 #ifndef CONFIG_MM_REGIONS
252 #define CONFIG_MM_REGIONS 1
253 #endif
254
255 /* This describes one heap (possibly with multiple regions) */
256
/* NOTE(review): this typedef shadows the POSIX sem_t with a bare
 * pointer — confirm that <semaphore.h> is never included alongside
 * this header in this environment.
 */

typedef void *sem_t;

struct mm_heap_s
{
  /* Mutually exclusive access to this data set is enforced with
   * the following un-named semaphore.
   */

  sem_t mm_semaphore;
  uint16_t mm_holder;          /* presumably the ID of the task holding
                                * the semaphore — TODO confirm */
  int mm_counts_held;          /* presumably a recursive hold count for
                                * the semaphore holder — TODO confirm */

  /* This is the size of the heap provided to mm */

  size_t mm_heapsize;

  /* This is the first and last nodes of the heap */

  struct mm_allocnode_s *mm_heapstart[CONFIG_MM_REGIONS];
  struct mm_allocnode_s *mm_heapend[CONFIG_MM_REGIONS];

#if CONFIG_MM_REGIONS > 1
  int mm_nregions;             /* Number of regions in the heap */
#endif

  /* All free nodes are maintained in a doubly linked list.  This
   * array provides some hooks into the list at various points to
   * speed searches for free nodes.
   */

  struct mm_freenode_s mm_nodelist[MM_NNODES];
};
288
289 /****************************************************************************
290 * Public Data
291 ****************************************************************************/
292
293 #undef EXTERN
294 #if defined(__cplusplus)
295 #define EXTERN extern "C"
296 extern "C"
297 {
298 #else
299 #define EXTERN extern
300 #endif
301
302 /* User heap structure:
303 *
304 * - Flat build: In the FLAT build, the user heap structure is a globally
305 * accessible variable.
306 * - Protected build: The user heap structure is directly available only
307 * in user space.
308 * - Kernel build: There are multiple heaps, one per process. The heap
309 * structure is associated with the address environment and there is
310 * no global user heap structure.
311 */
312
313 #if defined(CONFIG_ARCH_ADDRENV) && defined(CONFIG_BUILD_KERNEL)
/* In the kernel build, there are multiple user heaps; one for each task
315 * group. In this build configuration, the user heap structure lies
316 * in a reserved region at the beginning of the .bss/.data address
317 * space (CONFIG_ARCH_DATA_VBASE). The size of that region is given by
318 * ARCH_DATA_RESERVE_SIZE
319 */
320
321 #elif defined(CONFIG_BUILD_PROTECTED) && defined(__KERNEL__)
322 /* In the protected mode, there are two heaps: A kernel heap and a single
323 * user heap. In that case the user heap structure lies in the user space
324 * (with a reference in the userspace interface).
325 */
326
327 #else
328 /* Otherwise, the user heap data structures are in common .bss */
329
330 EXTERN struct mm_heap_s g_mmheap;
331 #endif
332
333 #ifdef CONFIG_MM_KERNEL_HEAP
334 /* This is the kernel heap */
335
336 EXTERN struct mm_heap_s g_kmmheap;
337 #endif
338
339 /****************************************************************************
340 * Public Function Prototypes
341 ****************************************************************************/
342
343 /* Functions contained in mm_initialize.c ***********************************/
344
345 void mm_initialize(struct mm_heap_s *heap, void *heap_start,
346 size_t heap_size);
347 void mm_addregion(struct mm_heap_s *heap, void *heapstart,
348 size_t heapsize);
349
350 /* Functions contained in umm_initialize.c **********************************/
351
352 void umm_initialize(void *heap_start, size_t heap_size);
353
354 /* Functions contained in kmm_initialize.c **********************************/
355
356 #ifdef CONFIG_MM_KERNEL_HEAP
357 void kmm_initialize(void *heap_start, size_t heap_size);
358 #endif
359
360 /* Functions contained in umm_addregion.c ***********************************/
361
362 void umm_addregion(void *heapstart, size_t heapsize);
363
364 /* Functions contained in kmm_addregion.c ***********************************/
365
366 #ifdef CONFIG_MM_KERNEL_HEAP
367 void kmm_addregion(void *heapstart, size_t heapsize);
368 #endif
369
370 /* Functions contained in mm_sem.c ******************************************/
371
372 #define mm_seminitialize(heap)
373 #define mm_takesemaphore(heap)
374 #define mm_trysemaphore(heap)
375 #define mm_givesemaphore(heap)
376
377 /* Functions contained in umm_sem.c ****************************************/
378
379 int umm_trysemaphore(void);
380 void umm_givesemaphore(void);
381
382 /* Functions contained in kmm_sem.c ****************************************/
383
384 #ifdef CONFIG_MM_KERNEL_HEAP
385 int kmm_trysemaphore(void);
386 void kmm_givesemaphore(void);
387 #endif
388
389 /* Functions contained in mm_malloc.c ***************************************/
390
391 #include "mm_queue.h"
392
/* Debug header prepended to every allocation when heap debugging is
 * enabled; an MDBG_SZ_TAIL-byte tail pattern follows the user data
 * (see mdbg_set_magic_end()).
 */

struct m_dbg_hdr {
  dq_entry_t node;       /* Queue entry linking live allocations */
  void *caller;          /* Address of the allocation call site */
  uint32_t size:23;      /* User payload size in bytes (< 8 MiB) */
  uint32_t referenced:1; /* presumably a mark bit for the leak
                          * detector (mm_leak_*) — TODO confirm */
  uint32_t pid:8;        /* ID of the allocating task (low 8 bits) */
#define MAGIC_INUSE 0x65657575
#define MAGIC_FREE 0x3f3f3f3f
#define MAGIC_END 0xe5e5e5e5
  uint32_t magic;        /* Header checksum, see mdbg_calc_magic() */
};
404
405 #define MDBG_SZ_HEAD sizeof(struct m_dbg_hdr)
406 #define MDBG_SZ_TAIL 16
407
mdbg_calc_magic(struct m_dbg_hdr * hdr)408 static inline bool mdbg_calc_magic(struct m_dbg_hdr *hdr)
409 {
410 uint64_t magic = (uint64_t)hdr->caller;
411 magic ^= hdr->size;
412 magic ^= hdr->pid;
413 magic ^= MAGIC_INUSE;
414 return magic;
415 }
416
mdbg_check_magic_hdr(struct m_dbg_hdr * hdr)417 static inline bool mdbg_check_magic_hdr(struct m_dbg_hdr *hdr)
418 {
419 return mdbg_calc_magic(hdr) == hdr->magic;
420 }
421
mdbg_check_magic_end(struct m_dbg_hdr * hdr)422 static inline bool mdbg_check_magic_end(struct m_dbg_hdr *hdr)
423 {
424 void *p = hdr + 1;
425 uint64_t *m = (uint64_t *)((uint64_t)p + hdr->size);
426 uint32_t magic = MAGIC_END ^ hdr->magic;
427 int i;
428
429 for (i=0;i<MDBG_SZ_TAIL/4;i++) {
430 if (m[i] != magic)
431 return false;
432 }
433
434 return true;
435 }
436
mdbg_set_magic_hdr(struct m_dbg_hdr * hdr)437 static inline void mdbg_set_magic_hdr(struct m_dbg_hdr *hdr)
438 {
439 hdr->magic = mdbg_calc_magic(hdr);
440 }
441
mdbg_set_magic_end(struct m_dbg_hdr * hdr)442 static inline void mdbg_set_magic_end(struct m_dbg_hdr *hdr)
443 {
444 void *p = hdr + 1;
445 uint64_t *m = (uint64_t *)((uint64_t)p + hdr->size);
446 int i;
447
448 for (i=0;i<MDBG_SZ_TAIL/4;i++) {
449 m[i] = MAGIC_END ^ hdr->magic;
450 }
451 }
452
453
454 void *mm_malloc(struct mm_heap_s *heap, size_t size, void *caller);
455
456 #if (CONFIG_MM_MAX_USED)
457 int mm_get_max_usedsize(void);
458 int mm_max_usedsize_update(struct mm_heap_s *heap);
459 #endif
460
461 /* Functions contained in kmm_malloc.c **************************************/
462
463 #ifdef CONFIG_MM_KERNEL_HEAP
464 void *kmm_malloc(size_t size);
465 #endif
466
467 /* Functions contained in mm_free.c *****************************************/
468
469 void mm_free(struct mm_heap_s *heap, void *mem, void *caller);
470
471 /* Functions contained in kmm_free.c ****************************************/
472
473 #ifdef CONFIG_MM_KERNEL_HEAP
474 void kmm_free(void *mem);
475 #endif
476
477 /* Functions contained in mm_realloc.c **************************************/
478
479 void *mm_realloc(struct mm_heap_s *heap, void *oldmem,
480 size_t size);
481
482 /* Functions contained in kmm_realloc.c *************************************/
483
484 #ifdef CONFIG_MM_KERNEL_HEAP
485 void *kmm_realloc(void *oldmem, size_t newsize);
486 #endif
487
488 /* Functions contained in mm_calloc.c ***************************************/
489
490 void *mm_calloc(struct mm_heap_s *heap, size_t n, size_t elem_size);
491
492 /* Functions contained in kmm_calloc.c **************************************/
493
494 #ifdef CONFIG_MM_KERNEL_HEAP
495 void *kmm_calloc(size_t n, size_t elem_size);
496 #endif
497
498 /* Functions contained in mm_zalloc.c ***************************************/
499
500 void *mm_zalloc(struct mm_heap_s *heap, size_t size);
501
502 /* Functions contained in kmm_zalloc.c **************************************/
503
504 #ifdef CONFIG_MM_KERNEL_HEAP
505 void *kmm_zalloc(size_t size);
506 #endif
507
508 /* Functions contained in mm_memalign.c *************************************/
509
510 void *mm_memalign(struct mm_heap_s *heap, size_t alignment,
511 size_t size);
512
513 /* Functions contained in kmm_memalign.c ************************************/
514
515 #ifdef CONFIG_MM_KERNEL_HEAP
516 void *kmm_memalign(size_t alignment, size_t size);
517 #endif
518
519 /* Functions contained in kmm_heapmember.c **********************************/
520
521 #if defined(CONFIG_MM_KERNEL_HEAP)
522 bool kmm_heapmember(void *mem);
523 #endif
524
525 /* Functions contained in mm_brkaddr.c **************************************/
526
527 void *mm_brkaddr(struct mm_heap_s *heap, int region);
528
529 /* Functions contained in umm_brkaddr.c *************************************/
530
531 #if !defined(CONFIG_BUILD_PROTECTED) || !defined(__KERNEL__)
532 void *umm_brkaddr(int region);
533 #endif
534
535 /* Functions contained in kmm_brkaddr.c *************************************/
536
537 #ifdef CONFIG_MM_KERNEL_HEAP
538 void *kmm_brkaddr(int region);
539 #endif
540
541 /* Functions contained in mm_sbrk.c *****************************************/
542
543 #if defined(CONFIG_ARCH_ADDRENV) && defined(CONFIG_MM_PGALLOC) && \
544 defined(CONFIG_ARCH_USE_MMU)
545 void *mm_sbrk(struct mm_heap_s *heap, intptr_t incr,
546 uintptr_t maxbreak);
547 #endif
548
549 /* Functions contained in kmm_sbrk.c ****************************************/
550
551 #if defined(CONFIG_MM_KERNEL_HEAP) && defined(CONFIG_ARCH_ADDRENV) && \
552 defined(CONFIG_MM_PGALLOC) && defined(CONFIG_ARCH_USE_MMU)
553 void *kmm_sbrk(intptr_t incr);
554 #endif
555
556 /* Functions contained in mm_extend.c ***************************************/
557
558 void mm_extend(struct mm_heap_s *heap, void *mem, size_t size,
559 int region);
560
561 /* Functions contained in umm_extend.c **************************************/
562
563 #if !defined(CONFIG_BUILD_PROTECTED) || !defined(__KERNEL__)
564 void umm_extend(void *mem, size_t size, int region);
565 #endif
566
567 /* Functions contained in kmm_extend.c **************************************/
568
569 #ifdef CONFIG_MM_KERNEL_HEAP
570 void kmm_extend(void *mem, size_t size, int region);
571 #endif
572
573 /* Functions contained in mm_mallinfo.c *************************************/
574
575 struct mallinfo; /* Forward reference */
576 int mm_mallinfo(struct mm_heap_s *heap, struct mallinfo *info);
577
578 /* Functions contained in kmm_mallinfo.c ************************************/
579
580 #ifdef CONFIG_MM_KERNEL_HEAP
581 #ifdef CONFIG_CAN_PASS_STRUCTS
582 struct mallinfo kmm_mallinfo(void);
583 #else
584 int kmm_mallinfo(struct mallinfo *info);
#endif /* CONFIG_CAN_PASS_STRUCTS */
#endif /* CONFIG_MM_KERNEL_HEAP */
587
588 /* Functions contained in mm_shrinkchunk.c **********************************/
589
590 void mm_shrinkchunk(struct mm_heap_s *heap,
591 struct mm_allocnode_s *node, size_t size);
592
593 /* Functions contained in mm_addfreechunk.c *********************************/
594
595 void mm_addfreechunk(struct mm_heap_s *heap,
596 struct mm_freenode_s *node);
597
598 /* Functions contained in mm_size2ndx.c.c ***********************************/
599
600 int mm_size2ndx(size_t size);
601
602 #if defined(CONFIG_MM_DETECT_ERROR)
603 void mm_leak_add_chunk(struct m_dbg_hdr *chunk);
604 void mm_leak_del_chunk(struct m_dbg_hdr *chunk);
605 void mm_leak_dump(void);
606 void mm_leak_search_chunk(void *mem);
607 #else
608 //static inline void mm_leak_add_chunk(struct m_dbg_hdr *chunk){}
609 //static inline void mm_leak_del_chunk(struct m_dbg_hdr *chunk){}
610 //static inline void mm_leak_dump(void){}
611 //static inline void mm_leak_search_chunk(void *mem){}
612 #endif
613
614 #undef EXTERN
615 #ifdef __cplusplus
616 }
617 #endif
618
619 #endif /* __INCLUDE_NUTTX_MM_MM_H */
620