/*
 * Copyright (C) 2015-2017 Alibaba Group Holding Limited
 */

#include <stdio.h>
#include <stdlib.h>

#include "k_api.h"

#if AOS_COMP_DEBUG
#include "aos/debug.h"
extern uint32_t debug_task_id_now();
extern void debug_cpu_stop(void);
#endif

#if (RHINO_CONFIG_MM_TLF > 0)
extern k_mm_region_t g_mm_region[];
extern int g_region_num;

#if RHINO_CONFIG_MM_DEBUG
#if (RHINO_CONFIG_MM_TRACE_LVL > 0)

volatile uint32_t g_kmm_bt = 0;
int backtrace_now_get(void *trace[], int size, int offset);

void kmm_bt_disable(void)
{
    g_kmm_bt = KMM_BT_SET_BY_KV;
}

/* check bt status
 * ret 0 : enabled
 * ret 1 : disabled
 */
int kmm_bt_check(void)
{
    return (g_kmm_bt == KMM_BT_SET_BY_KV);
}
#endif

void kmm_error(uint32_t mm_status_locked)
{
    dumpsys_mm_info_func(mm_status_locked);
    k_err_proc(RHINO_SYS_FATAL_ERR);
}
#endif

void k_mm_init(void)
{
    uint32_t e = 0;

    /* init memory regions */
    (void)krhino_init_mm_head(&g_kmm_head, g_mm_region[0].start, g_mm_region[0].len);
    for (e = 1; e < g_region_num; e++) {
        krhino_add_mm_region(g_kmm_head, g_mm_region[e].start, g_mm_region[e].len);
    }
}
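
/* The board/platform supplies the regions consumed above; a minimal
 * sketch (buffer name and size are hypothetical):
 *
 *     static uint8_t heap_buf[128 * 1024];
 *
 *     k_mm_region_t g_mm_region[] = {
 *         { heap_buf, sizeof(heap_buf) },
 *     };
 *     int g_region_num = sizeof(g_mm_region) / sizeof(g_mm_region[0]);
 */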

/* init a region, containing 3 mmblks
 * -----------------------------------------------------------------------------
 * | k_mm_list_t | k_mm_region_info_t | k_mm_list_t | free space | k_mm_list_t |
 * -----------------------------------------------------------------------------
 *
 * "regionaddr" and "len" are aligned by the caller
 */
RHINO_INLINE k_mm_list_t *init_mm_region(void *regionaddr, size_t len)
{
    k_mm_list_t        *midblk, *lastblk, *firstblk;
    k_mm_region_info_t *region;

    /* first mmblk for region info */
    firstblk = (k_mm_list_t *)regionaddr;
    firstblk->prev = NULL;
    firstblk->buf_size = MM_ALIGN_UP(sizeof(k_mm_region_info_t))
                         | MM_BUFF_USED | MM_BUFF_PREV_USED;

#if (RHINO_CONFIG_MM_DEBUG > 0u)
    firstblk->dye      = MM_DYE_USED;
    firstblk->owner_id = MM_OWNER_ID_SELF;
    firstblk->trace_id = 0;
    firstblk->owner    = 0;
#endif

    /* last mmblk, used to stop merging */
    lastblk = (k_mm_list_t *)((char *)regionaddr + len - MMLIST_HEAD_SIZE);

    /* middle mmblk, for heap use */
    midblk = MM_GET_NEXT_BLK(firstblk);

    midblk->buf_size = ((char *)lastblk - (char *)midblk->mbinfo.buffer)
                       | MM_BUFF_USED | MM_BUFF_PREV_USED;

    midblk->mbinfo.free_ptr.prev = midblk->mbinfo.free_ptr.next = 0;

    /* last mmblk closes the region */
    lastblk->prev = midblk;

    /* set as allocated, so it can never be merged */
    lastblk->buf_size = 0 | MM_BUFF_USED | MM_BUFF_PREV_FREE;

#if (RHINO_CONFIG_MM_DEBUG > 0u)
    lastblk->dye      = MM_DYE_USED;
    lastblk->owner_id = MM_OWNER_ID_SELF;
    lastblk->trace_id = 0;
    lastblk->owner    = MM_LAST_BLK_MAGIC;
#endif

    region = (k_mm_region_info_t *)firstblk->mbinfo.buffer;
    region->next = 0;
    region->end  = lastblk;

    return firstblk;
}

/* freelist level N holds 2^(N + MM_MIN_BIT - 1) <= size < 2^(N + MM_MIN_BIT);
 * level 0 also absorbs any smaller sizes */
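/* e.g. size = 100 has 7 significant bits, so level = 7 - MM_MIN_BIT;
 * with MM_MIN_BIT == 6 (the usual Rhino value) that is level 1, the
 * class covering sizes 64..127 */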
static int32_t size_to_level(size_t size)
{
    size_t cnt = 32 - krhino_clz32(size);

    if (cnt < MM_MIN_BIT) {
        return 0;
    }

    if (cnt > MM_MAX_BIT) {
        return -1;
    }

    return cnt - MM_MIN_BIT;
}

#if (K_MM_STATISTIC > 0)
static void addsize(k_mm_head *mmhead, size_t size, size_t req_size)
{
    int32_t level;

    if (mmhead->free_size > size) {
        mmhead->free_size -= size;
    } else {
        mmhead->free_size = 0;
    }

    mmhead->used_size += size;
    if (mmhead->used_size > mmhead->maxused_size) {
        mmhead->maxused_size = mmhead->used_size;
    }

    if (req_size > 0) {
        level = size_to_level(req_size);
        if (level != -1) {
            mmhead->alloc_times[level]++;
        }
    }
}

static void removesize(k_mm_head *mmhead, size_t size)
{
    if (mmhead->used_size > size) {
        mmhead->used_size -= size;
    } else {
        mmhead->used_size = 0;
    }
    mmhead->free_size += size;
}

/* used_size++, free_size--, maybe maxused_size++ */
#define stats_addsize(mmhead, size, req_size)    addsize(mmhead, size, req_size)
/* used_size--, free_size++ */
#define stats_removesize(mmhead, size)           removesize(mmhead, size)
#else
#define stats_addsize(mmhead, size, req_size)    do {} while (0)
#define stats_removesize(mmhead, size)           do {} while (0)
#endif

kstat_t krhino_init_mm_head(k_mm_head **ppmmhead, void *addr, size_t len)
{
    k_mm_list_t *nextblk;
    k_mm_list_t *firstblk;
    k_mm_head   *pmmhead;
    void        *orig_addr;
#if (RHINO_CONFIG_MM_BLK > 0)
    mblk_pool_t *mmblk_pool;
    kstat_t      stat;
#endif

    NULL_PARA_CHK(ppmmhead);
    NULL_PARA_CHK(addr);

    memset(addr, 0, len);

    /* check parameters: addr and len must be aligned
     * 1. the length must cover at least RHINO_CONFIG_MM_TLF_BLK_SIZE for the fixed-size block pool
     * 2. and also leave at least 1 KiB for user allocations
     */
    orig_addr = addr;

    addr = (void *)MM_ALIGN_UP((size_t)addr);
    len -= (size_t)addr - (size_t)orig_addr;
    len  = MM_ALIGN_DOWN(len);

    if (len == 0
        || len < MM_MIN_HEAP_SIZE + RHINO_CONFIG_MM_TLF_BLK_SIZE
        || len > MM_MAX_SIZE) {
        return RHINO_MM_POOL_SIZE_ERR;
    }

    pmmhead = (k_mm_head *)addr;

    /* zero the memory head */
    memset(pmmhead, 0, sizeof(k_mm_head));

#if (RHINO_CONFIG_MM_REGION_MUTEX > 0)
    krhino_mutex_create(&pmmhead->mm_mutex, "mm_mutex");
#else
    krhino_spin_lock_init(&pmmhead->mm_lock);
#endif

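    /* carve everything after the aligned k_mm_head into the first region;
     * see the layout diagram above init_mm_region */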
    firstblk = init_mm_region((void *)((size_t)addr + MM_ALIGN_UP(sizeof(k_mm_head))),
                              MM_ALIGN_DOWN(len - sizeof(k_mm_head)));

    pmmhead->regioninfo = (k_mm_region_info_t *)firstblk->mbinfo.buffer;

    nextblk = MM_GET_NEXT_BLK(firstblk);

    *ppmmhead = pmmhead;

    /* mark it as free and record it in the free bitmap */
#if (RHINO_CONFIG_MM_DEBUG > 0u)
    nextblk->dye      = MM_DYE_USED;
    nextblk->owner_id = MM_OWNER_ID_SELF;
    nextblk->trace_id = 0;
    nextblk->owner    = 0;
#endif

    /* release the free blk */
    k_mm_free(pmmhead, nextblk->mbinfo.buffer);

#if (K_MM_STATISTIC > 0)
    pmmhead->free_size    = MM_GET_BUF_SIZE(nextblk);
    pmmhead->used_size    = len - MM_GET_BUF_SIZE(nextblk);
    pmmhead->maxused_size = pmmhead->used_size;
#endif

#if (RHINO_CONFIG_MM_BLK > 0)
    pmmhead->fix_pool = NULL;
    mmblk_pool = k_mm_alloc(pmmhead, RHINO_CONFIG_MM_TLF_BLK_SIZE + MM_ALIGN_UP(sizeof(mblk_pool_t)));
    if (mmblk_pool) {
        stat = krhino_mblk_pool_init(mmblk_pool, "fixed_mm_blk",
                                     (void *)((size_t)mmblk_pool + MM_ALIGN_UP(sizeof(mblk_pool_t))),
                                     RHINO_CONFIG_MM_TLF_BLK_SIZE);
        if (stat == RHINO_SUCCESS) {
            pmmhead->fix_pool = mmblk_pool;
        } else {
            k_mm_free(pmmhead, mmblk_pool);
        }
    }
#endif

    return RHINO_SUCCESS;
}

kstat_t krhino_deinit_mm_head(k_mm_head *mmhead)
{
#if (RHINO_CONFIG_MM_REGION_MUTEX > 0)
    krhino_mutex_del(&mmhead->mm_mutex);
#endif

    memset(mmhead, 0, sizeof(k_mm_head));

    return RHINO_SUCCESS;
}

kstat_t krhino_add_mm_region(k_mm_head *mmhead, void *addr, size_t len)
{
    void *orig_addr;

    k_mm_region_info_t *region;
    k_mm_list_t        *firstblk;
    k_mm_list_t        *nextblk;
    cpu_cpsr_t          flags_cpsr;
    (void)flags_cpsr;

    NULL_PARA_CHK(mmhead);
    NULL_PARA_CHK(addr);

    orig_addr = addr;

    addr = (void *)MM_ALIGN_UP((size_t)addr);
    len -= (size_t)addr - (size_t)orig_addr;
    len  = MM_ALIGN_DOWN(len);

    if (!len || len < sizeof(k_mm_region_info_t) + MMLIST_HEAD_SIZE * 3 + MM_MIN_SIZE) {
        return RHINO_MM_POOL_SIZE_ERR;
    }

    memset(addr, 0, len);

    MM_CRITICAL_ENTER(mmhead, flags_cpsr);

    firstblk = init_mm_region(addr, len);
    nextblk  = MM_GET_NEXT_BLK(firstblk);

    /* insert the region into the linked region list */
    region = (k_mm_region_info_t *)firstblk->mbinfo.buffer;
    region->next = mmhead->regioninfo;
    mmhead->regioninfo = region;

#if (RHINO_CONFIG_MM_DEBUG > 0u)
    nextblk->dye      = MM_DYE_USED;
    nextblk->owner_id = MM_OWNER_ID_SELF;
    nextblk->trace_id = 0;
    nextblk->owner    = 0;
#endif

#if (K_MM_STATISTIC > 0)
    /* keep "used_size" unchanged overall: bump it here, then the
     * k_mm_free below decreases it again while crediting free_size.
     */
    mmhead->used_size += MM_GET_BLK_SIZE(nextblk);
#endif

    MM_CRITICAL_EXIT(mmhead, flags_cpsr);

    /* mark nextblk as free */
    k_mm_free(mmhead, nextblk->mbinfo.buffer);

    return RHINO_SUCCESS;
}
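/* The allocator keeps one doubly linked freelist per size class and
 * mirrors each list's non-empty state in one bit of free_bitmap, so
 * level lookups can use clz/ctz instead of scanning every list.
 */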
/* insert blk to freelist[level], and set the free bitmap */
static void k_mm_freelist_insert(k_mm_head *mmhead, k_mm_list_t *blk)
{
    int32_t level;

    level = size_to_level(MM_GET_BUF_SIZE(blk));
    if (level < 0 || level >= MM_BIT_LEVEL) {
        return;
    }

    blk->mbinfo.free_ptr.prev = NULL;
    blk->mbinfo.free_ptr.next = mmhead->freelist[level];

    if (mmhead->freelist[level] != NULL) {
        mmhead->freelist[level]->mbinfo.free_ptr.prev = blk;
    }

    mmhead->freelist[level] = blk;

    /* freelist is not empty, so set the bit */
    mmhead->free_bitmap |= (1 << level);
}

static void k_mm_freelist_delete(k_mm_head *mmhead, k_mm_list_t *blk)
{
    int32_t level;

    level = size_to_level(MM_GET_BUF_SIZE(blk));
    if (level < 0 || level >= MM_BIT_LEVEL) {
        return;
    }

    if (blk->mbinfo.free_ptr.next != NULL) {
        blk->mbinfo.free_ptr.next->mbinfo.free_ptr.prev = blk->mbinfo.free_ptr.prev;
    }
    if (blk->mbinfo.free_ptr.prev != NULL) {
        blk->mbinfo.free_ptr.prev->mbinfo.free_ptr.next = blk->mbinfo.free_ptr.next;
    }

    if (mmhead->freelist[level] == blk) {
        /* first blk in this freelist */
        mmhead->freelist[level] = blk->mbinfo.free_ptr.next;
        if (mmhead->freelist[level] == NULL) {
            /* freelist is now empty, so clear the bit */
            mmhead->free_bitmap &= (~(1 << level));
        }
    }

    blk->mbinfo.free_ptr.prev = NULL;
    blk->mbinfo.free_ptr.next = NULL;
}
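/* Find the lowest non-empty freelist strictly above "level": the mask
 * keeps only bits (level + 1)..31 of free_bitmap, and ctz returns the
 * smallest one that is set, or >= MM_BIT_LEVEL when none is.
 */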
static k_mm_list_t *find_up_level(k_mm_head *mmhead, int32_t level)
{
    uint32_t bitmap;

    bitmap = mmhead->free_bitmap & (0xfffffffful << (level + 1));
    level  = krhino_ctz32(bitmap);

    if (level < MM_BIT_LEVEL) {
        return mmhead->freelist[level];
    }

    return NULL;
}

void *k_mm_alloc(k_mm_head *mmhead, size_t size)
{
    void        *retptr;
    k_mm_list_t *get_b, *new_b, *next_b;
    int32_t      level;
    size_t       left_size;
    size_t       req_size = size;
    cpu_cpsr_t   flags_cpsr;

    if (!mmhead) {
        return NULL;
    }

    if (size == 0) {
        return NULL;
    }

    MM_CRITICAL_ENTER(mmhead, flags_cpsr);

#if (RHINO_CONFIG_MM_BLK > 0)
    /* small blk, try to get it from the fixed-size mm_pool */
    if (mmhead->fix_pool != NULL && size <= RHINO_CONFIG_MM_BLK_SIZE) {
        retptr = krhino_mblk_alloc_nolock((mblk_pool_t *)mmhead->fix_pool, size);
        if (retptr) {
            MM_CRITICAL_EXIT(mmhead, flags_cpsr);
            return retptr;
        }
    }
#endif

    retptr = NULL;

    size = MM_ALIGN_UP(size);
    size = size < MM_MIN_SIZE ? MM_MIN_SIZE : size;

    if ((level = size_to_level(size)) == -1) {
        goto ALLOCEXIT;
    }
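    /* With RHINO_CONFIG_MM_QUICK, take a block from a higher level first
     * (guaranteed large enough, O(1) via the bitmap) and only then scan
     * the exact level; otherwise scan the exact level first for a tighter
     * fit and fall back to a higher level.
     */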
#if (RHINO_CONFIG_MM_QUICK > 0)
    /* try to find a blk in a higher level */
    get_b = find_up_level(mmhead, level);
    if (get_b == NULL) {
        /* try to find one in the same level */
        get_b = mmhead->freelist[level];
        while (get_b != NULL) {
            if (MM_GET_BUF_SIZE(get_b) >= size) {
                break;
            }
            get_b = get_b->mbinfo.free_ptr.next;
        }

        if (get_b == NULL) {
            /* no available free blk found */
            goto ALLOCEXIT;
        }
    }
#else
    /* try to find a blk in the same level */
    get_b = mmhead->freelist[level];
    while (get_b != NULL) {
        if (MM_GET_BUF_SIZE(get_b) >= size) {
            break;
        }
        get_b = get_b->mbinfo.free_ptr.next;
    }

    if (get_b == NULL) {
        /* try to find one in a higher level */
        get_b = find_up_level(mmhead, level);
        if (get_b == NULL) {
            /* no available free blk found */
            goto ALLOCEXIT;
        }
    }
#endif

    k_mm_freelist_delete(mmhead, get_b);

    next_b = MM_GET_NEXT_BLK(get_b);

    /* should the block be split? */
    if (MM_GET_BUF_SIZE(get_b) >= size + MMLIST_HEAD_SIZE + MM_MIN_SIZE) {
        left_size = MM_GET_BUF_SIZE(get_b) - size - MMLIST_HEAD_SIZE;

        get_b->buf_size = size | (get_b->buf_size & MM_PRESTAT_MASK);
        new_b = MM_GET_NEXT_BLK(get_b);

        new_b->prev     = get_b;
        new_b->buf_size = left_size | MM_BUFF_FREE | MM_BUFF_PREV_USED;
#if (RHINO_CONFIG_MM_DEBUG > 0u)
        new_b->dye      = MM_DYE_FREE;
        new_b->owner_id = 0;
        new_b->trace_id = 0;
        new_b->owner    = 0;
#endif
        next_b->prev = new_b;
        k_mm_freelist_insert(mmhead, new_b);
    } else {
        next_b->buf_size &= (~MM_BUFF_PREV_FREE);
    }
    get_b->buf_size &= (~MM_BUFF_FREE); /* now it's used */

#if (RHINO_CONFIG_MM_DEBUG > 0u)
    get_b->dye      = MM_DYE_USED;
    get_b->owner_id = (uint8_t)debug_task_id_now();
    get_b->trace_id = g_mmlk_cnt;
    get_b->owner    = 0;
#endif
    retptr = (void *)get_b->mbinfo.buffer;
    if (retptr != NULL) {
        stats_addsize(mmhead, MM_GET_BLK_SIZE(get_b), req_size);
    }

ALLOCEXIT:

    MM_CRITICAL_EXIT(mmhead, flags_cpsr);

    return retptr;
}

void k_mm_free(k_mm_head *mmhead, void *ptr)
{
    k_mm_list_t *free_b, *next_b, *prev_b;
    cpu_cpsr_t   flags_cpsr;
    (void)flags_cpsr;

    if (!ptr || !mmhead) {
        return;
    }

    MM_CRITICAL_ENTER(mmhead, flags_cpsr);

#if (RHINO_CONFIG_MM_BLK > 0)
    if (krhino_mblk_check(mmhead->fix_pool, ptr)) {
        (void)krhino_mblk_free_nolock((mblk_pool_t *)mmhead->fix_pool, ptr);
        MM_CRITICAL_EXIT(mmhead, flags_cpsr);
        return;
    }
#endif

    free_b = MM_GET_THIS_BLK(ptr);

#if (RHINO_CONFIG_MM_DEBUG > 0u)
    if (free_b->dye == MM_DYE_FREE) {
        /* step 1: do not call MM_CRITICAL_EXIT, so other cores cannot malloc */
        //MM_CRITICAL_EXIT(mmhead, flags_cpsr);
        /* step 2: freeze the other cores */
        debug_cpu_stop();
        /* step 3: log with printk (printf may malloc) */
        printk("WARNING, memory maybe double free!! 0x%x\r\n", (unsigned int)free_b);
        /* step 4: dump memory info, then raise a fatal error */
        kmm_error(KMM_ERROR_LOCKED);
    }
    if (free_b->dye != MM_DYE_USED) {
        //MM_CRITICAL_EXIT(mmhead, flags_cpsr);
        debug_cpu_stop();
        printk("WARNING, memory maybe corrupt!! 0x%x\r\n", (unsigned int)free_b);
        kmm_error(KMM_ERROR_LOCKED);
    }
    free_b->dye      = MM_DYE_FREE;
    free_b->owner_id = 0;
    free_b->trace_id = 0;
    free_b->owner    = 0;
#endif
    free_b->buf_size |= MM_BUFF_FREE;

    stats_removesize(mmhead, MM_GET_BLK_SIZE(free_b));

    /* if the blk after this freed one is free too, merge them */
    next_b = MM_GET_NEXT_BLK(free_b);
#if (RHINO_CONFIG_MM_DEBUG > 0u)
    if (next_b->dye != MM_DYE_FREE && next_b->dye != MM_DYE_USED) {
        //MM_CRITICAL_EXIT(mmhead, flags_cpsr);
        debug_cpu_stop();
        printk("WARNING, memory overwritten!! 0x%x 0x%x\r\n", (unsigned int)free_b, (unsigned int)next_b);
        kmm_error(KMM_ERROR_LOCKED);
    } else if (MM_LAST_BLK_MAGIC != next_b->owner) {
        k_mm_list_t *nnext_b = MM_GET_NEXT_BLK(next_b);
        if (nnext_b->dye != MM_DYE_FREE && nnext_b->dye != MM_DYE_USED) {
            debug_cpu_stop();
            printk("WARNING, nnext memory overwritten!! 0x%x 0x%x 0x%x\r\n", (unsigned int)free_b, (unsigned int)next_b,
                   (unsigned int)nnext_b);
            kmm_error(KMM_ERROR_LOCKED);
        }
    }
#endif

    if (next_b->buf_size & MM_BUFF_FREE) {
        k_mm_freelist_delete(mmhead, next_b);
        free_b->buf_size += MM_GET_BLK_SIZE(next_b);
    }

    /* if the blk before this freed one is free too, merge them */
    if (free_b->buf_size & MM_BUFF_PREV_FREE) {
        prev_b = free_b->prev;
        k_mm_freelist_delete(mmhead, prev_b);
        prev_b->buf_size += MM_GET_BLK_SIZE(free_b);
        free_b = prev_b;
    }

    /* after merging, insert the blk back into the freelist */
    k_mm_freelist_insert(mmhead, free_b);

    next_b = MM_GET_NEXT_BLK(free_b);
    next_b->prev = free_b;
    next_b->buf_size |= MM_BUFF_PREV_FREE;

    MM_CRITICAL_EXIT(mmhead, flags_cpsr);
}

void *k_mm_realloc(k_mm_head *mmhead, void *oldmem, size_t new_size)
{
    void        *ptr_aux = NULL;
    uint32_t     cpsize;
    k_mm_list_t *this_b, *split_b, *next_b;
    size_t       old_size, split_size;
    size_t       req_size = 0;
    cpu_cpsr_t   flags_cpsr;
    (void)flags_cpsr;

    (void)req_size;

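    /* C-style realloc semantics: realloc(NULL, n) behaves like alloc(n),
     * and realloc(p, 0) behaves like free(p) */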
    if (oldmem == NULL) {
        if (new_size > 0) {
            return (void *)k_mm_alloc(mmhead, new_size);
        } else {
            return NULL;
        }
    } else if (new_size == 0) {
        k_mm_free(mmhead, oldmem);
        return NULL;
    }

    req_size = new_size;

#if (RHINO_CONFIG_MM_BLK > 0)
    if (krhino_mblk_check(mmhead->fix_pool, oldmem)) {
        ptr_aux = k_mm_alloc(mmhead, new_size);
        if (ptr_aux) {
            size_t cp_len = krhino_mblk_get_size(mmhead->fix_pool, oldmem);
            cp_len = cp_len > new_size ? new_size : cp_len;
            memcpy(ptr_aux, oldmem, cp_len);
            MM_CRITICAL_ENTER(mmhead, flags_cpsr);
            (void)krhino_mblk_free_nolock((mblk_pool_t *)mmhead->fix_pool, oldmem);
            MM_CRITICAL_EXIT(mmhead, flags_cpsr);
        }
        return ptr_aux;
    }
#endif
    MM_CRITICAL_ENTER(mmhead, flags_cpsr);

    this_b   = MM_GET_THIS_BLK(oldmem);
    old_size = MM_GET_BUF_SIZE(this_b);
    next_b   = MM_GET_NEXT_BLK(this_b);
    new_size = MM_ALIGN_UP(new_size);
    new_size = new_size < MM_MIN_SIZE ? MM_MIN_SIZE : new_size;

    if (new_size <= old_size) {
        /* shrink blk */
        stats_removesize(mmhead, MM_GET_BLK_SIZE(this_b));
        if (next_b->buf_size & MM_BUFF_FREE) {
            /* merge the next free blk */
            k_mm_freelist_delete(mmhead, next_b);
            old_size += MM_GET_BLK_SIZE(next_b);
            next_b = MM_GET_NEXT_BLK(next_b);
        }
        if (old_size >= new_size + MMLIST_HEAD_SIZE + MM_MIN_SIZE) {
            /* split blk */
            split_size = old_size - new_size - MMLIST_HEAD_SIZE;

            this_b->buf_size = new_size | (this_b->buf_size & MM_PRESTAT_MASK);

            split_b = MM_GET_NEXT_BLK(this_b);

            split_b->prev     = this_b;
            split_b->buf_size = split_size | MM_BUFF_FREE | MM_BUFF_PREV_USED;

#if (RHINO_CONFIG_MM_DEBUG > 0u)
            split_b->dye      = MM_DYE_FREE;
            split_b->owner_id = 0;
            split_b->trace_id = 0;
            split_b->owner    = 0;
#endif

            next_b->prev = split_b;
            next_b->buf_size |= MM_BUFF_PREV_FREE;

            k_mm_freelist_insert(mmhead, split_b);
        }
        stats_addsize(mmhead, MM_GET_BLK_SIZE(this_b), req_size);
        ptr_aux = (void *)this_b->mbinfo.buffer;
    } else if ((next_b->buf_size & MM_BUFF_FREE)) {
        /* enlarge blk in place */
        if (new_size <= (old_size + MM_GET_BLK_SIZE(next_b))) {
            stats_removesize(mmhead, MM_GET_BLK_SIZE(this_b));

            /* delete the next blk from the freelist */
            k_mm_freelist_delete(mmhead, next_b);

            /* enlarge this blk */
            this_b->buf_size += MM_GET_BLK_SIZE(next_b);

            next_b = MM_GET_NEXT_BLK(this_b);
            next_b->prev = this_b;
            next_b->buf_size &= ~MM_BUFF_PREV_FREE;

            if (MM_GET_BUF_SIZE(this_b) >= new_size + MMLIST_HEAD_SIZE + MM_MIN_SIZE) {
                /* split blk */
                split_size = MM_GET_BUF_SIZE(this_b) - new_size - MMLIST_HEAD_SIZE;

                this_b->buf_size = new_size | (this_b->buf_size & MM_PRESTAT_MASK);
                split_b = MM_GET_NEXT_BLK(this_b);

                split_b->prev     = this_b;
                split_b->buf_size = split_size | MM_BUFF_FREE | MM_BUFF_PREV_USED;
#if (RHINO_CONFIG_MM_DEBUG > 0u)
                split_b->dye      = MM_DYE_FREE;
                split_b->owner_id = 0;
                split_b->trace_id = 0;
                split_b->owner    = 0;
#endif
                next_b->prev = split_b;
                next_b->buf_size |= MM_BUFF_PREV_FREE;
                k_mm_freelist_insert(mmhead, split_b);
            }
            stats_addsize(mmhead, MM_GET_BLK_SIZE(this_b), req_size);
            ptr_aux = (void *)this_b->mbinfo.buffer;
        }
    }

    if (ptr_aux) {
#if (RHINO_CONFIG_MM_DEBUG > 0u)
        this_b->dye      = MM_DYE_USED;
        this_b->owner_id = (uint8_t)debug_task_id_now();
        this_b->trace_id = g_mmlk_cnt;
#endif

        MM_CRITICAL_EXIT(mmhead, flags_cpsr);
        return ptr_aux;
    }

    MM_CRITICAL_EXIT(mmhead, flags_cpsr);

    /* fall back: allocate a new blk, copy, then free the old one */
    ptr_aux = k_mm_alloc(mmhead, new_size);
    if (!ptr_aux) {
        return NULL;
    }

    cpsize = (MM_GET_BUF_SIZE(this_b) > new_size) ? new_size : MM_GET_BUF_SIZE(this_b);

    memcpy(ptr_aux, oldmem, cpsize);
    k_mm_free(mmhead, oldmem);

    return ptr_aux;
}

#if (RHINO_CONFIG_MM_DEBUG > 0u)
void krhino_owner_attach(void *addr, size_t allocator)
{
    k_mm_list_t *blk;

    char *PC;
    int  *SP;

    __asm__ volatile("mov %0, sp\n" : "=r"(SP));
    __asm__ volatile("mov %0, pc\n" : "=r"(PC));
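    /* SP and PC appear to be captured only for inspection in a debugger;
     * they are not referenced again in this function */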

    if (NULL == addr) {
        return;
    }

#if (RHINO_CONFIG_MM_BLK > 0)
    /* fixed-size blk, debug info is not supported */
    if (krhino_mblk_check(g_kmm_head->fix_pool, addr)) {
        return;
    }
#endif

    blk = MM_GET_THIS_BLK(addr);

#if (RHINO_CONFIG_MM_TRACE_LVL > 0)
    if ((g_sys_stat == RHINO_RUNNING) &&
        (kmm_bt_check() == 0)) {
        backtrace_now_get((void **)blk->trace, RHINO_CONFIG_MM_TRACE_LVL, 2);
    } else {
        memset(blk->trace, 0, sizeof(blk->trace));
    }
#endif

    blk->owner = allocator;
}
#endif

void *krhino_mm_alloc(size_t size)
{
    void *tmp;

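    /* the MSB of "size" is an in-band flag marking an app-side allocation;
     * it is stripped before allocating, and the owner is recorded below
     * only when the flag is clear */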
#if (RHINO_CONFIG_MM_DEBUG > 0u)
    uint32_t app_malloc = size & AOS_UNSIGNED_INT_MSB;
    size = size & (~AOS_UNSIGNED_INT_MSB);
#endif

    if (size == 0) {
        printf("WARNING, malloc size = 0\r\n");
        return NULL;
    }

    tmp = k_mm_alloc(g_kmm_head, size);
    if (tmp == NULL) {
#if (RHINO_CONFIG_MM_DEBUG > 0)
        static int32_t dumped;
        int32_t freesize;

        freesize = g_kmm_head->free_size;

        printf("WARNING, malloc failed!!!! need size:%u, but free size:%d\r\n", (unsigned int)size, freesize);

        if (dumped) {
            return tmp;
        }

        dumped = 1;

        debug_cpu_stop();
        kmm_error(KMM_ERROR_UNLOCKED);
#endif
    }

#if (RHINO_CONFIG_USER_HOOK > 0)
    krhino_mm_alloc_hook(tmp, size);
#endif

#if (RHINO_CONFIG_MM_DEBUG > 0u)
    if (app_malloc == 0) {
        krhino_owner_return_addr(tmp);
    }
#endif

    return tmp;
}

void krhino_mm_free(void *ptr)
{
    k_mm_free(g_kmm_head, ptr);
}

void *krhino_mm_realloc(void *oldmem, size_t newsize)
{
    void *tmp;

#if (RHINO_CONFIG_MM_DEBUG > 0u)
    uint32_t app_malloc = newsize & AOS_UNSIGNED_INT_MSB;
    newsize = newsize & (~AOS_UNSIGNED_INT_MSB);
#endif

    tmp = k_mm_realloc(g_kmm_head, oldmem, newsize);

#if (RHINO_CONFIG_MM_DEBUG > 0u)
    if (app_malloc == 0) {
        krhino_owner_return_addr(tmp);
    }
#endif

    if (tmp == NULL && newsize != 0) {
#if (RHINO_CONFIG_MM_DEBUG > 0)
        static int32_t reallocdumped;
        printf("WARNING, realloc failed!!!! newsize : %u\r\n", (unsigned int)newsize);
        if (reallocdumped) {
            return tmp;
        }
        reallocdumped = 1;
        debug_cpu_stop();
        kmm_error(KMM_ERROR_UNLOCKED);
#endif
    }
    return tmp;
}

size_t krhino_mm_max_free_size_get(void)
{
    int32_t      index;
    k_mm_list_t *max, *tmp;
    size_t       max_free_block_size = 0;

    /* To avoid getting stuck after an exception, critical-section protection
     * is removed here; this interface may therefore only be invoked from
     * exception handling. */

    //cpu_cpsr_t flags_cpsr;

    //MM_CRITICAL_ENTER(g_kmm_head, flags_cpsr);

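    /* the highest set bit of free_bitmap marks the largest non-empty size
     * class; every block there is at least as large as any block in a
     * lower class, so only that one freelist needs scanning */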
    index = krhino_clz32(g_kmm_head->free_bitmap);
    if (index > 31) {
        return 0;
    }

    max = g_kmm_head->freelist[31 - index];

    while (max) {
        if (max_free_block_size < MM_GET_BUF_SIZE(max)) {
            max_free_block_size = MM_GET_BUF_SIZE(max);
        }
        tmp = max->mbinfo.free_ptr.next;
        max = tmp;
    }

    return max_free_block_size;
}
#endif
