1 /*
2 * Copyright (C) 2015-2017 Alibaba Group Holding Limited
3 */
4
5 #include "k_api.h"
6
krhino_mblk_pool_init(mblk_pool_t * pool,const name_t * name,void * pool_start,size_t pool_size)7 kstat_t krhino_mblk_pool_init(mblk_pool_t *pool, const name_t *name,
8 void *pool_start, size_t pool_size)
9 {
10 uint32_t blk_type; /* max blocks mem pool offers */
11 uint8_t align_mask; /* address alignment */
12 mblk_list_t *blk_list;
13
14 NULL_PARA_CHK(pool);
15 NULL_PARA_CHK(name);
16 NULL_PARA_CHK(pool_start);
17
18 memset(pool_start, 0, pool_size);
19
20 /* check address & size alignment */
21 align_mask = sizeof(uintptr_t) - 1u;
22
23 if (((size_t)pool_start & align_mask) || (pool_size & align_mask)) {
24 return RHINO_INV_ALIGN;
25 }
26
27 if (pool_size % MM_BLK_SLICE_SIZE) {
28 return RHINO_MM_POOL_SIZE_ERR;
29 }
30
31 krhino_spin_lock_init(&pool->blk_lock);
32
33 pool->pool_name = name;
34 pool->pool_start = (uintptr_t)pool_start;
35 pool->pool_end = (uintptr_t)(pool_start + pool_size);
36 pool->slice_cnt = 0;
37
38 memset(pool->slice_type, 0, sizeof(pool->slice_type));
39
40 for (blk_type = 0 ; blk_type < MM_BLK_SLICE_BIT ; blk_type++) {
41 blk_list = &pool->blk_list[blk_type];
42 memset(blk_list, 0, sizeof(*blk_list));
43 blk_list->blk_size = MM_BLK_TYPE2SIZE(blk_type);
44 }
45
46 TRACE_MBLK_POOL_CREATE(krhino_cur_task_get(), pool);
47
48 return RHINO_SUCCESS;
49 }
50
/**
 * Allocate one block of at least @size bytes from the pool (no locking).
 *
 * Caller must already hold pool->blk_lock (or otherwise serialize access).
 * Strategy: round the request up to the smallest fitting size class, try
 * that class's freelist, otherwise carve a new block from the class's
 * current slice; if no spare slice exists, escalate to the next larger
 * size class and retry.
 *
 * @param pool  block pool to allocate from
 * @param size  requested size in bytes
 * @return pointer to the block, or NULL if every candidate class is exhausted
 */
void *krhino_mblk_alloc_nolock(mblk_pool_t *pool, uint32_t size)
{
    uint32_t blk_type;
    mblk_list_t *blk_list = NULL;
    uintptr_t avail_blk = (uintptr_t)NULL;

    if (pool == NULL) {
        return NULL;
    }

    /* a free block's first bytes hold the freelist link, so every block
       must be at least pointer-sized */
    size = size < sizeof(uintptr_t) ? sizeof(uintptr_t) : size;

    blk_type = MM_BLK_SIZE2TYPE(size);

    while (blk_type < MM_BLK_SLICE_BIT) {
        blk_list = &(pool->blk_list[blk_type]);

        /* try to get from freelist */
        if ((avail_blk = blk_list->free_head) != (uintptr_t)NULL) {
            /* pop: the block's first word is the link to the next free block */
            blk_list->free_head = *(uintptr_t *)avail_blk;
            blk_list->freelist_cnt--;
            break;
        }

        /* check if need new slice (none assigned yet, or current one full) */
        if (blk_list->slice_addr == 0 || blk_list->slice_offset == MM_BLK_SLICE_SIZE) {
            if (pool->slice_cnt == MM_BLK_SLICE_NUM) {
                /* no spare slices left in the pool: fall back to the next
                   larger size class and retry its freelist/slice */
                blk_type++;
                continue;
            }

            /* get new slice for this type blks */
            blk_list->slice_addr = pool->pool_start + pool->slice_cnt * MM_BLK_SLICE_SIZE;
            pool->slice_type[pool->slice_cnt] = blk_type;
            blk_list->slice_offset = 0;
            pool->slice_cnt++;
            blk_list->slice_cnt++;
        }

        /* cut blk from slice */
        avail_blk = blk_list->slice_addr + blk_list->slice_offset;
        blk_list->slice_offset += blk_list->blk_size;
        break;
    };

    /* bookkeeping on the last class tried: a failed allocation bumps
       fail_cnt, a successful one bumps the outstanding-block count */
    if (blk_list) {
        (avail_blk == (uintptr_t)0) ? blk_list->fail_cnt++ : blk_list->nofree_cnt++;
    }

    return (void *)avail_blk;
}
102
krhino_mblk_free_nolock(mblk_pool_t * pool,void * blk)103 kstat_t krhino_mblk_free_nolock(mblk_pool_t *pool, void *blk)
104 {
105 uint32_t slice_idx;
106 uint32_t blk_type;
107 mblk_list_t *blk_list;
108
109 NULL_PARA_CHK(pool);
110 NULL_PARA_CHK(blk);
111
112 slice_idx = ((uintptr_t)blk - pool->pool_start) >> MM_BLK_SLICE_BIT;
113 if (slice_idx >= MM_BLK_SLICE_NUM) {
114 return RHINO_MM_FREE_ADDR_ERR;
115 }
116
117 blk_type = pool->slice_type[slice_idx];
118 if (blk_type >= MM_BLK_SLICE_BIT) {
119 return RHINO_MM_FREE_ADDR_ERR;
120 }
121
122 blk_list = &(pool->blk_list[blk_type]);
123 /* use the first 4 byte of the free block point to head of free list */
124 *((uintptr_t *)blk) = blk_list->free_head;
125 blk_list->free_head = (uintptr_t)blk;
126 blk_list->nofree_cnt--;
127 blk_list->freelist_cnt++;
128
129 return RHINO_SUCCESS;
130 }
131
krhino_mblk_info_nolock(mblk_pool_t * pool,mblk_info_t * info)132 kstat_t krhino_mblk_info_nolock(mblk_pool_t *pool, mblk_info_t *info)
133 {
134 size_t blk_size = 0;
135 uint32_t idx;
136 mblk_list_t *blk_list;
137 uint32_t size_in_list = 0;
138 uint32_t size_in_slice = 0;
139
140 NULL_PARA_CHK(pool);
141 NULL_PARA_CHK(info);
142
143 /* no data changed, no lock needed. The info may be not absolutely precise */
144
145 for (idx = 0 ; idx < MM_BLK_SLICE_BIT ; idx++) {
146 blk_list = &(pool->blk_list[idx]);
147 size_in_list += blk_list->nofree_cnt * blk_list->blk_size;
148 if (blk_list->slice_cnt > 0) {
149 size_in_slice += (blk_list->slice_cnt - 1) * MM_BLK_SLICE_SIZE + blk_list->slice_offset;
150 blk_size = blk_list->blk_size;
151 }
152 }
153
154 info->pool_name = pool->pool_name;
155 info->pool_size = pool->pool_end - pool->pool_start;
156 info->used_size = size_in_list;
157 info->max_used_size = size_in_slice;
158 info->max_blk_size = blk_size;
159
160 return RHINO_SUCCESS;
161 }
162
/**
 * Allocate a block from the pool, serialized by the pool's spinlock.
 *
 * @param pool  block pool to allocate from
 * @param size  requested size in bytes
 * @return pointer to the block, or NULL on failure
 */
void *krhino_mblk_alloc(mblk_pool_t *pool, uint32_t size)
{
    void      *blk;
    cpu_cpsr_t flags_cpsr;

    if (pool == NULL) {
        return NULL;
    }

    krhino_spin_lock_irq_save(&pool->blk_lock, flags_cpsr);
    blk = krhino_mblk_alloc_nolock(pool, size);
    krhino_spin_unlock_irq_restore(&pool->blk_lock, flags_cpsr);

    return blk;
}
180
krhino_mblk_free(mblk_pool_t * pool,void * blk)181 kstat_t krhino_mblk_free(mblk_pool_t *pool, void *blk)
182 {
183 kstat_t ret;
184 cpu_cpsr_t flags_cpsr;
185
186 NULL_PARA_CHK(pool);
187 NULL_PARA_CHK(blk);
188
189 krhino_spin_lock_irq_save(&pool->blk_lock, flags_cpsr);
190
191 ret = krhino_mblk_free_nolock(pool, blk);
192
193 krhino_spin_unlock_irq_restore(&pool->blk_lock, flags_cpsr);
194
195 return ret;
196 }
197
krhino_mblk_info(mblk_pool_t * pool,mblk_info_t * info)198 kstat_t krhino_mblk_info(mblk_pool_t *pool, mblk_info_t *info)
199 {
200 kstat_t ret;
201 cpu_cpsr_t flags_cpsr;
202
203 NULL_PARA_CHK(pool);
204 NULL_PARA_CHK(info);
205
206 krhino_spin_lock_irq_save(&pool->blk_lock, flags_cpsr);
207
208 ret = krhino_mblk_info_nolock(pool, info);
209
210 krhino_spin_unlock_irq_restore(&pool->blk_lock, flags_cpsr);
211
212 return ret;
213 }
214
215
216