/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-12     Jesven       first version
 * 2023-02-20     wangxiaoyao  adapt to mm
 */
#include <rthw.h>
#include <rtthread.h>

#ifdef ARCH_MM_MMU
#include <lwp.h>
#include <lwp_shm.h>
#include <lwp_mm.h>

#include <lwp_user_mm.h>
#include <mmu.h>

/* the kernel structure representing a piece of shared memory */
struct lwp_shm_struct
{
    struct rt_mem_obj mem_obj;
    size_t addr; /* points to the next item in the free list when unused */
    size_t size;
    int ref;
    size_t key;
};

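/*
 * Two AVL trees index the live shm structures: 'shm_tree_key' is keyed by
 * the user-supplied key, and 'shm_tree_pa' by the address of the backing
 * page frames, which enables reverse lookup from a mapped address.
 */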
static struct lwp_avl_struct *shm_tree_key;
static struct lwp_avl_struct *shm_tree_pa;

static int shm_free_list = -1; /* the singly-linked list of freed items */
static int shm_id_used = 0;    /* number of ids handed out from the array so far */
static struct lwp_shm_struct _shm_ary[RT_LWP_SHM_MAX_NR];

static const char *get_shm_name(rt_varea_t varea)
{
    return "user.shm";
}

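/*
 * Reference counting: every varea that maps a shm object takes a reference
 * on open and drops it on close, so the backing pages can only be released
 * by lwp_shmrm() once no mapping refers to them.
 */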
static void on_shm_varea_open(struct rt_varea *varea)
{
    struct lwp_shm_struct *shm;
    shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);
    shm->ref += 1;
}

static void on_shm_varea_close(struct rt_varea *varea)
{
    struct lwp_shm_struct *shm;
    shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);
    shm->ref -= 1;
}

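/*
 * Page-fault handler: on the first access inside the varea, map the whole
 * block of shared page frames into the faulting address space in one go,
 * then report the mapped range back to the MM fault-handling layer.
 */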
static void on_shm_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    struct lwp_shm_struct *shm;
    int err;
    shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);

    /* map all shared page frames into user space at once */
    void *page = (void *)shm->addr;
    void *pg_paddr = (char *)page + PV_OFFSET;
    err = rt_varea_map_range(varea, varea->start, pg_paddr, shm->size);

    if (err == RT_EOK)
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
        msg->response.size = shm->size;
        msg->response.vaddr = page;
    }
}

/*
 * Try to allocate a 'lwp_shm_struct' structure from the free list or the
 * static array.
 */
static int _shm_id_alloc(void)
{
    int id = -1;

    if (shm_free_list != -1) /* first try the free list */
    {
        id = shm_free_list;
        shm_free_list = (int)_shm_ary[shm_free_list].addr; /* singly linked */
    }
    else if (shm_id_used < RT_LWP_SHM_MAX_NR) /* then try the array */
    {
        id = shm_id_used;
        shm_id_used++;
    }
    return id;
}

/* Release an item in the static array back to the free list. */
static void shm_id_free(int id)
{
    /* link the freed item onto the singly-linked list */
    _shm_ary[id].addr = (size_t)shm_free_list;
    shm_free_list = id;
}

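/*
 * The free list threads spare ids through the 'addr' field of the array
 * items themselves. For instance, freeing id 3 and then id 5 into an empty
 * list gives: shm_free_list == 5, _shm_ary[5].addr == 3, and
 * _shm_ary[3].addr == (size_t)-1, which terminates the list.
 */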
/* Locate the shared memory through 'key' or create a new one. */
static int _lwp_shmget(size_t key, size_t size, int create)
{
    int id = -1;
    struct lwp_avl_struct *node_key = 0;
    struct lwp_avl_struct *node_pa = 0;
    void *page_addr = 0;
    uint32_t bit = 0;

    /* try to locate the item with the key in the binary tree */
    node_key = lwp_avl_find(key, shm_tree_key);
    if (node_key)
    {
        return (struct lwp_shm_struct *)node_key->data - _shm_ary; /* the index */
    }

    /* if no such item exists and we're allowed to create one ... */
    if (create)
    {
        struct lwp_shm_struct *p;

        if (!size)
        {
            goto err;
        }

        id = _shm_id_alloc();
        if (id == -1)
        {
            goto err;
        }

        /* allocate pages, rounded up to a power of two, to cover the required size */
        bit = rt_page_bits(size);
        page_addr = rt_pages_alloc_ext(bit, PAGE_ANY_AVAILABLE); /* virtual address */
        if (!page_addr)
        {
            goto err;
        }

        /* initialize the shared memory structure */
        p = _shm_ary + id;
        p->addr = (size_t)page_addr;
        p->size = (1UL << (bit + ARCH_PAGE_SHIFT));
        p->ref = 0;
        p->key = key;
        p->mem_obj.get_name = get_shm_name;
        p->mem_obj.on_page_fault = on_shm_page_fault;
        p->mem_obj.on_varea_open = on_shm_varea_open;
        p->mem_obj.on_varea_close = on_shm_varea_close;
        p->mem_obj.hint_free = NULL;

        /* then insert it into both balanced binary trees; the two nodes are
         * allocated together so they can be released with a single rt_free() */
        node_key = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct) * 2);
        if (!node_key)
        {
            goto err;
        }
        node_key->avl_key = p->key;
        node_key->data = (void *)p;
        lwp_avl_insert(node_key, &shm_tree_key);
        node_pa = node_key + 1;
        node_pa->avl_key = p->addr;
        node_pa->data = (void *)p;
        lwp_avl_insert(node_pa, &shm_tree_pa);
    }
    return id;

err:
    if (id != -1)
    {
        shm_id_free(id);
    }
    if (page_addr)
    {
        rt_pages_free(page_addr, bit);
    }
    if (node_key)
    {
        rt_free(node_key);
    }
    return -1;
}

/* A wrapping function: get the shared memory with the MM lock held. */
int lwp_shmget(size_t key, size_t size, int create)
{
    int ret = 0;

    rt_mm_lock();
    ret = _lwp_shmget(key, size, create);
    rt_mm_unlock();
    return ret;
}

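/*
 * Typical calling sequence (a minimal sketch; the key, size and hint values
 * below are illustrative only, and error checking is omitted):
 *
 *     int id = lwp_shmget(0x42, 4096, 1);   // find or create by key
 *     void *va = lwp_shmat(id, RT_NULL);    // map it into this process
 *     // ... use the memory ...
 *     lwp_shmdt(va);                        // detach from this address space
 *     lwp_shmrm(id);                        // freed once nothing references it
 */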
/* Locate the binary tree node corresponding to the shared-memory id. */
static struct lwp_avl_struct *shm_id_to_node(int id)
{
    struct lwp_avl_struct *node_key = 0;
    struct lwp_shm_struct *p = RT_NULL;

    /* check id */
    if (id < 0 || id >= RT_LWP_SHM_MAX_NR)
    {
        return RT_NULL;
    }

    p = _shm_ary + id; /* the address of the shared-memory structure */
    node_key = lwp_avl_find(p->key, shm_tree_key);
    if (!node_key)
    {
        return RT_NULL;
    }
    if (node_key->data != (void *)p)
    {
        return RT_NULL;
    }
    return node_key;
}

/* Free the shared pages, the shared-memory structure and its tree nodes. */
static int _lwp_shmrm(int id)
{
    struct lwp_avl_struct *node_key = RT_NULL;
    struct lwp_avl_struct *node_pa = RT_NULL;
    struct lwp_shm_struct *p = RT_NULL;
    uint32_t bit = 0;

    node_key = shm_id_to_node(id);
    if (!node_key)
    {
        return -1;
    }
    p = (struct lwp_shm_struct *)node_key->data;
    if (p->ref)
    {
        /* still mapped somewhere; keep it alive */
        return 0;
    }
    bit = rt_page_bits(p->size);
    rt_pages_free((void *)p->addr, bit);
    lwp_avl_remove(node_key, &shm_tree_key);
    node_pa = node_key + 1;
    lwp_avl_remove(node_pa, &shm_tree_pa);
    rt_free(node_key);
    shm_id_free(id);
    return 0;
}

/* A wrapping function: free the shared memory. Note that, unlike the other
 * wrappers here, no MM lock is taken around _lwp_shmrm(). */
int lwp_shmrm(int id)
{
    int ret = 0;

    ret = _lwp_shmrm(id);

    return ret;
}

/* Map the shared memory specified by 'id' at the given virtual address. */
static void *_lwp_shmat(int id, void *shm_vaddr)
{
    int err;
    struct rt_lwp *lwp = RT_NULL;
    struct lwp_avl_struct *node_key = RT_NULL;
    struct lwp_shm_struct *p = RT_NULL;
    void *va = shm_vaddr;

    /* The id is used to locate the node in the binary tree, and then get the
     * shared-memory structure linked to the node. We don't use the id to refer
     * to the shared-memory structure directly, because the binary tree is used
     * to verify that the structure is really in use.
     */
    node_key = shm_id_to_node(id);
    if (!node_key)
    {
        return RT_NULL;
    }
    p = (struct lwp_shm_struct *)node_key->data; /* p = &_shm_ary[id] */

    /* map the shared memory into the address space of the current process */
    lwp = lwp_self();
    if (!lwp)
    {
        return RT_NULL;
    }

    err = rt_aspace_map(lwp->aspace, &va, p->size, MMU_MAP_U_RWCB, MMF_PREFETCH,
                        &p->mem_obj, 0);
    if (err != RT_EOK)
    {
        va = RT_NULL;
    }
    return va;
}

/* A wrapping function: attach the shared memory at the specified address. */
void *lwp_shmat(int id, void *shm_vaddr)
{
    void *ret = RT_NULL;

    /* the requested address, if given, must be page aligned */
    if (((size_t)shm_vaddr & ARCH_PAGE_MASK) != 0)
    {
        return RT_NULL;
    }

    ret = _lwp_shmat(id, shm_vaddr);

    return ret;
}

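/*
 * Look up the shm structure that backs a mapped user address: translate the
 * user virtual address to the physical address of the backing pages, then
 * use that as the key into 'shm_tree_pa'.
 */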
static struct lwp_shm_struct *_lwp_shm_struct_get(struct rt_lwp *lwp, void *shm_vaddr)
{
    void *pa = RT_NULL;
    struct lwp_avl_struct *node_pa = RT_NULL;

    if (!lwp)
    {
        return RT_NULL;
    }
    pa = lwp_v2p(lwp, shm_vaddr); /* physical address */

    node_pa = lwp_avl_find((size_t)pa, shm_tree_pa);
    if (!node_pa)
    {
        return RT_NULL;
    }
    return (struct lwp_shm_struct *)node_pa->data;
}

static int _lwp_shm_ref_inc(struct rt_lwp *lwp, void *shm_vaddr)
{
    struct lwp_shm_struct *p = _lwp_shm_struct_get(lwp, shm_vaddr);

    if (p)
    {
        p->ref++;
        return p->ref;
    }
    return -1;
}

int lwp_shm_ref_inc(struct rt_lwp *lwp, void *shm_vaddr)
{
    int ret = 0;

    rt_mm_lock();
    ret = _lwp_shm_ref_inc(lwp, shm_vaddr);
    rt_mm_unlock();

    return ret;
}

static int _lwp_shm_ref_dec(struct rt_lwp *lwp, void *shm_vaddr)
{
    struct lwp_shm_struct *p = _lwp_shm_struct_get(lwp, shm_vaddr);

    if (p && (p->ref > 0))
    {
        p->ref--;
        return p->ref;
    }
    return -1;
}

int lwp_shm_ref_dec(struct rt_lwp *lwp, void *shm_vaddr)
{
    int ret = 0;

    rt_mm_lock();
    ret = _lwp_shm_ref_dec(lwp, shm_vaddr);
    rt_mm_unlock();

    return ret;
}

/* Unmap the shared memory from the address space of the current process. */
int _lwp_shmdt(void *shm_vaddr)
{
    struct rt_lwp *lwp = RT_NULL;
    int ret = 0;

    lwp = lwp_self();
    if (!lwp)
    {
        return -1;
    }

    ret = rt_aspace_unmap(lwp->aspace, shm_vaddr);
    if (ret != RT_EOK)
    {
        ret = -1;
    }
    return ret;
}

/* A wrapping function: detach the mapped shared memory. */
int lwp_shmdt(void *shm_vaddr)
{
    int ret = 0;

    rt_mm_lock();
    ret = _lwp_shmdt(shm_vaddr);
    rt_mm_unlock();

    return ret;
}

/* Get the kernel virtual address of a shared memory. */
void *_lwp_shminfo(int id)
{
    struct lwp_avl_struct *node_key = RT_NULL;
    struct lwp_shm_struct *p = RT_NULL;

    /* the shared memory is in use only if it exists in the binary tree */
    node_key = shm_id_to_node(id);
    if (!node_key)
    {
        return RT_NULL;
    }
    p = (struct lwp_shm_struct *)node_key->data; /* p = &_shm_ary[id] */

    return (void *)((char *)p->addr - PV_OFFSET); /* get the virtual address */
}

/* A wrapping function: get the kernel virtual address of a shared memory. */
void *lwp_shminfo(int id)
{
    void *vaddr = RT_NULL;

    rt_mm_lock();
    vaddr = _lwp_shminfo(id);
    rt_mm_unlock();
    return vaddr;
}

#ifdef RT_USING_FINSH
static int _shm_info(struct lwp_avl_struct *node_key, void *data)
{
    int id = 0;
    struct lwp_shm_struct *p = (struct lwp_shm_struct *)node_key->data;

    id = p - _shm_ary;
    rt_kprintf("0x%08x 0x%08x 0x%08x %8d\n", p->key, p->addr, p->size, id);
    return 0;
}

void list_shm(void)
{
    rt_kprintf("key        paddr      size             id\n");
    rt_kprintf("---------- ---------- ---------- --------\n");
    rt_mm_lock();
    lwp_avl_traversal(shm_tree_key, _shm_info, NULL);
    rt_mm_unlock();
}
MSH_CMD_EXPORT(list_shm, show shared memory info);
#endif /* RT_USING_FINSH */

#endif /* ARCH_MM_MMU */