/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-30     WangXiaoyao  the first version
 * 2023-08-19     Shell        Support varea modification handler
 * 2023-10-13     Shell        Replace the page management algorithm of pgmgr
 */

#define DBG_TAG "mm.object"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <rtthread.h>

#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_page.h"
#include <mmu.h>

#include <string.h>
#include <stdlib.h>

/**
 * varea-based dummy memory object whose data comes directly from page frames
 * (see the usage sketch at the end of this file)
 */

static const char *get_name(rt_varea_t varea)
{
    return "dummy-mapper";
}

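/*
 * Anonymous page-fault handler: allocate one fresh page frame, using the
 * affinity id derived from the faulting address as an allocation hint, and
 * return it through the fault message.  On allocation failure a warning is
 * logged and the response is left unfilled.
 */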
static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    void *page;
    int affid = RT_PAGE_PICK_AFFID(msg->fault_vaddr);
    page = rt_pages_alloc_tagged(0, affid, PAGE_ANY_AVAILABLE);

    if (!page)
    {
        LOG_W("%s: page alloc failed", __func__);
        return;
    }

    msg->response.status = MM_FAULT_STATUS_OK;
    msg->response.size = ARCH_PAGE_SIZE;
    msg->response.vaddr = page;
}

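/*
 * The dummy object keeps no per-varea private data: open only clears the
 * data pointer and close has nothing to release.
 */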
static void on_varea_open(struct rt_varea *varea)
{
    varea->data = NULL;
}

static void on_varea_close(struct rt_varea *varea)
{
}

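/*
 * With no per-varea bookkeeping, the varea modification handlers below
 * (expand/shrink/split/merge) have nothing to update and simply succeed.
 */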
static rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    return RT_EOK;
}

static rt_err_t on_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
    return RT_EOK;
}

static rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
    return RT_EOK;
}

static rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
    return RT_EOK;
}

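/*
 * Read one page of backing data: translate the requested virtual address to
 * its physical frame and, if it is mapped, copy the frame into the request
 * buffer through the kernel's linear mapping (physical address minus
 * PV_OFFSET).  The response status is left untouched for unmapped addresses.
 */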
static void page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    char *dst_k;
    rt_aspace_t aspace = varea->aspace;
    dst_k = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
    if (dst_k != ARCH_MAP_FAILED)
    {
        RT_ASSERT(!((long)dst_k & ARCH_PAGE_MASK));
        dst_k = (void *)((char *)dst_k - PV_OFFSET);
        memcpy(msg->buffer_vaddr, dst_k, ARCH_PAGE_SIZE);
        msg->response.status = MM_FAULT_STATUS_OK;
    }
}

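/*
 * Write one page of backing data: the mirror of page_read(), copying from
 * the request buffer into the mapped frame through the kernel's linear
 * mapping.
 */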
static void page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    void *dst_k;
    rt_aspace_t aspace = varea->aspace;
    dst_k = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
    if (dst_k != ARCH_MAP_FAILED)
    {
        RT_ASSERT(!((long)dst_k & ARCH_PAGE_MASK));
        dst_k = (void *)((char *)dst_k - PV_OFFSET);
        memcpy(dst_k, msg->buffer_vaddr, ARCH_PAGE_SIZE);
        msg->response.status = MM_FAULT_STATUS_OK;
    }
}

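/*
 * The exported memory-object instance.  No hint_free callback is provided
 * because the dummy object has no preferred mapping address.
 */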
struct rt_mem_obj rt_mm_dummy_mapper = {
    .get_name = get_name,
    .on_page_fault = on_page_fault,
    .hint_free = NULL,
    .on_varea_open = on_varea_open,
    .on_varea_close = on_varea_close,

    .on_varea_shrink = on_varea_shrink,
    .on_varea_split = on_varea_split,
    .on_varea_expand = on_varea_expand,
    .on_varea_merge = on_varea_merge,

    .page_write = page_write,
    .page_read = page_read,
};
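/*
 * Usage sketch (illustrative only, excluded from the build): one way a
 * caller might back an anonymous kernel mapping with rt_mm_dummy_mapper.
 * The rt_aspace_map() signature, the rt_kernel_space aspace and the
 * MMU_MAP_K_RWCB/MMF_PREFETCH constants are assumptions taken from
 * mm_aspace.h/mmu.h and may differ across targets and versions.
 */
#if 0
static void *anon_map_example(rt_size_t length)
{
    void *vaddr = RT_NULL;

    /* Let the aspace pick a free range; page frames are then populated
     * either up front (MMF_PREFETCH) or lazily through on_page_fault(). */
    int err = rt_aspace_map(&rt_kernel_space, &vaddr, length,
                            MMU_MAP_K_RWCB, MMF_PREFETCH,
                            &rt_mm_dummy_mapper, 0);

    return err == RT_EOK ? vaddr : RT_NULL;
}
#endif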