1 /*
2 * Copyright (c) 2006-2025 RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 */
9
10 #include "dfs_file.h"
11 #include "dfs_dentry.h"
12 #include "dfs_mnt.h"
13
14 #define DBG_TAG "dfs.mmap"
15 #define DBG_LVL DBG_WARNING
16 #include <rtdbg.h>
17
18 #if defined(RT_USING_SMART) && defined(ARCH_MM_MMU) && defined(RT_USING_PAGECACHE)
19
20 #include "dfs_pcache.h"
21
22 #include <lwp.h>
23
24 #include <sys/mman.h>
25
26 #include <lwp_user_mm.h>
27 #include <mm_aspace.h>
28 #include <mm_fault.h>
29 #include <mm_flag.h>
30 #include <mm_page.h>
31 #include <mmu.h>
32 #include <page.h>
33 #include <tlb.h>
34
35 static rt_mem_obj_t dfs_get_mem_obj(struct dfs_file *file);
36 static void *dfs_mem_obj_get_file(rt_mem_obj_t mem_obj);
37
38 /**
39 * @brief Perform memory mapping operation
40 *
41 * @param[in] lwp Pointer to the lightweight process structure
42 * @param[in] map_vaddr Requested virtual address for mapping (may be NULL)
43 * @param[in] map_size Size of the memory region to map
44 * @param[in] attr Memory attributes for the mapping
45 * @param[in] flags Memory mapping flags
46 * @param[in] pgoffset Offset in pages from the start of the memory object
47 * @param[in] data Pointer to the file descriptor to be mapped
48 * @param[out] code Pointer to store the operation result code
49 *
50 * @return void* The mapped virtual address on success, NULL on failure
51 *
52 * @note This is a low-level mapping function that interacts directly with the address space manager.
53 * The actual mapping is performed by rt_aspace_map().
54 */
static void *_do_mmap(struct rt_lwp *lwp, void *map_vaddr, size_t map_size, size_t attr,
                      mm_flag_t flags, off_t pgoffset, void *data, rt_err_t *code)
{
    int ret;
    void *vaddr = map_vaddr;
    /* 'data' is the struct dfs_file * being mapped; fetch (or lazily create)
     * its memory object so rt_aspace_map() can route faults back to us. */
    rt_mem_obj_t mem_obj = dfs_get_mem_obj(data);

    ret = rt_aspace_map(lwp->aspace, &vaddr, map_size,
                        attr, flags, mem_obj, pgoffset);
    if (ret != RT_EOK)
    {
        vaddr = RT_NULL;
        /* %p for the pointer argument: printing a void * with %lx is a
         * printf format/argument mismatch (undefined behavior). */
        LOG_E("failed to map %p with size %lx with errno %d", map_vaddr,
              map_size, ret);
    }

    if (code)
    {
        *code = ret;
    }

    return vaddr;
}
78
79 /**
80 * @brief Map data to user space address
81 *
82 * @param[in,out] mmap2 Pointer to memory mapping arguments structure
83 * - Input: Contains mapping parameters (addr, length, etc.)
84 * - Output: Contains the mapped address in ret field if successful
85 * @param[in] data Pointer to the file descriptor to be mapped
86 * @param[out] code Pointer to store the error code if mapping fails
87 *
88 * @return void* The mapped virtual address on success, NULL on failure
89 *
90 * @note This function performs page alignment on the mapping parameters and
91 * converts user-space flags/attributes to kernel-space before mapping.
92 */
/**
 * @brief Page-align the mmap request and map it into the lwp's address space.
 *
 * @param[in,out] mmap2 Mapping arguments (addr/length/prot/flags/pgoffset/lwp)
 * @param[in]     data  The struct dfs_file * to back the mapping
 * @param[out]    code  Receives the result code from the mapping attempt
 *
 * @return The mapped (page-aligned) virtual address, or RT_NULL on failure.
 *         When mmap2->length is 0 the request is returned unmapped as-is.
 */
static void *_map_data_to_uspace(struct dfs_mmap2_args *mmap2, void *data, rt_err_t *code)
{
    void *va = mmap2->addr;
    size_t len = mmap2->length;

    if (len == 0)
    {
        return va;
    }

    /* Round the start address down to a page boundary and grow the length
     * so the aligned range still covers the originally requested bytes. */
    size_t page_off = (size_t)va & ARCH_PAGE_MASK;
    va = (void *)((size_t)va & ~ARCH_PAGE_MASK);
    len = (len + page_off + ARCH_PAGE_SIZE - 1) & ~ARCH_PAGE_MASK;

    /* Translate the user-space flags/prot into kernel-side values. */
    rt_size_t k_flags = lwp_user_mm_flag_to_kernel(mmap2->flags);
    k_flags = MMF_CREATE(k_flags, mmap2->min_align_size);
    rt_size_t k_attr = lwp_user_mm_attr_to_kernel(mmap2->prot);

    return _do_mmap(mmap2->lwp, va, len, k_attr, k_flags, mmap2->pgoffset, data, code);
}
118
/* Intentionally empty: file-backed mappings keep no per-hint state, so there
 * is nothing to release when the address-space manager frees a VA hint. */
static void hint_free(rt_mm_va_hint_t hint)
{
}
122
123 /**
124 * @brief Handle page fault for memory mapped file
125 *
126 * @param[in] varea Pointer to the virtual memory area structure
127 * @param[in,out] msg Pointer to the page fault message structure
128 * - Input: Contains fault information (fault_vaddr, etc.)
129 * - Output: Contains response status and mapped page address
130 *
131 * @note This function is called when a page fault occurs in a memory mapped file region.
132 * It attempts to map the faulting page and updates the response accordingly.
133 */
on_page_fault(struct rt_varea * varea,struct rt_aspace_fault_msg * msg)134 static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
135 {
136 void *page;
137 struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
138
139 if (file)
140 {
141 LOG_I("%s varea: %p", __func__, varea);
142 LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
143 varea->start, varea->size, varea->offset, varea->attr, varea->flag);
144 LOG_I("fault vaddr: %p", msg->fault_vaddr);
145
146 if (file->dentry)
147 {
148 LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
149 }
150
151 page = dfs_aspace_mmap(file, varea, msg->fault_vaddr);
152 if (page)
153 {
154 msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
155 msg->response.size = ARCH_PAGE_SIZE;
156 msg->response.vaddr = page;
157 }
158 else
159 {
160 LOG_E("%s varea %p mmap failed at vaddr %p", __func__, varea, msg->fault_vaddr);
161 }
162 }
163 else
164 {
165 LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
166 }
167 }
168
169 /**
170 * @brief Handle virtual memory area opening event
171 *
172 * @param[in] varea Pointer to the virtual memory area structure
173 *
174 * @note This function is called when a virtual memory area is opened.
175 * It increments the reference count of the associated file and
176 * initializes varea->data to NULL.
177 */
on_varea_open(struct rt_varea * varea)178 static void on_varea_open(struct rt_varea *varea)
179 {
180 struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
181 varea->data = RT_NULL;
182 rt_atomic_add(&(file->ref_count), 1);
183 }
184
185 /**
186 * @brief Handle virtual memory area closing event
187 *
188 * @param[in] varea Pointer to the virtual memory area structure
189 *
190 * @note This function is called when a virtual memory area is closed.
191 * It performs cleanup operations including:
192 * - Unmapping the file from memory
193 * - Decrementing file reference count
194 * - Closing and destroying file if reference count reaches zero
195 */
/**
 * @brief Varea close callback: unmap the region and drop the file reference.
 *
 * @param[in] varea The virtual memory area being closed
 *
 * @note Ordering matters here: the pages are unmapped first, then the DFS
 *       file lock is taken so the ref-count test and the close/destroy pair
 *       cannot race with another opener. When this varea holds the last
 *       reference (ref_count == 1) the file itself is closed and destroyed;
 *       otherwise only the reference count is decremented.
 */
static void on_varea_close(struct rt_varea *varea)
{
    struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);

    if (file)
    {
        LOG_I("%s varea: %p", __func__, varea);
        LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
              varea->start, varea->size, varea->offset, varea->attr, varea->flag);

        if (file->dentry)
        {
            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
        }

        /* Release every page this varea had mapped from the page cache. */
        dfs_aspace_unmap(file, varea);
        /* Lock so the ref-count check and close/destroy are one atomic step. */
        dfs_file_lock();
        if (rt_atomic_load(&(file->ref_count)) == 1)
        {
            /* Last reference: tear the file down completely. */
            dfs_file_close(file);
            dfs_file_destroy(file);
        }
        else
        {
            rt_atomic_sub(&(file->ref_count), 1);
        }
        dfs_file_unlock();
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
    }
}
229
230 /**
231 * @brief Get the name of the memory mapped file
232 *
233 * @param[in] varea Pointer to the virtual memory area structure
234 *
235 * @return const char* The name of the mapped file if available,
236 * otherwise returns "file-mapper" as default name
237 *
238 * @note This function retrieves the file name from the dentry structure
239 * associated with the memory mapped file.
240 */
/**
 * @brief Return a display name for the mapped region.
 *
 * @param[in] varea The virtual memory area to name
 *
 * @return The backing file's dentry pathname when available,
 *         otherwise the generic label "file-mapper".
 */
static const char *get_name(rt_varea_t varea)
{
    struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);

    if (file && file->dentry)
    {
        return file->dentry->pathname;
    }

    return "file-mapper";
}
247
248 /**
249 * @brief Read data from memory mapped file page
250 *
251 * @param[in] varea Pointer to the virtual memory area structure
252 * @param[in,out] msg Pointer to the I/O message structure
253 * - Input: Contains read request information
254 * - Output: Contains response status and read data
255 *
256 * @note This function handles page read operations for memory mapped files.
257 * If the read size is less than page size, it zero-fills the remaining space.
258 */
page_read(struct rt_varea * varea,struct rt_aspace_io_msg * msg)259 void page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
260 {
261 rt_ubase_t ret;
262 struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
263
264 if (file)
265 {
266 LOG_I("%s varea: %p", __func__, varea);
267 LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
268 varea->start, varea->size, varea->offset, varea->attr, varea->flag);
269
270 ret = dfs_aspace_mmap_read(file, varea, msg);
271 if (ret >= 0)
272 {
273 msg->response.status = MM_FAULT_STATUS_OK;
274 if (ret < ARCH_PAGE_SIZE)
275 {
276 memset((char *)msg->buffer_vaddr + ret, 0, ARCH_PAGE_SIZE - ret);
277 }
278 }
279 }
280 else
281 {
282 LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
283 }
284 }
285
286 /**
287 * @brief Write data to memory mapped file page
288 *
289 * @param[in] varea Pointer to the virtual memory area structure
290 * @param[in,out] msg Pointer to the I/O message structure
291 * - Input: Contains write request information
292 * - Output: Contains response status and write result
293 *
294 * @note This function handles page write operations for memory mapped files.
295 * If the write size is less than page size, it zero-fills the remaining space.
296 */
page_write(struct rt_varea * varea,struct rt_aspace_io_msg * msg)297 void page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
298 {
299 rt_ubase_t ret;
300 struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
301
302 if (file)
303 {
304 LOG_I("%s varea: %p", __func__, varea);
305 LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
306 varea->start, varea->size, varea->offset, varea->attr, varea->flag);
307
308 ret = dfs_aspace_mmap_write(file, varea, msg);
309 if (ret > 0)
310 {
311 msg->response.status = MM_FAULT_STATUS_OK;
312 if (ret < ARCH_PAGE_SIZE)
313 {
314 memset((char *)msg->buffer_vaddr + ret, 0, ARCH_PAGE_SIZE - ret);
315 }
316 }
317 }
318 else
319 {
320 LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
321 }
322 }
323
324 /**
325 * @brief Unmap pages from virtual memory area
326 *
327 * @param[in] varea Pointer to the virtual memory area structure
328 * @param[in] rm_start Starting address of the range to unmap (must be page aligned)
329 * @param[in] rm_end Ending address of the range to unmap (must be page aligned)
330 *
331 * @return rt_err_t Error code:
332 * - RT_EOK: Success
333 * - -RT_ERROR: Failure (varea not associated with a file)
334 *
335 * @note This function performs page-by-page unmapping.
336 * Both rm_start and rm_end must be page-aligned (checked by RT_ASSERT).
337 */
unmap_pages(rt_varea_t varea,void * rm_start,void * rm_end)338 static rt_err_t unmap_pages(rt_varea_t varea, void *rm_start, void *rm_end)
339 {
340 struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
341
342 if (file)
343 {
344 LOG_I("%s varea: %p start: %p end: %p", __func__, varea, rm_start, rm_end);
345
346 RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
347 RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
348 while (rm_start != rm_end)
349 {
350 dfs_aspace_page_unmap(file, varea, rm_start);
351 rm_start += ARCH_PAGE_SIZE;
352 }
353
354 return RT_EOK;
355 }
356 else
357 {
358 LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
359 }
360
361 return -RT_ERROR;
362 }
363
364 /**
365 * @brief Handle virtual memory area shrinking operation
366 *
367 * @param[in] varea Pointer to the virtual memory area structure
368 * @param[in] new_vaddr New starting address after shrinking
369 * @param[in] size New size of the virtual memory area
370 *
371 * @return rt_err_t Error code:
372 * - RT_EOK: Success
373 * - Other errors from unmap_pages()
374 *
375 * @note This function determines the range of pages to unmap based on whether
376 * the varea is shrinking from the start or end.
377 */
on_varea_shrink(struct rt_varea * varea,void * new_vaddr,rt_size_t size)378 rt_err_t on_varea_shrink(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
379 {
380 char *varea_start = varea->start;
381 void *rm_start;
382 void *rm_end;
383
384 LOG_I("%s varea: %p", __func__, varea);
385 LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
386 varea->start, varea->size, varea->offset, varea->attr, varea->flag);
387 LOG_I("new_vaddr: %p size: %p", new_vaddr, size);
388
389 if (varea_start == (char *)new_vaddr)
390 {
391 rm_start = varea_start + size;
392 rm_end = varea_start + varea->size;
393 }
394 else
395 {
396 rm_start = varea_start;
397 rm_end = new_vaddr;
398 }
399
400 return unmap_pages(varea, rm_start, rm_end);
401 }
402
403 /**
404 * @brief Handle virtual memory area expansion operation
405 *
406 * @param[in] varea Pointer to the virtual memory area structure
407 * @param[in] new_vaddr New starting address after expansion
408 * @param[in] size New size of the expanded virtual memory area
409 *
410 * @return rt_err_t returns RT_EOK (success).
411 *
412 * @note This function is currently not implemented.
413 */
on_varea_expand(struct rt_varea * varea,void * new_vaddr,rt_size_t size)414 rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
415 {
416 LOG_I("%s varea: %p", __func__, varea);
417 LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
418 varea->start, varea->size, varea->offset, varea->attr, varea->flag);
419 LOG_I("new_vaddr: %p size: %p", new_vaddr, size);
420
421 return RT_EOK;
422 }
423
424 /**
425 * @brief Handle virtual memory area splitting operation
426 *
427 * @param[in] existed Pointer to the existing virtual memory area to be split
428 * @param[in] unmap_start Starting address of the range to unmap
429 * @param[in] unmap_len Length of the range to unmap
430 * @param[in,out] subset Pointer to the new subset virtual memory area
431 * - Input: Contains new varea parameters
432 * - Output: Contains initialized varea after splitting
433 *
434 * @return rt_err_t Error code:
435 * - RT_EOK: Success
436 * - -RT_ERROR: Failure (varea not associated with a file)
437 *
438 * @note This function splits an existing virtual memory area into two parts.
439 * It unmaps the specified range and initializes the new subset area.
440 */
on_varea_split(struct rt_varea * existed,void * unmap_start,rt_size_t unmap_len,struct rt_varea * subset)441 rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
442 {
443 rt_err_t rc;
444 struct dfs_file *file = dfs_mem_obj_get_file(existed->mem_obj);
445
446 if (file)
447 {
448 LOG_I("%s varea: %p", __func__, existed);
449 LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
450 existed->start, existed->size, existed->offset, existed->attr, existed->flag);
451 LOG_I("unmap_start: %p unmap_len: %p", unmap_start, unmap_len);
452
453 if (file->dentry)
454 {
455 LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
456 }
457
458 rc = unmap_pages(existed, unmap_start, (char *)unmap_start + unmap_len);
459 if (!rc)
460 {
461 rc = unmap_pages(existed, subset->start, (char *)subset->start + subset->size);
462 if (!rc)
463 on_varea_open(subset);
464 }
465
466 return rc;
467 }
468 else
469 {
470 LOG_E("%s varea %p not a file, vaddr %p", __func__, existed, existed->start);
471 }
472
473 return -RT_ERROR;
474 }
475
476 /**
477 * @brief Handle virtual memory area merging operation
478 *
479 * @param[in] merge_to Pointer to the target virtual memory area that will receive the merge
480 * @param[in] merge_from Pointer to the source virtual memory area to be merged
481 *
482 * @return rt_err_t Error code:
483 * - RT_EOK: Success
484 * - -RT_ERROR: Failure (varea not associated with a file)
485 */
/**
 * @brief Varea merge callback: absorb merge_from into merge_to.
 *
 * @param[in] merge_to   Target varea that receives the merge (unused here;
 *                       its pages repopulate lazily via page faults)
 * @param[in] merge_from Source varea being absorbed and released
 *
 * @return RT_EOK on success, -RT_ERROR when merge_from has no backing file.
 */
rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
    struct dfs_file *file = dfs_mem_obj_get_file(merge_from->mem_obj);

    if (file)
    {
        LOG_I("%s varea: %p", __func__, merge_from);
        LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
              merge_from->start, merge_from->size, merge_from->offset, merge_from->attr, merge_from->flag);

        if (file->dentry)
        {
            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
        }

        /* NOTE(review): on_varea_close() below also calls
         * dfs_aspace_unmap(file, merge_from), so this first call looks
         * redundant — confirm dfs_aspace_unmap() is idempotent before
         * removing either call. */
        dfs_aspace_unmap(file, merge_from);
        /* Drop the file reference held on behalf of merge_from. */
        on_varea_close(merge_from);

        return RT_EOK;
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, merge_from, merge_from->start);
    }

    return -RT_ERROR;
}
513
514 /**
515 * @brief Handle virtual memory area remapping operation
516 *
517 * @param[in] varea Pointer to the virtual memory area structure
518 * @param[in] new_size New size of the virtual memory area after remapping
519 * @param[in] flags Remapping flags (e.g., MREMAP_MAYMOVE)
520 * @param[in] new_address New starting address after remapping (optional)
521 *
522 * @return void* Pointer to the new virtual memory area after remapping
523 * - Returns RT_NULL if remapping fails
524 *
525 * @note This function remaps a virtual memory area to a new address or size.
526 * It currently supports the MREMAP_MAYMOVE flag.
527 */
/**
 * @brief Varea remap callback: map the file at a new address/size.
 *
 * @param[in] varea       Varea being remapped
 * @param[in] new_size    Requested new size (rounded up to whole pages)
 * @param[in] flags       Remap flags; only MREMAP_MAYMOVE is supported
 * @param[in] new_address Preferred new address, or NULL to reuse varea->start
 *
 * @return The newly mapped address, or RT_NULL on failure / unsupported flags.
 *
 * @note Fix: the failure log printed the pointer 'vaddr' with %lx — a printf
 *       format/argument mismatch; %p is used instead.
 */
void *on_varea_mremap(struct rt_varea *varea, rt_size_t new_size, int flags, void *new_address)
{
    void *vaddr = RT_NULL;
    struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);

#ifndef MREMAP_MAYMOVE
#define MREMAP_MAYMOVE 1
#endif

    if (file && flags == MREMAP_MAYMOVE)
    {
        int ret;
        rt_mem_obj_t mem_obj = dfs_get_mem_obj(file);

        vaddr = new_address ? new_address : varea->start;
        /* Round the requested size up to a whole number of pages. */
        new_size = (new_size + ARCH_PAGE_SIZE - 1);
        new_size &= ~ARCH_PAGE_MASK;
        ret = rt_aspace_map(varea->aspace, &vaddr, new_size, varea->attr, varea->flag, mem_obj, varea->offset);
        if (ret != RT_EOK)
        {
            LOG_E("failed to map %p with size %lx with errno %d", vaddr, new_size, ret);
            vaddr = RT_NULL;
        }
        else
        {
            LOG_I("old: %p size: %p new: %p size: %p", varea->start, varea->size, vaddr, new_size);
        }
    }

    return vaddr;
}
559
560 /**
561 * @brief Memory object operations structure
562 *
563 * Defines function pointers for various virtual memory area (varea) operations,
564 * including memory management, page fault handling, and lifecycle callbacks.
565 */
/**
 * @brief Memory object operations template.
 *
 * Callback table copied into each per-file dfs_mem_obj by dfs_get_mem_obj();
 * the address space manager invokes these for fault handling, page I/O, and
 * varea lifecycle events on file-backed mappings.
 */
static struct rt_mem_obj _mem_obj =
{
    .hint_free = hint_free,             /* Free memory hint function (no-op) */
    .on_page_fault = on_page_fault,     /* Page fault handler */
    .on_varea_open = on_varea_open,     /* Varea open callback (takes file ref) */
    .on_varea_close = on_varea_close,   /* Varea close callback (drops file ref) */
    .get_name = get_name,               /* Get mapped file name */

    .page_read = page_read,             /* Page read operation */
    .page_write = page_write,           /* Page write operation */

    .on_varea_shrink = on_varea_shrink, /* Varea shrink handler */
    .on_varea_expand = on_varea_expand, /* Varea expand handler */
    .on_varea_split = on_varea_split,   /* Varea split handler */
    .on_varea_merge = on_varea_merge,   /* Varea merge handler */

    .on_varea_mremap = on_varea_mremap, /* Varea remap handler */
};
584
585 /**
586 * @brief DFS memory object structure
587 *
588 * Contains a standard memory object and an associated file pointer,
589 * used to maintain the relationship between memory mappings and files.
590 */
/**
 * @brief DFS memory object wrapper.
 *
 * Embeds the generic rt_mem_obj (must stay the first member so
 * rt_container_of in dfs_mem_obj_get_file() can recover this struct) and
 * ties it to the file that backs the mapping.
 */
struct dfs_mem_obj {
    struct rt_mem_obj mem_obj; /* Base memory object (callback table) */
    void *file;                /* Backing struct dfs_file * */
};
595
596 /**
597 * @brief Get or create memory mapping object for a file
598 *
599 * @param[in] file Pointer to the file descriptor structure
600 *
601 * @return rt_mem_obj_t Memory mapping object associated with the file
602 * - Returns existing object if already created
603 * - Creates and initializes new object if not exists
604 */
dfs_get_mem_obj(struct dfs_file * file)605 static rt_mem_obj_t dfs_get_mem_obj(struct dfs_file *file)
606 {
607 rt_mem_obj_t mobj = file->mmap_context;
608 if (!mobj)
609 {
610 struct dfs_mem_obj *dfs_mobj;
611 dfs_file_lock();
612 dfs_mobj = rt_malloc(sizeof(*dfs_mobj));
613 if (dfs_mobj)
614 {
615 dfs_mobj->file = file;
616 mobj = &dfs_mobj->mem_obj;
617 memcpy(mobj, &_mem_obj, sizeof(*mobj));
618 file->mmap_context = mobj;
619 }
620 dfs_file_unlock();
621 }
622 return mobj;
623 }
624
625 /**
626 * @brief Get the file descriptor from memory mapping object
627 *
628 * @param[in] mem_obj Pointer to the memory mapping object
629 *
630 * @return void* Pointer to the associated file descriptor structure
631 *
632 * @note This function uses rt_container_of macro to get the containing
633 * dfs_mem_obj structure from its mem_obj member.
634 */
/**
 * @brief Recover the file pointer from its embedded memory object.
 *
 * @param[in] mem_obj Memory object embedded in a struct dfs_mem_obj
 *
 * @return The associated struct dfs_file * stored at creation time.
 *
 * @note mem_obj is the first member of struct dfs_mem_obj, so
 *       rt_container_of simply rewinds to the enclosing wrapper.
 */
static void *dfs_mem_obj_get_file(rt_mem_obj_t mem_obj)
{
    return rt_container_of(mem_obj, struct dfs_mem_obj, mem_obj)->file;
}
641
642 /**
643 * @brief Map a file into memory
644 *
645 * @param[in] file Pointer to the file descriptor structure
646 * @param[in,out] mmap2 Pointer to memory mapping arguments structure
647 * - Input: Contains mapping parameters (addr, length, etc.)
648 * - Output: Contains the mapped address in ret field if successful
649 *
650 * @return int Error code:
651 * - EINVAL: Invalid parameters
652 * - Other errors from underlying mapping operations
653 *
654 * @note This function creates a virtual address area in user space (lwp) for the file mapping.
655 * The actual mapping is performed by _map_data_to_uspace().
656 */
dfs_file_mmap(struct dfs_file * file,struct dfs_mmap2_args * mmap2)657 int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
658 {
659 rt_err_t ret = -EINVAL;
660 void *map_vaddr;
661
662 LOG_I("mmap2 args addr: %p length: 0x%x prot: %d flags: 0x%x pgoffset: 0x%x",
663 mmap2->addr, mmap2->length, mmap2->prot, mmap2->flags, mmap2->pgoffset);
664 if (file && file->vnode)
665 {
666 if (file->vnode->aspace)
667 {
668 /* create a va area in user space (lwp) */
669 map_vaddr = _map_data_to_uspace(mmap2, file, &ret);
670 if (map_vaddr)
671 {
672 mmap2->ret = map_vaddr;
673 LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
674 }
675 }
676 else
677 {
678 LOG_E("File mapping is not supported, file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
679 }
680 }
681
682 return ret;
683 }
684 #else
dfs_file_mmap(struct dfs_file * file,struct dfs_mmap2_args * mmap2)685 int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
686 {
687 LOG_E("File mapping support is not enabled, file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
688 LOG_E("mmap2 args addr: %p length: 0x%x prot: %d flags: 0x%x pgoffset: 0x%x",
689 mmap2->addr, mmap2->length, mmap2->prot, mmap2->flags, mmap2->pgoffset);
690
691 return -EPERM;
692 }
693 #endif
694