/*
 * Copyright (c) 2006-2025 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-05-05     RTT          Implement mnt in dfs v2.0
 * 2023-10-23     Shell        fix synchronization of data to icache
 */

#define DBG_TAG "dfs.pcache"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>

#include <dfs_pcache.h>
#include <dfs_dentry.h>
#include <dfs_mnt.h>

#include <rthw.h>

#ifdef RT_USING_PAGECACHE

#include <mm_page.h>
#include <mm_private.h>
#include <mmu.h>
#include <tlb.h>

#ifndef RT_PAGECACHE_COUNT
#define RT_PAGECACHE_COUNT          4096
#endif

#ifndef RT_PAGECACHE_ASPACE_COUNT
#define RT_PAGECACHE_ASPACE_COUNT   1024
#endif

#ifndef RT_PAGECACHE_PRELOAD
#define RT_PAGECACHE_PRELOAD        4
#endif

#ifndef RT_PAGECACHE_GC_WORK_LEVEL
#define RT_PAGECACHE_GC_WORK_LEVEL  90
#endif

#ifndef RT_PAGECACHE_GC_STOP_LEVEL
#define RT_PAGECACHE_GC_STOP_LEVEL  70
#endif
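
/*
 * Worked example (illustrative, using the default values above): with
 * RT_PAGECACHE_COUNT = 4096, background GC is requested once the cache
 * grows past 4096 * 90 / 100 = 3686 pages, and each release pass trims
 * it back toward 4096 * 70 / 100 = 2867 pages.
 */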

#define PCACHE_MQ_GC    1
#define PCACHE_MQ_WB    2

struct dfs_aspace_mmap_obj
{
    rt_uint32_t cmd;
    struct rt_mailbox *ack;
    struct dfs_file *file;
    struct rt_varea *varea;
    void *data;
};

struct dfs_pcache_mq_obj
{
    struct rt_mailbox *ack;
    rt_uint32_t cmd;
};

static struct dfs_page *dfs_page_lookup(struct dfs_file *file, off_t pos);
static void dfs_page_ref(struct dfs_page *page);
static int dfs_page_inactive(struct dfs_page *page);
static int dfs_page_remove(struct dfs_page *page);
static void dfs_page_release(struct dfs_page *page);
static int dfs_page_dirty(struct dfs_page *page);

static int dfs_aspace_release(struct dfs_aspace *aspace);

static int dfs_aspace_lock(struct dfs_aspace *aspace);
static int dfs_aspace_unlock(struct dfs_aspace *aspace);

static int dfs_pcache_lock(void);
static int dfs_pcache_unlock(void);


static struct dfs_pcache __pcache;

/**
 * @brief Perform garbage collection on an address space to release pages
 *
 * This function attempts to release a specified number of pages from both inactive
 * and active lists of the given address space. It prioritizes releasing pages from
 * the inactive list first before moving to the active list.
 *
 * @param[in] aspace Pointer to the address space structure to perform GC on
 * @param[in] count  Number of pages to attempt to release
 *
 * @return Number of pages actually released (count - remaining)
 */
static int dfs_aspace_gc(struct dfs_aspace *aspace, int count)
{
    int cnt = count;

    if (aspace)
    {
        dfs_aspace_lock(aspace);

        if (aspace->pages_count > 0)
        {
            struct dfs_page *page = RT_NULL;
            rt_list_t *node = aspace->list_inactive.next;

            while (cnt && node != &aspace->list_active)
            {
                page = rt_list_entry(node, struct dfs_page, space_node);
                node = node->next;
                if (dfs_page_remove(page) == 0)
                {
                    cnt --;
                }
            }

            node = aspace->list_active.next;
            while (cnt && node != &aspace->list_inactive)
            {
                page = rt_list_entry(node, struct dfs_page, space_node);
                node = node->next;
                if (dfs_page_remove(page) == 0)
                {
                    cnt --;
                }
            }
        }

        dfs_aspace_unlock(aspace);
    }

    return count - cnt;
}

/**
 * @brief Release page cache entries to free up memory
 *
 * This function attempts to release a specified number of page cache entries.
 * If count is 0, it calculates the number of pages to release based on the
 * current cache size and GC stop level. It first tries to release from inactive
 * list, then from active list if needed.
 *
 * @param[in] count Number of pages to release. If 0, calculates automatically
 *                  based on current cache size and GC stop level.
 *
 * @note The function uses LRU (Least Recently Used) policy by prioritizing
 *       inactive list over active list.
 */
void dfs_pcache_release(size_t count)
{
    rt_list_t *node = RT_NULL;
    struct dfs_aspace *aspace = RT_NULL;

    dfs_pcache_lock();

    if (count == 0)
    {
        count = rt_atomic_load(&(__pcache.pages_count)) - RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_STOP_LEVEL / 100;
    }

    node = __pcache.list_inactive.next;
    while (count && node != &__pcache.list_active)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace)
        {
            count -= dfs_aspace_gc(aspace, count);
            dfs_aspace_release(aspace);
        }
    }

    node = __pcache.list_active.next;
    while (count && node != &__pcache.list_inactive)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace)
        {
            count -= dfs_aspace_gc(aspace, count);
        }
    }

    dfs_pcache_unlock();
}
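
/*
 * Usage sketch (illustrative): passing 0 lets the function size the batch
 * itself. With the defaults above and, say, 3800 cached pages it would try
 * to release 3800 - 4096 * 70 / 100 = 933 pages:
 *
 *     dfs_pcache_release(0);  // trim back to the GC stop level
 *     dfs_pcache_release(16); // or release up to 16 pages explicitly
 */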

/**
 * @brief Clean up page cache entries for a specific mount point
 *
 * This function iterates through both inactive and active lists of the page cache
 * to clean up entries associated with the given mount point. It performs cleanup
 * and calls the provided callback function for each matching address space.
 *
 * @param[in] mnt Pointer to the mount point structure to clean up
 * @param[in] cb  Callback function to be called for each matching address space
 *                The callback takes an address space pointer and returns an integer
 */
static void _pcache_clean(struct dfs_mnt *mnt, int (*cb)(struct dfs_aspace *aspace))
{
    rt_list_t *node = RT_NULL;
    struct dfs_aspace *aspace = RT_NULL;

    dfs_pcache_lock();

    node = __pcache.list_inactive.next;
    while (node != &__pcache.list_active)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace && aspace->mnt == mnt)
        {
            dfs_aspace_clean(aspace);
            cb(aspace);
        }
    }

    node = __pcache.list_active.next;
    while (node != &__pcache.list_inactive)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace && aspace->mnt == mnt)
        {
            dfs_aspace_clean(aspace);
            cb(aspace);
        }
    }

    dfs_pcache_unlock();
}

/**
 * @brief Unmount and clean up page cache for a specific mount point
 *
 * This function cleans up all page cache entries associated with the given mount point
 * by calling _pcache_clean() with dfs_aspace_release as the callback function.
 * It will release all address spaces and their pages belonging to this mount point.
 *
 * @param[in] mnt Pointer to the mount point structure to be unmounted
 *
 * @note This function is typically called during filesystem unmount operation
 * @see _pcache_clean()
 */
void dfs_pcache_unmount(struct dfs_mnt *mnt)
{
    _pcache_clean(mnt, dfs_aspace_release);
}

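/**
 * @brief No-op callback passed to _pcache_clean() by dfs_pcache_clean(), so
 *        matching address spaces are cleaned but not released
 */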
static int _dummy_cb(struct dfs_aspace *aspace)
{
    return 0;
}

/**
 * @brief Clean page cache for a specific mount point without releasing address spaces
 *
 * This function cleans up all page cache entries associated with the given mount point
 * but keeps the address spaces intact by using a dummy callback function.
 *
 * @param[in] mnt Pointer to the mount point structure to be cleaned
 *
 * @note Typical usage scenarios:
 * - Filesystem maintenance operations that require cache invalidation
 * - Force refreshing cached data without unmounting
 * - Handling external modifications to mounted filesystems
 *
 * @see _pcache_clean()
 */
void dfs_pcache_clean(struct dfs_mnt *mnt)
{
    _pcache_clean(mnt, _dummy_cb);
}

/**
 * @brief Check and enforce page cache memory limit
 *
 * This function checks if the current page cache usage exceeds the working level threshold.
 * If exceeded, it will trigger page cache release up to 4 times to reduce cache size.
 *
 * @return Always returns 0 indicating success
 */
static int dfs_pcache_limit_check(void)
{
    int index = 4;

    while (index && rt_atomic_load(&(__pcache.pages_count)) > RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
    {
        dfs_pcache_release(0);
        index --;
    }

    return 0;
}

/**
 * @brief Page cache management thread
 *
 * This is the main worker thread for page cache management. It handles:
 * - Garbage collection (GC) requests to free up memory
 * - Write-back (WB) requests to flush dirty pages to storage
 *
 * @param[in] parameter Thread parameter (unused)
 *
 * @note The thread runs in an infinite loop processing messages from the cache message queue:
 * - For GC commands: calls dfs_pcache_limit_check() to free pages when cache is full
 * - For WB commands: flushes dirty pages that have been dirty for at least 500ms
 * - Processes up to 4 dirty pages per WB command to prevent thread starvation
 */
static void dfs_pcache_thread(void *parameter)
{
    struct dfs_pcache_mq_obj work;

    while (1)
    {
        if (rt_mq_recv(__pcache.mqueue, &work, sizeof(work), RT_WAITING_FOREVER) == sizeof(work))
        {
            if (work.cmd == PCACHE_MQ_GC)
            {
                dfs_pcache_limit_check();
            }
            else if (work.cmd == PCACHE_MQ_WB)
            {
                int count = 0;
                rt_list_t *node;
                struct dfs_page *page = 0;

                while (1)
                {
                    /* try to get dirty page */
                    dfs_pcache_lock();
                    page = 0;
                    rt_list_for_each(node, &__pcache.list_active)
                    {
                        if (node != &__pcache.list_inactive)
                        {
                            struct dfs_aspace *aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
                            dfs_aspace_lock(aspace);
                            if (aspace->list_dirty.next != &aspace->list_dirty)
                            {
                                page = rt_list_entry(aspace->list_dirty.next, struct dfs_page, dirty_node);
                                dfs_page_ref(page);
                                dfs_aspace_unlock(aspace);
                                break;
                            }
                            else
                            {
                                page = RT_NULL;
                            }
                            dfs_aspace_unlock(aspace);
                        }
                    }
                    dfs_pcache_unlock();

                    if (page)
                    {
                        struct dfs_aspace *aspace = page->aspace;

                        dfs_aspace_lock(aspace);
                        if (page->is_dirty == 1 && aspace->vnode)
                        {
                            if (rt_tick_get_millisecond() - page->tick_ms >= 500)
                            {
                                if (aspace->vnode->size < page->fpos + page->size)
                                {
                                    page->len = aspace->vnode->size - page->fpos;
                                }
                                else
                                {
                                    page->len = page->size;
                                }
                                if (aspace->ops->write)
                                {
                                    aspace->ops->write(page);
                                }

                                page->is_dirty = 0;

                                if (page->dirty_node.next != RT_NULL)
                                {
                                    rt_list_remove(&page->dirty_node);
                                    page->dirty_node.next = RT_NULL;
                                }
                            }
                        }
                        dfs_page_release(page);
                        dfs_aspace_unlock(aspace);
                    }
                    else
                    {
                        break;
                    }

                    rt_thread_mdelay(5);

                    count ++;
                    if (count >= 4)
                    {
                        break;
                    }
                }
            }
        }
    }
}

/**
 * @brief Initialize the page cache system
 *
 * This function initializes the global page cache structure including:
 * - Hash table for address space lookup
 * - Active and inactive page lists
 * - Page count tracking
 * - Mutex for thread safety
 * - Message queue for cache operations
 * - Worker thread for background tasks
 *
 * @return 0 on success, negative error code on failure
 *
 * @note This function is automatically called during system initialization
 * via INIT_PREV_EXPORT macro. It sets up all necessary infrastructure
 * for page cache management.
 */
static int dfs_pcache_init(void)
{
    rt_thread_t tid;

    for (int i = 0; i < RT_PAGECACHE_HASH_NR; i++)
    {
        rt_list_init(&__pcache.head[i]);
    }

    rt_list_init(&__pcache.list_active);
    rt_list_init(&__pcache.list_inactive);
    rt_list_insert_after(&__pcache.list_active, &__pcache.list_inactive);

    rt_atomic_store(&(__pcache.pages_count), 0);

    rt_mutex_init(&__pcache.lock, "pcache", RT_IPC_FLAG_PRIO);

    __pcache.mqueue = rt_mq_create("pcache", sizeof(struct dfs_pcache_mq_obj), 1024, RT_IPC_FLAG_FIFO);
    tid = rt_thread_create("pcache", dfs_pcache_thread, 0, 8192, 25, 5);
    if (tid)
    {
        rt_thread_startup(tid);
    }

    __pcache.last_time_wb = rt_tick_get_millisecond();

    return 0;
}
INIT_PREV_EXPORT(dfs_pcache_init);

/**
 * @brief Send a command to page cache message queue
 *
 * This function sends a command to the page cache message queue for processing
 * by the cache management thread. It waits for the message to be sent.
 *
 * @param[in] cmd The command to send (PCACHE_MQ_GC or PCACHE_MQ_WB)
 *
 * @return RT_EOK on success, error code on failure
 *
 * @note This is used to trigger garbage collection or write-back operations
 * asynchronously through the cache management thread.
 */
static rt_ubase_t dfs_pcache_mq_work(rt_uint32_t cmd)
{
    rt_err_t err;
    struct dfs_pcache_mq_obj work = { 0 };

    work.cmd = cmd;

    err = rt_mq_send_wait(__pcache.mqueue, (const void *)&work, sizeof(struct dfs_pcache_mq_obj), 0);

    return err;
}

/**
 * @brief Lock the page cache global mutex
 *
 * @return Always returns 0.
 */
static int dfs_pcache_lock(void)
{
    rt_mutex_take(&__pcache.lock, RT_WAITING_FOREVER);
    return 0;
}

/**
 * @brief Unlock the page cache global mutex
 *
 * @return Always returns 0.
 */
static int dfs_pcache_unlock(void)
{
    rt_mutex_release(&__pcache.lock);
    return 0;
}

/**
 * @brief Calculate hash value for address space lookup
 *
 * This function computes a hash value based on mount point and path string.
 * It uses a simple string hashing algorithm combined with mount point pointer.
 *
 * @param[in] mnt  Pointer to the mount point structure
 * @param[in] path Path string to be hashed (can be NULL)
 *
 * @return Computed hash value within range [0, RT_PAGECACHE_HASH_NR-1]
 *
 * @note The hash algorithm combines:
 * - DJB2 hash algorithm for the path string
 * - XOR with mount point pointer
 * - Bit-masking with (RT_PAGECACHE_HASH_NR - 1) to fit the table size
 *   (equivalent to modulo when RT_PAGECACHE_HASH_NR is a power of two)
 */
static uint32_t dfs_aspace_hash(struct dfs_mnt *mnt, const char *path)
{
    uint32_t val = 0;

    if (path)
    {
        while (*path)
        {
            val = ((val << 5) + val) + *path++;
        }
    }

    return (val ^ (unsigned long)mnt) & (RT_PAGECACHE_HASH_NR - 1);
}
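
/*
 * Illustrative example: for each character the DJB2 step computes
 * val = val * 33 + c, so hashing "ab" yields ((0 * 33 + 'a') * 33 + 'b').
 * The result is then XORed with the mnt pointer and masked into the bucket
 * range, so the same path under different mounts lands in (usually)
 * different buckets.
 */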

/**
 * @brief Look up an address space in the page cache hash table
 *
 * This function searches for an address space matching the given dentry and operations
 * in the page cache hash table. If found, it increments the reference count of the
 * address space before returning it.
 *
 * @param[in] dentry Directory entry containing mount point and path information
 * @param[in] ops    Pointer to address space operations structure
 *
 * @return Pointer to the found address space on success, NULL if not found
 */
static struct dfs_aspace *dfs_aspace_hash_lookup(struct dfs_dentry *dentry, const struct dfs_aspace_ops *ops)
{
    struct dfs_aspace *aspace = RT_NULL;

    dfs_pcache_lock();
    rt_list_for_each_entry(aspace, &__pcache.head[dfs_aspace_hash(dentry->mnt, dentry->pathname)], hash_node)
    {
        if (aspace->mnt == dentry->mnt
            && aspace->ops == ops
            && !strcmp(aspace->pathname, dentry->pathname))
        {
            rt_atomic_add(&aspace->ref_count, 1);
            dfs_pcache_unlock();
            return aspace;
        }
    }
    dfs_pcache_unlock();

    return RT_NULL;
}

/**
 * @brief Insert an address space into page cache
 *
 * This function inserts the given address space into both the hash table and
 * inactive list of the page cache. It also increments the reference count of
 * the address space.
 *
 * @param[in,out] aspace Pointer to the address space to be inserted
 */
static void dfs_aspace_insert(struct dfs_aspace *aspace)
{
    uint32_t val = 0;

    val = dfs_aspace_hash(aspace->mnt, aspace->pathname);

    dfs_pcache_lock();
    rt_atomic_add(&aspace->ref_count, 1);
    rt_list_insert_after(&__pcache.head[val], &aspace->hash_node);
    rt_list_insert_before(&__pcache.list_inactive, &aspace->cache_node);
    dfs_pcache_unlock();
}

/**
 * @brief Remove an address space from page cache
 *
 * This function removes the given address space from both the hash table and
 * active/inactive lists of the page cache.
 *
 * @param[in,out] aspace Pointer to the address space to be removed
 */
static void dfs_aspace_remove(struct dfs_aspace *aspace)
{
    dfs_pcache_lock();
    if (aspace->hash_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->hash_node);
    }
    if (aspace->cache_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->cache_node);
    }
    dfs_pcache_unlock();
}

/**
 * @brief Move an address space to active list
 *
 * This function moves the specified address space from its current position
 * to the active list in the page cache. The active list contains frequently
 * accessed address spaces.
 *
 * @param[in,out] aspace Pointer to the address space to be activated
 *
 * @note The address space is inserted just before the head of the inactive
 *       list, which places it at the tail of the active list.
 *
 * @see dfs_aspace_inactive() for the opposite operation
 */
static void dfs_aspace_active(struct dfs_aspace *aspace)
{
    dfs_pcache_lock();
    if (aspace->cache_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->cache_node);
        rt_list_insert_before(&__pcache.list_inactive, &aspace->cache_node);
    }
    dfs_pcache_unlock();
}

/**
 * @brief Move an address space to inactive list
 *
 * This function moves the specified address space from its current position
 * to the inactive list in the page cache. The inactive list contains less
 * frequently accessed address spaces that are candidates for eviction.
 *
 * @param[in,out] aspace Pointer to the address space to be deactivated
 */
static void dfs_aspace_inactive(struct dfs_aspace *aspace)
{
    dfs_pcache_lock();
    if (aspace->cache_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->cache_node);
        rt_list_insert_before(&__pcache.list_active, &aspace->cache_node);
    }
    dfs_pcache_unlock();
}

/**
 * @brief Internal function to create a new address space for page cache
 *
 * This function allocates and initializes a new address space structure for page caching.
 * It sets up all necessary lists, locks, and initial values for the address space.
 *
 * @param[in] dentry Directory entry containing mount point and path information (can be NULL)
 * @param[in] vnode  Pointer to the vnode structure this address space will be associated with
 * @param[in] ops    Pointer to address space operations structure
 *
 * @return Pointer to the newly created address space on success, NULL on failure
 *
 * @note The created address space will be automatically inserted into the page cache
 * @see dfs_aspace_create() for the public interface to create address spaces
 */
static struct dfs_aspace *_dfs_aspace_create(struct dfs_dentry *dentry,
                                             struct dfs_vnode *vnode,
                                             const struct dfs_aspace_ops *ops)
{
    struct dfs_aspace *aspace;

    aspace = rt_calloc(1, sizeof(struct dfs_aspace));
    if (aspace)
    {
        rt_list_init(&aspace->list_active);
        rt_list_init(&aspace->list_inactive);
        rt_list_init(&aspace->list_dirty);
        rt_list_insert_after(&aspace->list_active, &aspace->list_inactive);

        aspace->avl_root.root_node = 0;
        aspace->avl_page = 0;

        rt_mutex_init(&aspace->lock, rt_thread_self()->parent.name, RT_IPC_FLAG_PRIO);
        rt_atomic_store(&aspace->ref_count, 1);

        aspace->pages_count = 0;
        aspace->vnode = vnode;
        aspace->ops = ops;

        if (dentry && dentry->mnt)
        {
            aspace->mnt = dentry->mnt;
            aspace->fullpath = rt_strdup(dentry->mnt->fullpath);
            aspace->pathname = rt_strdup(dentry->pathname);
        }

        dfs_aspace_insert(aspace);
    }

    return aspace;
}

/**
 * @brief Create or lookup an address space for page caching
 *
 * This function either creates a new address space or looks up an existing one
 * in the page cache hash table. If found, it updates the vnode reference and
 * activates the address space.
 *
 * @param[in] dentry Directory entry containing mount point and path info (can be NULL)
 * @param[in] vnode  Pointer to the vnode structure to associate with
 * @param[in] ops    Pointer to address space operations structure
 *
 * @return Pointer to the found/created address space on success, NULL on failure
 */
struct dfs_aspace *dfs_aspace_create(struct dfs_dentry *dentry,
                                     struct dfs_vnode *vnode,
                                     const struct dfs_aspace_ops *ops)
{
    struct dfs_aspace *aspace = RT_NULL;

    RT_ASSERT(vnode && ops);
    dfs_pcache_lock();
    if (dentry)
    {
        aspace = dfs_aspace_hash_lookup(dentry, ops);
    }

    if (!aspace)
    {
        aspace = _dfs_aspace_create(dentry, vnode, ops);
    }
    else
    {
        aspace->vnode = vnode;
        dfs_aspace_active(aspace);
    }
    dfs_pcache_unlock();
    return aspace;
}

/**
 * @brief Destroy an address space and release its resources
 *
 * This function decrements the reference count of the address space and marks it as inactive.
 * If the reference count reaches 1 and there are no pages left, it will be fully released.
 *
 * @param[in] aspace Pointer to the address space to be destroyed
 *
 * @return -EINVAL if aspace is NULL; note that the current implementation
 *         returns -EINVAL even after a successful release, so callers should
 *         not rely on the return value
 */
int dfs_aspace_destroy(struct dfs_aspace *aspace)
{
    int ret = -EINVAL;

    if (aspace)
    {
        dfs_pcache_lock();
        dfs_aspace_lock(aspace);
        rt_atomic_sub(&aspace->ref_count, 1);
        RT_ASSERT(rt_atomic_load(&aspace->ref_count) > 0);
        dfs_aspace_inactive(aspace);
        aspace->vnode = RT_NULL;
        if (dfs_aspace_release(aspace) != 0)
        {
            dfs_aspace_unlock(aspace);
        }
        dfs_pcache_unlock();
    }

    return ret;
}

/**
 * @brief Release an address space when its reference count reaches 1
 *
 * This function checks if the address space can be safely released by verifying:
 * - Reference count is 1 (only caller holds reference)
 * - No pages remain in the address space
 * If conditions are met, it removes the space from cache and frees all resources.
 *
 * @param[in] aspace Pointer to the address space to be released
 *
 * @return 0 on successful release, -1 if space cannot be released yet
 */
static int dfs_aspace_release(struct dfs_aspace *aspace)
{
    int ret = -1;

    if (aspace)
    {
        dfs_pcache_lock();
        dfs_aspace_lock(aspace);

        if (rt_atomic_load(&aspace->ref_count) == 1 && aspace->pages_count == 0)
        {
            dfs_aspace_remove(aspace);
            if (aspace->fullpath)
            {
                rt_free(aspace->fullpath);
            }
            if (aspace->pathname)
            {
                rt_free(aspace->pathname);
            }
            rt_mutex_detach(&aspace->lock);
            rt_free(aspace);
            ret = 0;
        }
        else
        {
            dfs_aspace_unlock(aspace);
        }
        dfs_pcache_unlock();
    }

    return ret;
}

/**
 * @brief Dump address space page information for debugging
 *
 * This function prints detailed information about pages in the given address space.
 * It can optionally filter to show only dirty pages or all pages.
 *
 * @param[in] aspace    Pointer to the address space to dump
 * @param[in] is_dirty  Flag indicating whether to show only dirty pages (1) or all pages (0)
 *
 * @return Always returns 0
 */
static int _dfs_aspace_dump(struct dfs_aspace *aspace, int is_dirty)
{
    if (aspace)
    {
        rt_list_t *next;
        struct dfs_page *page;

        dfs_aspace_lock(aspace);
        if (aspace->pages_count > 0)
        {
            rt_list_for_each(next, &aspace->list_inactive)
            {
                if (next != &aspace->list_active)
                {
                    page = rt_list_entry(next, struct dfs_page, space_node);
                    if (is_dirty && page->is_dirty)
                    {
                        rt_kprintf("    pages >> fpos: %d index :%d is_dirty: %d\n", page->fpos, page->fpos / ARCH_PAGE_SIZE, page->is_dirty);
                    }
                    else if (is_dirty == 0)
                    {
                        rt_kprintf("    pages >> fpos: %d index :%d is_dirty: %d\n", page->fpos, page->fpos / ARCH_PAGE_SIZE, page->is_dirty);
                    }
                }
            }
        }
        else
        {
            rt_kprintf("    pages >> empty\n");
        }
        dfs_aspace_unlock(aspace);
    }
    return 0;
}

/**
 * @brief Dump page cache information for debugging purposes
 *
 * This function prints detailed information about the page cache, including:
 * - Total page count and capacity
 * - File paths and page counts for each address space
 * - Optional detailed page information (with --dump or --dirty flags)
 *
 * @param[in] argc Number of command line arguments
 * @param[in] argv Command line arguments array
 *
 * @return Always returns 0
 *
 * @see _dfs_aspace_dump() for the actual page dumping implementation
 */
static int dfs_pcache_dump(int argc, char **argv)
{
    int dump = 0;
    rt_list_t *node;
    struct dfs_aspace *aspace;

    if (argc == 2)
    {
        if (strcmp(argv[1], "--dump") == 0)
        {
            dump = 1;
        }
        else if (strcmp(argv[1], "--dirty") == 0)
        {
            dump = 2;
        }
        else
        {
            rt_kprintf("dfs page cache dump\n");
            rt_kprintf("usage: dfs_cache\n");
            rt_kprintf("       dfs_cache --dump\n");
            rt_kprintf("       dfs_cache --dirty\n");
            return 0;
        }
    }

    dfs_pcache_lock();

    rt_kprintf("total pages count: %d / %d\n", rt_atomic_load(&(__pcache.pages_count)), RT_PAGECACHE_COUNT);

    rt_list_for_each(node, &__pcache.list_active)
    {
        if (node != &__pcache.list_inactive)
        {
            aspace = rt_list_entry(node, struct dfs_aspace, cache_node);

            if (aspace->mnt)
            {
                rt_kprintf("file: %s%s pages: %d\n", aspace->fullpath, aspace->pathname, aspace->pages_count);
            }
            else
            {
                rt_kprintf("unknown type, pages: %d\n", aspace->pages_count);
            }

            if (dump > 0)
            {
                _dfs_aspace_dump(aspace, dump == 2 ? 1 : 0);
            }
        }
    }

    dfs_pcache_unlock();

    return 0;
}
MSH_CMD_EXPORT_ALIAS(dfs_pcache_dump, dfs_cache, dump dfs page cache);
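
/*
 * Example (msh console; output is illustrative):
 *
 *     msh> dfs_cache
 *     total pages count: 120 / 4096
 *     file: /mnt/sd/log.txt pages: 16
 *
 * Pass --dump for per-page detail of every aspace, or --dirty to list
 * only dirty pages.
 */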

/**
 * @brief Unmap all memory mappings for a page
 *
 * This function unmaps all virtual memory areas that have mapped this physical page.
 * It also marks the page as dirty if it contains valid data that hasn't been written back.
 *
 * @param[in,out] page Pointer to the page structure to unmap
 *
 * @return Always returns 0
 */
static int dfs_page_unmap(struct dfs_page *page)
{
    rt_list_t *next;
    struct dfs_mmap *map;

    next = page->mmap_head.next;

    if (next != &page->mmap_head && page->fpos < page->aspace->vnode->size)
    {
        dfs_page_dirty(page);
    }

    while (next != &page->mmap_head)
    {
        map = rt_list_entry(next, struct dfs_mmap, mmap_node);
        next = next->next;

        if (map)
        {
            rt_varea_t varea;
            void *vaddr;

            varea = rt_aspace_query(map->aspace, map->vaddr);
            RT_ASSERT(varea);
            vaddr = dfs_aspace_vaddr(varea, page->fpos);

            rt_varea_unmap_page(varea, vaddr);

            rt_free(map);
        }
    }

    rt_list_init(&page->mmap_head);

    return 0;
}

/**
 * @brief Create a new page structure for page cache
 *
 * This function allocates and initializes a new page structure for the page cache.
 * It allocates physical memory for the page and initializes its metadata including:
 * - Memory mapping list head
 * - Reference count
 * - Physical page allocation with affinity hint
 *
 * @param[in] pos File position used to determine page allocation affinity
 *
 * @return Pointer to the newly created page structure on success, NULL on failure
 */
static struct dfs_page *dfs_page_create(off_t pos)
{
    struct dfs_page *page = RT_NULL;
    int affid = RT_PAGE_PICK_AFFID(pos);

    page = rt_calloc(1, sizeof(struct dfs_page));
    if (page)
    {
        page->page = rt_pages_alloc_tagged(0, affid, PAGE_ANY_AVAILABLE);
        if (page->page)
        {
            /* memset(page->page, 0x00, ARCH_PAGE_SIZE); */
            rt_list_init(&page->mmap_head);
            rt_atomic_store(&(page->ref_count), 1);
        }
        else
        {
            LOG_E("page alloc failed!\n");
            rt_free(page);
            page = RT_NULL;
        }
    }

    return page;
}

/**
 * @brief Increment the reference count of a page
 *
 * This function atomically increases the reference count of the specified page.
 * It is used to track how many times the page is being referenced/used.
 *
 * @param[in,out] page Pointer to the page structure whose reference count will be incremented
 */
static void dfs_page_ref(struct dfs_page *page)
{
    rt_atomic_add(&(page->ref_count), 1);
}

/**
 * @brief Release a page from page cache when reference count reaches zero
 *
 * This function decrements the reference count of a page and performs cleanup
 * when the count reaches zero. It handles:
 * - Unmapping all virtual mappings of the page
 * - Writing back dirty pages to storage
 * - Freeing physical memory and page structure
 *
 * @param[in,out] page Pointer to the page structure to be released
 */
static void dfs_page_release(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    rt_atomic_sub(&(page->ref_count), 1);

    if (rt_atomic_load(&(page->ref_count)) == 0)
    {
        dfs_page_unmap(page);

        if (page->is_dirty == 1 && aspace->vnode)
        {
            if (aspace->vnode->size < page->fpos + page->size)
            {
                page->len = aspace->vnode->size - page->fpos;
            }
            else
            {
                page->len = page->size;
            }
            if (aspace->ops->write)
            {
                aspace->ops->write(page);
            }
            page->is_dirty = 0;
        }
        RT_ASSERT(page->is_dirty == 0);

        rt_pages_free(page->page, 0);
        page->page = RT_NULL;
        rt_free(page);
    }

    dfs_aspace_unlock(aspace);
}

/**
 * @brief Compare file positions for page alignment
 *
 * This function compares two file positions to determine if they belong to the same page.
 * It aligns fpos down to a page boundary before comparing it against value.
 *
 * @param[in] fpos   File position to compare (byte offset)
 * @param[in] value  Reference file position to compare against (byte offset)
 *
 * @return 0 if positions are in the same page, negative if fpos is before value,
 *         positive if fpos is after value
 */
static int dfs_page_compare(off_t fpos, off_t value)
{
    return fpos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE - value;
}
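
/*
 * Illustrative example with 4 KiB pages: dfs_page_compare(5000, 4096)
 * aligns 5000 down to 4096 and returns 0, i.e. offset 5000 falls inside
 * the page cached at fpos 4096; dfs_page_compare(5000, 8192) returns a
 * negative value, steering the AVL lookup into the left subtree.
 */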

/**
 * @brief Insert a page into the AVL tree of an address space
 *
 * This function inserts a page into the AVL tree of the specified address space.
 * The tree is ordered by the file position (fpos) of pages. If a page with the
 * same fpos already exists, the insertion fails.
 *
 * @param[in] aspace Pointer to the address space containing the AVL tree
 * @param[in,out] page Pointer to the page structure to be inserted
 *
 * @return 0 on successful insertion, -1 if a page with same fpos already exists
 *
 * @note The function:
 * - Maintains AVL tree balance after insertion
 * - Updates the aspace's avl_page pointer to the newly inserted page
 * - Uses file position (fpos) as the ordering key
 */
static int _dfs_page_insert(struct dfs_aspace *aspace, struct dfs_page *page)
{
    struct dfs_page *tmp;
    struct util_avl_struct *current = NULL;
    struct util_avl_struct **next = &(aspace->avl_root.root_node);

    /* Figure out where to put new node */
    while (*next)
    {
        current = *next;
        tmp = rt_container_of(current, struct dfs_page, avl_node);

        if (page->fpos < tmp->fpos)
            next = &(current->avl_left);
        else if (page->fpos > tmp->fpos)
            next = &(current->avl_right);
        else
            return -1;
    }

    /* Add new node and rebalance tree. */
    util_avl_link(&page->avl_node, current, next);
    util_avl_rebalance(current, &aspace->avl_root);
    aspace->avl_page = page;

    return 0;
}

/**
 * @brief Remove a page from the AVL tree of an address space
 *
 * This function removes a page from the AVL tree of the specified address space.
 * It also clears the cached AVL page pointer if it points to the page being removed.
 *
 * @param[in,out] aspace Pointer to the address space containing the AVL tree
 * @param[in,out] page   Pointer to the page structure to be removed
 */
static void _dfs_page_remove(struct dfs_aspace *aspace, struct dfs_page *page)
{
    if (aspace->avl_page && aspace->avl_page == page)
    {
        aspace->avl_page = 0;
    }

    util_avl_remove(&page->avl_node, &aspace->avl_root);
}

/**
 * @brief Lock an address space for thread-safe operations
 *
 * @param[in,out] aspace Pointer to the address space structure to be locked
 *
 * @return Always returns 0 indicating success
 *
 * @note The lock must be released using dfs_aspace_unlock()
 * @see dfs_aspace_unlock()
 */
static int dfs_aspace_lock(struct dfs_aspace *aspace)
{
    rt_mutex_take(&aspace->lock, RT_WAITING_FOREVER);
    return 0;
}

/**
 * @brief Unlock an address space after thread-safe operations
 *
 * @param[in,out] aspace Pointer to the address space structure to be unlocked
 *
 * @return Always returns 0 indicating success
 *
 * @note Must be called after dfs_aspace_lock() to release the lock
 * @see dfs_aspace_lock()
 */
static int dfs_aspace_unlock(struct dfs_aspace *aspace)
{
    rt_mutex_release(&aspace->lock);
    return 0;
}

/**
 * @brief Insert a page into the address space's page cache
 *
 * This function inserts a page into the active list of the address space's page cache.
 * It maintains the page count and performs eviction if the cache exceeds its capacity.
 *
 * @param[in] page Pointer to the page structure to be inserted
 *
 * @return Always returns 0 indicating success
 */
static int dfs_page_insert(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    rt_list_insert_before(&aspace->list_inactive, &page->space_node);
    aspace->pages_count ++;

    if (_dfs_page_insert(aspace, page))
    {
        RT_ASSERT(0);
    }

    if (aspace->pages_count > RT_PAGECACHE_ASPACE_COUNT)
    {
        rt_list_t *next = aspace->list_active.next;

        if (next != &aspace->list_inactive)
        {
            struct dfs_page *tmp = rt_list_entry(next, struct dfs_page, space_node);
            dfs_page_inactive(tmp);
        }
    }

    rt_atomic_add(&(__pcache.pages_count), 1);

    dfs_aspace_unlock(aspace);

    return 0;
}

/**
 * @brief Remove a page from the address space's page cache
 *
 * This function safely removes a page from both the space and dirty lists of the address space.
 * It decrements the reference count and releases the page if it's the last reference.
 *
 * @param[in] page Pointer to the page structure to be removed
 *
 * @return 0 if the page was successfully removed, -1 if the page is still referenced
 */
static int dfs_page_remove(struct dfs_page *page)
{
    int ret = -1;
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    if (rt_atomic_load(&(page->ref_count)) == 1)
    {
        if (page->space_node.next != RT_NULL)
        {
            rt_list_remove(&page->space_node);
            page->space_node.next = RT_NULL;
            aspace->pages_count--;
            _dfs_page_remove(aspace, page);
        }
        if (page->dirty_node.next != RT_NULL)
        {
            rt_list_remove(&page->dirty_node);
            page->dirty_node.next = RT_NULL;
        }

        rt_atomic_sub(&(__pcache.pages_count), 1);

        dfs_page_release(page);
        ret = 0;
    }

    dfs_aspace_unlock(aspace);

    return ret;
}

/**
 * @brief Move a page to active list
 *
 * This function moves a page to the active list
 * within its associated address space.
 *
 * @param[in] page The page to be moved to active list
 * @return int Always returns 0 on success
 */
static int dfs_page_active(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);
    if (page->space_node.next != RT_NULL)
    {
        rt_list_remove(&page->space_node);
        rt_list_insert_before(&aspace->list_inactive, &page->space_node);
    }
    dfs_aspace_unlock(aspace);

    return 0;
}

/**
 * @brief Move a page to inactive list
 *
 * This function moves a page to the inactive list
 * within its associated address space.
 *
 * @param[in] page The page to be moved to inactive list
 * @return int Always returns 0 on success
 */
static int dfs_page_inactive(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);
    if (page->space_node.next != RT_NULL)
    {
        rt_list_remove(&page->space_node);
        rt_list_insert_before(&aspace->list_active, &page->space_node);
    }
    dfs_aspace_unlock(aspace);

    return 0;
}

/**
 * @brief Mark a page as dirty and manage dirty list
 *
 * This function marks a page as dirty and adds it to the dirty list if not already present.
 * It also triggers a write-back operation if more than 1 second has passed since last write-back.
 *
 * @param[in] page The page to be marked as dirty
 * @return int Always returns 0 on success
 */
static int dfs_page_dirty(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    if (page->dirty_node.next == RT_NULL && page->space_node.next != RT_NULL)
    {
        rt_list_insert_before(&aspace->list_dirty, &page->dirty_node);
    }

    page->is_dirty = 1;
    page->tick_ms = rt_tick_get_millisecond();

    if (rt_tick_get_millisecond() - __pcache.last_time_wb >= 1000)
    {
        dfs_pcache_mq_work(PCACHE_MQ_WB);
        __pcache.last_time_wb = rt_tick_get_millisecond();
    }

    dfs_aspace_unlock(aspace);

    return 0;
}

/**
 * @brief Search for a page in the address space AVL tree
 *
 * This function searches for a page at the specified file position in the address space's AVL tree.
 * If found, it marks the page as active and increments its reference count.
 *
 * @param[in] aspace The address space to search in
 * @param[in] fpos The file position to search for
 * @return struct dfs_page* The found page, or RT_NULL if not found
 */
static struct dfs_page *dfs_page_search(struct dfs_aspace *aspace, off_t fpos)
{
    int cmp;
    struct dfs_page *page;
    struct util_avl_struct *avl_node;

    dfs_aspace_lock(aspace);

    if (aspace->avl_page && dfs_page_compare(fpos, aspace->avl_page->fpos) == 0)
    {
        page = aspace->avl_page;
        dfs_page_active(page);
        dfs_page_ref(page);
        dfs_aspace_unlock(aspace);
        return page;
    }

    avl_node = aspace->avl_root.root_node;
    while (avl_node)
    {
        page = rt_container_of(avl_node, struct dfs_page, avl_node);
        cmp = dfs_page_compare(fpos, page->fpos);

        if (cmp < 0)
        {
            avl_node = avl_node->avl_left;
        }
        else if (cmp > 0)
        {
            avl_node = avl_node->avl_right;
        }
        else
        {
            aspace->avl_page = page;
            dfs_page_active(page);
            dfs_page_ref(page);
            dfs_aspace_unlock(aspace);
            return page;
        }
    }

    dfs_aspace_unlock(aspace);

    return RT_NULL;
}

/**
 * @brief Load a page from file into address space cache
 *
 * This function creates a new page cache entry for the specified file position,
 * reads the content from the file into the page, and inserts it into the cache.
 * The page's reference count is incremented to prevent premature eviction.
 *
 * @param[in] file Pointer to the file structure containing the vnode and aspace
 * @param[in] pos  File position to load (will be page-aligned)
 *
 * @return Pointer to the newly created and loaded page on success,
 *         NULL on failure or invalid parameters
 */
static struct dfs_page *dfs_aspace_load_page(struct dfs_file *file, off_t pos)
{
    struct dfs_page *page = RT_NULL;

    if (file && file->vnode && file->vnode->aspace)
    {
        struct dfs_vnode *vnode = file->vnode;
        struct dfs_aspace *aspace = vnode->aspace;

        page = dfs_page_create(pos);
        if (page)
        {
            page->aspace = aspace;
            page->size = ARCH_PAGE_SIZE;
            page->fpos = RT_ALIGN_DOWN(pos, ARCH_PAGE_SIZE);
            aspace->ops->read(file, page);
            page->ref_count ++;

            dfs_page_insert(page);
        }
    }

    return page;
}

/**
 * @brief Look up a page in the cache and load it if not found
 *
 * This function searches for a page at the specified position in the file's address space.
 * If the page isn't found, it loads up to RT_PAGECACHE_PRELOAD consecutive pages starting at the requested position.
 * It also triggers garbage collection when the cache reaches certain thresholds.
 *
 * @param[in] file Pointer to the file structure containing the vnode and aspace
 * @param[in] pos  File position to look up (will be page-aligned)
 *
 * @return Pointer to the found or newly loaded page on success,
 *         NULL if the page couldn't be found or loaded
 */
static struct dfs_page *dfs_page_lookup(struct dfs_file *file, off_t pos)
{
    struct dfs_page *page = RT_NULL;
    struct dfs_aspace *aspace = file->vnode->aspace;

    dfs_aspace_lock(aspace);
    page = dfs_page_search(aspace, pos);
    if (!page)
    {
        int count = RT_PAGECACHE_PRELOAD;
        struct dfs_page *tmp = RT_NULL;
        off_t fpos = pos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE;

        do
        {
            page = dfs_aspace_load_page(file, fpos);
            if (page)
            {
                if (tmp == RT_NULL)
                {
                    tmp = page;
                }
                else
                {
                    dfs_page_release(page);
                }
            }
            else
            {
                break;
            }

            fpos += ARCH_PAGE_SIZE;
            page = dfs_page_search(aspace, fpos);
            if (page)
            {
                dfs_page_release(page);
            }
            count --;

        } while (count && page == RT_NULL);

        page = tmp;
        if (page)
        {
            dfs_aspace_unlock(aspace);

            if (rt_atomic_load(&(__pcache.pages_count)) >= RT_PAGECACHE_COUNT)
            {
                dfs_pcache_limit_check();
            }
            else if (rt_atomic_load(&(__pcache.pages_count)) >= RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
            {
                dfs_pcache_mq_work(PCACHE_MQ_GC);
            }

            return page;
        }
    }
    dfs_aspace_unlock(aspace);

    return page;
}

/**
 * @brief Read data from file through address space page cache
 *
 * This function reads data from a file using its address space page cache. It handles
 * the lookup of pages containing the requested data, copies the data to the provided
 * buffer, and manages page references.
 *
 * @param[in] file  Pointer to the file structure containing vnode and aspace
 * @param[in] buf   Buffer to store the read data
 * @param[in] count Number of bytes to read
 * @param[in,out] pos Pointer to the file position (updated during reading)
 *
 * @return Number of bytes successfully read, or negative error code
 */
int dfs_aspace_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
{
    int ret = -EINVAL;

    if (file && file->vnode && file->vnode->aspace)
    {
        if (!(file->vnode->aspace->ops->read))
            return ret;
        struct dfs_vnode *vnode = file->vnode;
        struct dfs_aspace *aspace = vnode->aspace;

        struct dfs_page *page;
        char *ptr = (char *)buf;

        ret = 0;

        while (count)
        {
            page = dfs_page_lookup(file, *pos);
            if (page)
            {
                off_t len;

                dfs_aspace_lock(aspace);
                if (aspace->vnode->size < page->fpos + ARCH_PAGE_SIZE)
                {
                    len = aspace->vnode->size - *pos;
                }
                else
                {
                    len = page->fpos + ARCH_PAGE_SIZE - *pos;
                }

                len = count > len ? len : count;
                if (len > 0)
                {
                    rt_memcpy(ptr, page->page + *pos - page->fpos, len);
                    ptr += len;
                    *pos += len;
                    count -= len;
                    ret += len;
                }
                else
                {
                    dfs_page_release(page);
                    dfs_aspace_unlock(aspace);
                    break;
                }
                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
            }
            else
            {
                break;
            }
        }
    }

    return ret;
}
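
/*
 * Usage sketch (illustrative, error handling omitted), assuming `file` is
 * an open struct dfs_file whose filesystem provides aspace read ops:
 *
 *     char buf[64];
 *     off_t pos = 0;
 *     int n = dfs_aspace_read(file, buf, sizeof(buf), &pos);
 *     // on success, n bytes were copied from cached pages and pos advanced
 */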

/**
 * @brief Write data to file through address space page cache
 *
 * This function writes data to a file using its address space page cache. It handles
 * page lookup, data copying, dirty page marking, and synchronization operations.
 *
 * @param[in] file  Pointer to the file structure containing vnode and aspace
 * @param[in] buf   Buffer containing data to write
 * @param[in] count Number of bytes to write
 * @param[in,out] pos Pointer to the file position (updated during writing)
 *
 * @return Number of bytes successfully written, or negative error code
 */
int dfs_aspace_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
{
    int ret = -EINVAL;

    if (file && file->vnode && file->vnode->aspace)
    {
        struct dfs_vnode *vnode = file->vnode;
        struct dfs_aspace *aspace = vnode->aspace;

        struct dfs_page *page;
        char *ptr = (char *)buf;

        if (!(aspace->ops->write))
        {
            return ret;
        }
        else if (aspace->mnt && (aspace->mnt->flags & MNT_RDONLY))
        {
            return -EROFS;
        }

        ret = 0;

        while (count)
        {
            page = dfs_page_lookup(file, *pos);
            if (page)
            {
                off_t len;

                dfs_aspace_lock(aspace);
                len = page->fpos + ARCH_PAGE_SIZE - *pos;
                len = count > len ? len : count;
                rt_memcpy(page->page + *pos - page->fpos, ptr, len);
                ptr += len;
                *pos += len;
                count -= len;
                ret += len;

                if (*pos > aspace->vnode->size)
                {
                    aspace->vnode->size = *pos;
                }

                if (file->flags & O_SYNC)
                {
                    if (aspace->vnode->size < page->fpos + page->size)
                    {
                        page->len = aspace->vnode->size - page->fpos;
                    }
                    else
                    {
                        page->len = page->size;
                    }

                    aspace->ops->write(page);
                    page->is_dirty = 0;
                }
                else
                {
                    dfs_page_dirty(page);
                }

                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
            }
            else
            {
                break;
            }
        }
    }

    return ret;
}
1707 
/**
 * @brief Flush dirty pages in an address space to storage
 *
 * This function writes all dirty pages in the specified address space to storage,
 * ensuring data persistence. It handles page size adjustments and clears dirty flags
 * after successful writes.
 *
 * @param[in] aspace Pointer to the address space containing dirty pages
 *
 * @return Always returns 0 (success)
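 *
 * @par Example
 * A minimal usage sketch (illustrative only; `data` and `len` stand in for
 * the caller's buffer): persist buffered writes once they must be durable.
 * @code
 * off_t pos = 0;
 * dfs_aspace_write(file, data, len, &pos);   // dirties cached pages
 * dfs_aspace_flush(file->vnode->aspace);     // writes them back now
 * @endcode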
 */
int dfs_aspace_flush(struct dfs_aspace *aspace)
{
    if (aspace)
    {
        rt_list_t *next;
        struct dfs_page *page;

        dfs_aspace_lock(aspace);

        if (aspace->pages_count > 0 && aspace->vnode)
        {
            rt_list_for_each(next, &aspace->list_dirty)
            {
                page = rt_list_entry(next, struct dfs_page, dirty_node);
                if (page->is_dirty == 1 && aspace->vnode)
                {
                    if (aspace->vnode->size < page->fpos + page->size)
                    {
                        page->len = aspace->vnode->size - page->fpos;
                    }
                    else
                    {
                        page->len = page->size;
                    }

                    if (aspace->ops->write)
                    {
                        aspace->ops->write(page);
                    }

                    page->is_dirty = 0;
                }
                RT_ASSERT(page->is_dirty == 0);
            }
        }

        dfs_aspace_unlock(aspace);
    }
    return 0;
}

/**
 * @brief Clean all pages from an address space
 *
 * This function removes all pages from the specified address space while
 * maintaining thread safety through proper locking. It walks the combined
 * page list and skips only the inactive-list head sentinel, so both active
 * and inactive pages are removed.
 *
 * @param[in] aspace Pointer to the address space structure to clean
 *
 * @return Always returns 0
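 *
 * @par Example
 * A minimal usage sketch (illustrative only): drop every cached page;
 * flushing first is a conservative way to make sure dirty data reaches
 * storage before the pages disappear.
 * @code
 * dfs_aspace_flush(aspace);  // write back dirty pages first
 * dfs_aspace_clean(aspace);  // then remove all cached pages
 * @endcode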
 */
int dfs_aspace_clean(struct dfs_aspace *aspace)
{
    if (aspace)
    {
        dfs_aspace_lock(aspace);

        if (aspace->pages_count > 0)
        {
            rt_list_t *next = aspace->list_active.next;
            struct dfs_page *page;

            while (next && next != &aspace->list_active)
            {
                if (next == &aspace->list_inactive)
                {
                    next = next->next;
                    continue;
                }
                page = rt_list_entry(next, struct dfs_page, space_node);
                next = next->next;
                dfs_page_remove(page);
            }
        }

        dfs_aspace_unlock(aspace);
    }

    return 0;
}

/**
 * @brief Map a file page into virtual address space
 *
 * This function maps a file page into the specified virtual address space, handling
 * memory allocation, page lookup, and cache synchronization. It ensures proper
 * memory visibility across different CPU architectures with cache operations.
 *
 * @param[in] file    Pointer to the file structure
 * @param[in] varea   Pointer to the virtual address area structure
 * @param[in] vaddr   Virtual address to map the page to
 *
 * @return Pointer to the mapped page on success, NULL on failure
 *
 * @note This function handles cache synchronization for architectures with weak
 *       memory models or Harvard architectures to ensure data visibility. It also
 *       manages the mapping structure lifecycle through proper allocation/free.
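 *
 * @par Example
 * A hedged sketch of a fault-path caller (the surrounding handler and the
 * `fault_vaddr` variable are illustrative, not defined in this file):
 * @code
 * // on a page fault inside a file-backed varea:
 * void *pg = dfs_aspace_mmap(file, varea, fault_vaddr);
 * if (pg == RT_NULL)
 * {
 *     // no cached page or out of memory: report the fault to the caller
 * }
 * @endcode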
 */
void *dfs_aspace_mmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
{
    void *ret = RT_NULL;
    struct dfs_page *page;
    struct dfs_aspace *aspace = file->vnode->aspace;
    rt_aspace_t target_aspace = varea->aspace;

    page = dfs_page_lookup(file, dfs_aspace_fpos(varea, vaddr));
    if (page)
    {
        struct dfs_mmap *map = (struct dfs_mmap *)rt_calloc(1, sizeof(struct dfs_mmap));
        if (map)
        {
            void *pg_vaddr = page->page;
            void *pg_paddr = rt_kmem_v2p(pg_vaddr);
            int err = rt_varea_map_range(varea, vaddr, pg_paddr, page->size);
            if (err == RT_EOK)
            {
                /**
                 * Note: Once the page is mapped into the user area, data written to it is
                 * not guaranteed to be visible on machines with a *weak* memory model or
                 * on Harvard-architecture cores (ARM64 in particular), because of their
                 * out-of-order pipelines and write buffers. In addition, if the L1
                 * instruction cache is VIPT, a stale alias entry may match when the same
                 * page frame is reused and mapped at the same virtual address as before.
                 *
                 * That is why we must synchronize and clean the caches manually, so that
                 * subsequent instruction fetches observe data coherent with the data
                 * cache, TLB, MMU, main memory, and every other observer in the system.
                 */
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, vaddr, ARCH_PAGE_SIZE);
                rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, vaddr, ARCH_PAGE_SIZE);

                ret = pg_vaddr;
                map->aspace = target_aspace;
                map->vaddr = vaddr;
                dfs_aspace_lock(aspace);
                rt_list_insert_after(&page->mmap_head, &map->mmap_node);
                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
            }
            else
            {
                dfs_page_release(page);
                rt_free(map);
            }
        }
        else
        {
            dfs_page_release(page);
        }
    }

    return ret;
}

/**
 * @brief Unmap pages from virtual address space
 *
 * This function removes mappings of file pages within the specified virtual address range.
 * It handles cache synchronization and maintains page dirty status when unmapping.
 *
 * @param[in] file    Pointer to the file structure
 * @param[in] varea   Pointer to the virtual address area to unmap
 *
 * @return Always returns 0
 *
 * @note This function handles both private and shared mappings, ensuring proper
 *       cache synchronization and page dirty status maintenance during unmapping.
 */
int dfs_aspace_unmap(struct dfs_file *file, struct rt_varea *varea)
{
    struct dfs_vnode *vnode = file->vnode;
    struct dfs_aspace *aspace = vnode->aspace;
    void *unmap_start = varea->start;
    void *unmap_end = (char *)unmap_start + varea->size;

    if (aspace)
    {
        rt_list_t *next;
        struct dfs_page *page;

        dfs_aspace_lock(aspace);
        if (aspace->pages_count > 0)
        {
            rt_list_for_each(next, &aspace->list_active)
            {
                if (next != &aspace->list_inactive)
                {
                    page = rt_list_entry(next, struct dfs_page, space_node);
                    if (page)
                    {
                        rt_list_t *node, *tmp;
                        struct dfs_mmap *map;
                        rt_varea_t map_varea = RT_NULL;

                        node = page->mmap_head.next;

                        while (node != &page->mmap_head)
                        {
                            rt_aspace_t map_aspace;
                            map = rt_list_entry(node, struct dfs_mmap, mmap_node);
                            tmp = node;
                            node = node->next;

                            if (map && varea->aspace == map->aspace
                                && map->vaddr >= unmap_start && map->vaddr < unmap_end)
                            {
                                void *vaddr = map->vaddr;
                                map_aspace = map->aspace;

                                if (!map_varea || map_varea->aspace != map_aspace ||
                                    vaddr < map_varea->start ||
                                    vaddr >= map_varea->start + map_varea->size)
                                {
                                    /* lock the tree so we don't access incomplete data */
                                    map_varea = rt_aspace_query(map_aspace, vaddr);
                                }

                                rt_varea_unmap_page(map_varea, vaddr);

                                if (!rt_varea_is_private_locked(varea) &&
                                    page->fpos < page->aspace->vnode->size)
                                {
                                    dfs_page_dirty(page);
                                }
                                rt_list_remove(tmp);
                                rt_free(map);
                                break;
                            }
                        }
                    }
                }
            }
        }
        dfs_aspace_unlock(aspace);
    }

    return 0;
}

/**
 * Unmap a page from virtual address space.
 *
 * @param[in] file    The file object containing the page
 * @param[in] varea   The virtual memory area
 * @param[in] vaddr   The virtual address to unmap
 *
 * @return Always returns 0 on success
 *
 * @note This function removes the mapping between a virtual address and a physical page.
 *       It handles cleanup of mmap structures and marks pages dirty if needed.
 */
int dfs_aspace_page_unmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
{
    struct dfs_page *page;
    struct dfs_aspace *aspace = file->vnode->aspace;

    if (aspace)
    {
        dfs_aspace_lock(aspace);

        page = dfs_page_search(aspace, dfs_aspace_fpos(varea, vaddr));
        if (page)
        {
            rt_list_t *node, *tmp;
            struct dfs_mmap *map;
            rt_varea_unmap_page(varea, vaddr);

            node = page->mmap_head.next;

            while (node != &page->mmap_head)
            {
                map = rt_list_entry(node, struct dfs_mmap, mmap_node);
                tmp = node;
                node = node->next;

                if (map && varea->aspace == map->aspace && vaddr == map->vaddr)
                {
                    if (!rt_varea_is_private_locked(varea))
                    {
                        dfs_page_dirty(page);
                    }
                    rt_list_remove(tmp);
                    rt_free(map);
                    break;
                }
            }

            dfs_page_release(page);
        }

        dfs_aspace_unlock(aspace);
    }

    return 0;
}

/**
 * Mark a page as dirty in the address space.
 *
 * @param[in] file    The file object containing the page
 * @param[in] varea   The virtual memory area
 * @param[in] vaddr   The virtual address of the page
 *
 * @return Always returns 0 on success
 *
 * @note This function marks a specific page as dirty in the file's address space.
 *       The page is released after being marked dirty.
 */
int dfs_aspace_page_dirty(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
{
    struct dfs_page *page;
    struct dfs_aspace *aspace = file->vnode->aspace;

    if (aspace)
    {
        dfs_aspace_lock(aspace);

        page = dfs_page_search(aspace, dfs_aspace_fpos(varea, vaddr));
        if (page)
        {
            dfs_page_dirty(page);
            dfs_page_release(page);
        }

        dfs_aspace_unlock(aspace);
    }

    return 0;
}

/**
 * Calculate file position from virtual address.
 *
 * @param[in] varea   The virtual memory area
 * @param[in] vaddr   The virtual address to convert
 *
 * @return The calculated file position offset
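 *
 * @par Example
 * A worked example (values are illustrative): with ARCH_PAGE_SIZE == 4096,
 * a varea starting at 0x100000 whose mapping begins at file page 2
 * (varea->offset == 2), vaddr 0x100010 yields
 * fpos = 0x100010 - 0x100000 + 2 * 4096 = 0x2010.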
 */
off_t dfs_aspace_fpos(struct rt_varea *varea, void *vaddr)
{
    return (off_t)(intptr_t)vaddr - (off_t)(intptr_t)varea->start + varea->offset * ARCH_PAGE_SIZE;
}

/**
 * Get the virtual address corresponding to a file position in a virtual area.
 *
 * @param[in] varea The virtual area structure
 * @param[in] fpos  The file position to convert
 *
 * @return The virtual address corresponding to the file position
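 *
 * @par Example
 * The inverse of the dfs_aspace_fpos() example above: with the same varea,
 * fpos 0x2010 yields vaddr = 0x100000 + 0x2010 - 2 * 4096 = 0x100010.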
 */
void *dfs_aspace_vaddr(struct rt_varea *varea, off_t fpos)
{
    /* cast to char * so the arithmetic is standard C (void * arithmetic is a GNU extension) */
    return (char *)varea->start + fpos - varea->offset * ARCH_PAGE_SIZE;
}

/**
 * @brief Read data from memory-mapped file space
 *
 * This function handles read operations for memory-mapped file regions by
 * translating virtual addresses to file positions and performing the actual
 * read operation through dfs_aspace_read.
 *
 * @param[in] file   Pointer to the file structure being mapped
 * @param[in] varea  Pointer to the virtual memory area structure
 * @param[in] data   Pointer to the I/O message containing read details
 *                   (includes fault address and buffer address)
 *
 * @return Number of bytes successfully read (ARCH_PAGE_SIZE on success),
 *         or 0 if any parameter is invalid
 */
int dfs_aspace_mmap_read(struct dfs_file *file, struct rt_varea *varea, void *data)
{
    int ret = 0;

    if (file && varea)
    {
        struct rt_aspace_io_msg *msg = (struct rt_aspace_io_msg *)data;
        if (msg)
        {
            off_t fpos = dfs_aspace_fpos(varea, msg->fault_vaddr);
            return dfs_aspace_read(file, msg->buffer_vaddr, ARCH_PAGE_SIZE, &fpos);
        }
    }

    return ret;
}

/**
 * @brief Write data to memory-mapped file space
 *
 * This function handles write operations for memory-mapped file regions by
 * translating virtual addresses to file positions and performing the actual
 * write operation through dfs_aspace_write.
 *
 * @param[in] file   Pointer to the file structure being mapped
 * @param[in] varea  Pointer to the virtual memory area structure
 * @param[in] data   Pointer to the I/O message containing write details
 *                   (includes fault address and buffer address)
 *
 * @return Number of bytes successfully written (ARCH_PAGE_SIZE on success),
 *         or 0 if any parameter is invalid
 */
int dfs_aspace_mmap_write(struct dfs_file *file, struct rt_varea *varea, void *data)
{
    int ret = 0;

    if (file && varea)
    {
        struct rt_aspace_io_msg *msg = (struct rt_aspace_io_msg *)data;
        if (msg)
        {
            off_t fpos = dfs_aspace_fpos(varea, msg->fault_vaddr);
            return dfs_aspace_write(file, msg->buffer_vaddr, ARCH_PAGE_SIZE, &fpos);
        }
    }

    return ret;
}

#endif /* RT_USING_PAGECACHE */