/*
 * Copyright (c) 2006-2025 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author         Notes
 * 2025-04-22     ScuDays        Add VDSO functionality under the riscv64 architecture.
 */

#ifndef ASM_VDSO_SYS_H
#define ASM_VDSO_SYS_H

#include <time.h>
#include <unistd.h>
#include <sys/types.h>

#include <vdso_config.h>
#include <vdso_datapage.h>

#define __always_unused __attribute__((__unused__))
#define __maybe_unused  __attribute__((__unused__))

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

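/*
 * Full read/write fence issued after reading the counter so the value cannot
 * be reordered with surrounding memory accesses.
 */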
#define arch_counter_enforce_ordering \
    __asm__ volatile("fence rw, rw" ::: "memory")

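/* Read the current value of the 'time' CSR (the clocksource counter) via rdtime. */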
static inline uint64_t __arch_get_hw_counter(void)
{
    uint64_t res;
    __asm__ volatile("rdtime %0" : "=r"(res));
    arch_counter_enforce_ordering;
    return res;
}

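/*
 * Divide a 64-bit dividend by a 32-bit divisor using repeated subtraction,
 * returning the quotient and storing the remainder through 'remainder'.
 * Intended for callers that know the quotient is small (e.g. reducing
 * nanoseconds into seconds), where this is cheaper than a full 64-bit divide.
 */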
static inline uint32_t
__iter_div_u64_rem(uint64_t dividend, uint32_t divisor, uint64_t *remainder)
{
    uint32_t ret = 0;

    while (dividend >= divisor)
    {
        /* The following asm() prevents the compiler from
         * optimising this loop into a modulo operation. */
        __asm__("" : "+rm"(dividend));

        dividend -= divisor;
        ret++;
    }

    *remainder = dividend;

    return ret;
}

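/*
 * Instruction and memory barriers built on the RISC-V 'fence' instruction:
 * fence.i synchronises the instruction stream, while the fence variants below
 * order the given combinations of prior/subsequent reads and writes.
 */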
#define __RT_STRINGIFY(x...) #x
#define RT_STRINGIFY(x...)   __RT_STRINGIFY(x)
#define rt_hw_barrier(cmd, ...) \
    __asm__ volatile(RT_STRINGIFY(cmd) " " RT_STRINGIFY(__VA_ARGS__)::: "memory")

#define rt_hw_isb() rt_hw_barrier(fence.i)
#define rt_hw_dmb() rt_hw_barrier(fence, rw, rw)
#define rt_hw_wmb() rt_hw_barrier(fence, w, w)
#define rt_hw_rmb() rt_hw_barrier(fence, r, r)
#define rt_hw_dsb() rt_hw_barrier(fence, rw, rw)

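/* Full memory fence used as the default barrier() when none has been defined yet. */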
#ifndef barrier
#define barrier() __asm__ __volatile__("fence" : : : "memory")
#endif

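/* Hint that the caller is spinning; on RISC-V this is a plain nop plus a compiler barrier. */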
static inline void cpu_relax(void)
{
    __asm__ volatile("nop" ::: "memory");
}

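/*
 * __READ_ONCE_SIZE performs a single volatile load of 1, 2, 4 or 8 bytes so
 * the compiler can neither tear nor re-read the access; other sizes fall back
 * to a memcpy bracketed by barriers.
 */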
#define __READ_ONCE_SIZE                                          \
    ({                                                            \
        switch (size)                                             \
        {                                                         \
        case 1:                                                   \
            *(__u8 *)res = *(volatile __u8 *)p;                   \
            break;                                                \
        case 2:                                                   \
            *(__u16 *)res = *(volatile __u16 *)p;                 \
            break;                                                \
        case 4:                                                   \
            *(__u32 *)res = *(volatile __u32 *)p;                 \
            break;                                                \
        case 8:                                                   \
            *(__u64 *)res = *(volatile __u64 *)p;                 \
            break;                                                \
        default:                                                  \
            barrier();                                            \
            __builtin_memcpy((void *)res, (const void *)p, size); \
            barrier();                                            \
        }                                                         \
    })

static inline void __read_once_size(const volatile void *p, void *res, int size)
{
    __READ_ONCE_SIZE;
}

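/*
 * READ_ONCE(x) reads x exactly once through the helper above, using a union
 * so the result keeps the type of x while the copy itself is done bytewise.
 */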
#define __READ_ONCE(x, check)                                                \
    ({                                                                       \
        union {                                                              \
            typeof(x) __val;                                                 \
            char      __c[1];                                                \
        } __u;                                                               \
        if (check)                                                           \
            __read_once_size(&(x), __u.__c, sizeof(x));                      \
        smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
        __u.__val;                                                           \
    })
#define READ_ONCE(x) __READ_ONCE(x, 1)

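/*
 * The kernel-exported vDSO data pages, one struct per clocksource base
 * (CS_BASES). User code obtains the mapping through __arch_get_vdso_data().
 */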
extern struct vdso_data         _vdso_data[CS_BASES] __attribute__((visibility("hidden")));
static inline struct vdso_data *__arch_get_vdso_data(void)
{
    return _vdso_data;
}

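/*
 * Sequence-lock reader entry: spin until the sequence count is even (no
 * update in progress), then issue a read barrier before the data is read.
 */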
static inline uint32_t rt_vdso_read_begin(const struct vdso_data *vd)
{
    uint32_t seq;

    while (unlikely((seq = READ_ONCE(vd->seq)) & 1))
        cpu_relax();

    rt_hw_rmb();
    return seq;
}

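/*
 * Sequence-lock reader exit: returns non-zero if the sequence count changed
 * while the caller was reading, meaning the snapshot is stale and must be
 * re-read.
 */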
static inline uint32_t rt_vdso_read_retry(const struct vdso_data *vd,
                                          uint32_t                start)
{
    uint32_t seq;

    rt_hw_rmb();
    seq = READ_ONCE(vd->seq);
    return seq != start;
}

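/*
 * Illustrative use of the reader protocol above (a sketch only; apart from
 * 'seq', the vdso_data fields a real caller would copy are not shown here):
 *
 *     const struct vdso_data *vd = __arch_get_vdso_data();
 *     uint64_t cycles;
 *     uint32_t seq;
 *
 *     do {
 *         seq    = rt_vdso_read_begin(vd);   // wait for a stable (even) sequence
 *         cycles = __arch_get_hw_counter();  // sample the counter inside the window
 *         // ... copy the timekeeping snapshot from *vd here ...
 *     } while (rt_vdso_read_retry(vd, seq)); // retry if the data changed meanwhile
 */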
#endif /* ASM_VDSO_SYS_H */