/*
 * Copyright (c) 2006-2025 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author         Notes
 * 2025-04-22     ScuDays        Add VDSO functionality under the riscv64 architecture.
 * 2025-05-10     Bernard        Move __arch_get_hw_frq() to vdso_sys.c as a weak function.
 */

#include <stdio.h>
#include <time.h>
#include <errno.h>
#include <stdbool.h>

#include <vdso_sys.h>

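/*
 * Fallback: an architecture may provide its own rt_vdso_cycles_ready() to
 * reject bogus counter readings; this default accepts every value, which is
 * why the `cycles` argument is deliberately unused below.
 */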
#ifndef rt_vdso_cycles_ready
static inline bool rt_vdso_cycles_ready(uint64_t cycles)
{
    return true;
}
#endif

#ifndef rt_vdso_get_ns
/*
 * Weak default for the hardware timer frequency: RISC-V has no portable CPU
 * cycle counter here, so a BSP that knows its real timer frequency should
 * override this function; otherwise 10 MHz is assumed.
 */
__attribute__((weak)) uint64_t __arch_get_hw_frq(void)
{
    return 10000000;
}

/* Convert the cycles elapsed since `last` into nanoseconds. */
static inline uint64_t rt_vdso_get_ns(uint64_t cycles, uint64_t last)
{
    return (cycles - last) * NSEC_PER_SEC / __arch_get_hw_frq();
}
#endif

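/*
 * Read one timestamp from the shared vdso_data snapshot. The sequence
 * counter (rt_vdso_read_begin()/rt_vdso_read_retry()) makes the read
 * lock-free: if the data is updated while it is being read, the sequence
 * changes and the loop retries with a fresh snapshot.
 */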
static int
__rt_vdso_getcoarse(struct timespec *ts, clockid_t clock, const struct vdso_data *vdns)
{
    const struct vdso_data *vd;
    const struct timespec  *vdso_ts;
    uint32_t                seq;
    uint64_t                sec, last, ns, cycles;

    if (clock != CLOCK_MONOTONIC_RAW)
        vd = &vdns[CS_HRES_COARSE];
    else
        vd = &vdns[CS_RAW];

    vdso_ts = &vd->basetime[clock];

    do {
        seq    = rt_vdso_read_begin(vd);
        cycles = __arch_get_hw_counter();
        if (unlikely(!rt_vdso_cycles_ready(cycles)))
            return -1;
        ns    = vdso_ts->tv_nsec;
        last  = vd->cycle_last;
        ns   += rt_vdso_get_ns(cycles, last);
        sec   = vdso_ts->tv_sec;
    } while (unlikely(rt_vdso_read_retry(vd, seq)));

    ts->tv_sec  = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
    ts->tv_nsec = ns;

    return 0;
}

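/*
 * Validate the clock id and map it onto the snapshot that serves it:
 * out-of-range ids fail with -1, the supported realtime/monotonic clocks go
 * through __rt_vdso_getcoarse(), and any other clock reports ENOENT.
 */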
static inline int
__vdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
                            struct timespec *ts)
{
    uint32_t msk;

    if (unlikely((uint32_t)clock >= MAX_CLOCKS))
        return -1;

    msk = 1U << clock;
    if (likely(msk & VDSO_REALTIME))
        return __rt_vdso_getcoarse(ts, CLOCK_REALTIME, vd);
    else if (msk & VDSO_MONOTIME)
        return __rt_vdso_getcoarse(ts, CLOCK_MONOTONIC, vd);
    else
        return ENOENT;
}

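/* Resolve a clock against an explicitly supplied vdso_data pointer. */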
static __maybe_unused int
rt_vdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
                           struct timespec *ts)
{
    return __vdso_clock_gettime_common(vd, clock, ts);
}

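/*
 * Exported vDSO entry point for clock_gettime(). A usage sketch (assuming a
 * user-space caller that has the vDSO page mapped, normally via the C
 * library):
 *
 *     struct timespec ts;
 *     if (__vdso_clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
 *         printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
 *
 * A non-zero return means the clock is not served here; in the usual vDSO
 * pattern the caller then falls back to the clock_gettime system call.
 */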
int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
    return rt_vdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
}