1 /*
2  * Copyright (c) 2018 Intel Corporation
3  * Copyright (c) 2018 Friedt Professional Engineering Services, Inc
4  * Copyright (c) 2025 Tenstorrent AI ULC
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  */
8 
9 #include <errno.h>
10 #include <stdbool.h>
11 #include <stddef.h>
12 #include <stdint.h>
13 
14 #include <zephyr/kernel.h>
15 #include <zephyr/internal/syscall_handler.h>
16 #include <zephyr/sys/clock.h>
17 #include <zephyr/sys/timeutil.h>
18 #include <zephyr/toolchain.h>
19 
20 /*
21  * `k_uptime_get` returns a timestamp offset on an always increasing
22  * value from the system start.  To support the `SYS_CLOCK_REALTIME`
23  * clock, this `rt_clock_offset` records the time that the system was
24  * started.  This can either be set via 'sys_clock_settime', or could be
25  * set from a real time clock, if such hardware is present.
26  */
27 static struct timespec rt_clock_offset;
28 static struct k_spinlock rt_clock_offset_lock;
29 
is_valid_clock_id(int clock_id)30 static bool is_valid_clock_id(int clock_id)
31 {
32 	switch (clock_id) {
33 	case SYS_CLOCK_MONOTONIC:
34 	case SYS_CLOCK_REALTIME:
35 		return true;
36 	default:
37 		return false;
38 	}
39 }
40 
timespec_from_ticks(uint64_t ticks,struct timespec * ts)41 static void timespec_from_ticks(uint64_t ticks, struct timespec *ts)
42 {
43 	uint64_t elapsed_secs = ticks / CONFIG_SYS_CLOCK_TICKS_PER_SEC;
44 	uint64_t nremainder = ticks % CONFIG_SYS_CLOCK_TICKS_PER_SEC;
45 
46 	*ts = (struct timespec){
47 		.tv_sec = (time_t)elapsed_secs,
48 		/* For ns 32 bit conversion can be used since its smaller than 1sec. */
49 		.tv_nsec = (int32_t)k_ticks_to_ns_floor32(nremainder),
50 	};
51 }
52 
/*
 * Translate a POSIX clockid_t value into the corresponding SYS_CLOCK_*
 * identifier.
 *
 * Each case is compiled in only when the matching POSIX clock macro is
 * visible, so on non-POSIX builds every input falls through to -EINVAL.
 *
 * @param clock_id POSIX clock identifier (e.g. CLOCK_REALTIME)
 * @return SYS_CLOCK_REALTIME or SYS_CLOCK_MONOTONIC on success, -EINVAL
 *         for any unsupported or unknown clock id.
 */
int sys_clock_from_clockid(int clock_id)
{
	switch (clock_id) {
#if defined(CLOCK_REALTIME) || defined(_POSIX_C_SOURCE)
	case (int)CLOCK_REALTIME:
		return SYS_CLOCK_REALTIME;
#endif
#if defined(CLOCK_MONOTONIC) || defined(_POSIX_MONOTONIC_CLOCK)
	case (int)CLOCK_MONOTONIC:
		return SYS_CLOCK_MONOTONIC;
#endif
	default:
		return -EINVAL;
	}
}
68 
sys_clock_gettime(int clock_id,struct timespec * ts)69 int sys_clock_gettime(int clock_id, struct timespec *ts)
70 {
71 	if (!is_valid_clock_id(clock_id)) {
72 		return -EINVAL;
73 	}
74 
75 	switch (clock_id) {
76 	case SYS_CLOCK_REALTIME: {
77 		struct timespec offset;
78 
79 		timespec_from_ticks(k_uptime_ticks(), ts);
80 		sys_clock_getrtoffset(&offset);
81 		if (unlikely(!timespec_add(ts, &offset))) {
82 			/* Saturate rather than reporting an overflow in 292 billion years */
83 			*ts = (struct timespec){
84 				.tv_sec = (time_t)INT64_MAX,
85 				.tv_nsec = NSEC_PER_SEC - 1,
86 			};
87 		}
88 	} break;
89 
90 	case SYS_CLOCK_MONOTONIC:
91 		timespec_from_ticks(k_uptime_ticks(), ts);
92 		break;
93 
94 	default:
95 		CODE_UNREACHABLE;
96 		return -EINVAL; /* Should never reach here */
97 	}
98 
99 	__ASSERT_NO_MSG(timespec_is_valid(ts));
100 
101 	return 0;
102 }
103 
/*
 * Copy the current realtime clock offset (realtime minus uptime, as set by
 * sys_clock_settime()) into @tp.
 *
 * The copy is performed under rt_clock_offset_lock so a concurrent
 * settime cannot produce a torn read of the two timespec fields.
 */
void z_impl_sys_clock_getrtoffset(struct timespec *tp)
{
	__ASSERT_NO_MSG(tp != NULL);

	K_SPINLOCK(&rt_clock_offset_lock) {
		*tp = rt_clock_offset;
	}

	__ASSERT_NO_MSG(timespec_is_valid(tp));
}
114 
115 #ifdef CONFIG_USERSPACE
z_vrfy_sys_clock_getrtoffset(struct timespec * tp)116 void z_vrfy_sys_clock_getrtoffset(struct timespec *tp)
117 {
118 	K_OOPS(K_SYSCALL_MEMORY_WRITE(tp, sizeof(*tp)));
119 	return z_impl_sys_clock_getrtoffset(tp);
120 }
121 #include <zephyr/syscalls/sys_clock_getrtoffset_mrsh.c>
122 #endif /* CONFIG_USERSPACE */
123 
z_impl_sys_clock_settime(int clock_id,const struct timespec * tp)124 int z_impl_sys_clock_settime(int clock_id, const struct timespec *tp)
125 {
126 	struct timespec offset;
127 
128 	if (clock_id != SYS_CLOCK_REALTIME) {
129 		return -EINVAL;
130 	}
131 
132 	if (!timespec_is_valid(tp)) {
133 		return -EINVAL;
134 	}
135 
136 	timespec_from_ticks(k_uptime_ticks(), &offset);
137 	(void)timespec_negate(&offset);
138 	(void)timespec_add(&offset, tp);
139 
140 	K_SPINLOCK(&rt_clock_offset_lock) {
141 		rt_clock_offset = offset;
142 	}
143 
144 	return 0;
145 }
146 
147 #ifdef CONFIG_USERSPACE
/* Syscall verification handler: check that userspace granted read access
 * to the new time value, then delegate to the kernel implementation.
 */
int z_vrfy_sys_clock_settime(int clock_id, const struct timespec *ts)
{
	K_OOPS(K_SYSCALL_MEMORY_READ(ts, sizeof(*ts)));
	return z_impl_sys_clock_settime(clock_id, ts);
}
153 #include <zephyr/syscalls/sys_clock_settime_mrsh.c>
154 #endif /* CONFIG_USERSPACE */
155 
z_impl_sys_clock_nanosleep(int clock_id,int flags,const struct timespec * rqtp,struct timespec * rmtp)156 int z_impl_sys_clock_nanosleep(int clock_id, int flags, const struct timespec *rqtp,
157 			       struct timespec *rmtp)
158 {
159 	k_timepoint_t end;
160 	k_timeout_t timeout;
161 	struct timespec duration;
162 	const bool update_rmtp = rmtp != NULL;
163 	const bool abstime = (flags & SYS_TIMER_ABSTIME) != 0;
164 
165 	if (!is_valid_clock_id(clock_id)) {
166 		return -EINVAL;
167 	}
168 
169 	if ((rqtp->tv_sec < 0) || !timespec_is_valid(rqtp)) {
170 		return -EINVAL;
171 	}
172 
173 	if (abstime) {
174 		/* convert absolute time to relative time duration */
175 		(void)sys_clock_gettime(clock_id, &duration);
176 		(void)timespec_negate(&duration);
177 		(void)timespec_add(&duration, rqtp);
178 	} else {
179 		duration = *rqtp;
180 	}
181 
182 	/* sleep for relative time duration */
183 	if ((sizeof(rqtp->tv_sec) == sizeof(int64_t)) &&
184 	    unlikely(rqtp->tv_sec >= (time_t)(UINT64_MAX / NSEC_PER_SEC))) {
185 		uint64_t ns = (uint64_t)k_sleep(K_SECONDS(duration.tv_sec - 1)) * NSEC_PER_MSEC;
186 		struct timespec rem = {
187 			.tv_sec = (time_t)(ns / NSEC_PER_SEC),
188 			.tv_nsec = ns % NSEC_PER_MSEC,
189 		};
190 
191 		duration.tv_sec = 1;
192 		(void)timespec_add(&duration, &rem);
193 	}
194 
195 	timeout = timespec_to_timeout(&duration);
196 	end = sys_timepoint_calc(timeout);
197 	do {
198 		(void)k_sleep(timeout);
199 		timeout = sys_timepoint_timeout(end);
200 	} while (!K_TIMEOUT_EQ(timeout, K_NO_WAIT));
201 
202 	if (update_rmtp) {
203 		*rmtp = (struct timespec){
204 			.tv_sec = 0,
205 			.tv_nsec = 0,
206 		};
207 	}
208 
209 	return 0;
210 }
211 
212 #ifdef CONFIG_USERSPACE
/* Syscall verification handler: the request must be readable by the caller
 * and the optional remainder output must be writable, before delegating to
 * the kernel implementation.
 */
int z_vrfy_sys_clock_nanosleep(int clock_id, int flags, const struct timespec *rqtp,
			       struct timespec *rmtp)
{
	K_OOPS(K_SYSCALL_MEMORY_READ(rqtp, sizeof(*rqtp)));
	if (rmtp != NULL) {
		K_OOPS(K_SYSCALL_MEMORY_WRITE(rmtp, sizeof(*rmtp)));
	}
	return z_impl_sys_clock_nanosleep(clock_id, flags, rqtp, rmtp);
}
222 #include <zephyr/syscalls/sys_clock_nanosleep_mrsh.c>
223 #endif /* CONFIG_USERSPACE */
224 
225 #ifdef CONFIG_ZTEST
226 #include <zephyr/ztest.h>
reset_clock_offset(void)227 static void reset_clock_offset(void)
228 {
229 	K_SPINLOCK(&rt_clock_offset_lock) {
230 		rt_clock_offset = (struct timespec){0};
231 	}
232 }
233 
/* ztest after-rule: restore a pristine realtime offset between test cases. */
static void clock_offset_reset_rule_after(const struct ztest_unit_test *test, void *data)
{
	ARG_UNUSED(data);
	ARG_UNUSED(test);

	reset_clock_offset();
}
241 
242 ZTEST_RULE(clock_offset_reset_rule, NULL, clock_offset_reset_rule_after);
243 #endif /* CONFIG_ZTEST */
244