/*
 * Copyright (c) 2024 Nordic Semiconductor ASA
 * SPDX-License-Identifier: Apache-2.0
 */

#include "clock_control_nrf2_common.h"
#include <zephyr/drivers/clock_control/nrf_clock_control.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control_nrf2, CONFIG_CLOCK_CONTROL_LOG_LEVEL);

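/*
 * The two highest bits of the common flags word track the state of the
 * update work item; the remaining lower bits hold one "active" flag per
 * onoff option, hence the value of ONOFF_CNT_MAX below.
 */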
#define FLAG_UPDATE_IN_PROGRESS BIT(FLAGS_COMMON_BITS - 1)
#define FLAG_UPDATE_NEEDED      BIT(FLAGS_COMMON_BITS - 2)

#define ONOFF_CNT_MAX (FLAGS_COMMON_BITS - 2)

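/*
 * Variant of CONTAINER_OF() for elements of array members: given a
 * pointer to array[idx] within a structure, yields the address of the
 * containing structure.
 */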
#define CONTAINER_OF_ITEM(ptr, idx, type, array) \
	(type *)((char *)ptr - \
		 (idx * sizeof(array[0])) - \
		 offsetof(type, array[0]))

/*
 * Definition of `struct clock_config_generic`.
 * Used to access `clock_config_*` structures in a common way.
 */
STRUCT_CLOCK_CONFIG(generic, ONOFF_CNT_MAX);

/* Structure used for synchronous clock requests. */
struct sync_req {
	struct onoff_client cli;
	struct k_sem sem;
	int res;
};

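/* Marks the configuration as needing an update and submits the update
 * work item unless it is already pending or running.
 */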
static void update_config(struct clock_config_generic *cfg)
{
	atomic_val_t prev_flags = atomic_or(&cfg->flags, FLAG_UPDATE_NEEDED);

	/* If the update work is already scheduled (FLAG_UPDATE_NEEDED was
	 * set before the above OR operation) or is currently being executed,
	 * it is not to be submitted again. In the latter case, it will be
	 * submitted by clock_config_update_end().
	 */
	if (prev_flags & (FLAG_UPDATE_NEEDED | FLAG_UPDATE_IN_PROGRESS)) {
		return;
	}

	k_work_submit(&cfg->work);
}

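/* onoff start transition: stores the notification callback, marks the
 * option as active, and triggers a configuration update. The service
 * is notified from clock_config_update_end() once the update is done.
 */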
static void onoff_start_option(struct onoff_manager *mgr,
			       onoff_notify_fn notify)
{
	struct clock_onoff *onoff =
		CONTAINER_OF(mgr, struct clock_onoff, mgr);
	struct clock_config_generic *cfg =
		CONTAINER_OF_ITEM(onoff, onoff->idx,
				  struct clock_config_generic, onoff);

	onoff->notify = notify;

	(void)atomic_or(&cfg->flags, BIT(onoff->idx));
	update_config(cfg);
}

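/* onoff stop transition: clears the option's active flag, triggers a
 * configuration update, and completes immediately with success.
 */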
static void onoff_stop_option(struct onoff_manager *mgr,
			      onoff_notify_fn notify)
{
	struct clock_onoff *onoff =
		CONTAINER_OF(mgr, struct clock_onoff, mgr);
	struct clock_config_generic *cfg =
		CONTAINER_OF_ITEM(onoff, onoff->idx,
				  struct clock_config_generic, onoff);

	(void)atomic_and(&cfg->flags, ~BIT(onoff->idx));
	update_config(cfg);

	notify(mgr, 0);
}

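/* onoff reset transition: there is nothing to roll back here, so just
 * notify success to clear the error state.
 */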
static void onoff_reset_option(struct onoff_manager *mgr,
			       onoff_notify_fn notify)
{
	notify(mgr, 0);
}

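/* Returns the index of the highest set bit in value, or 0 if no bit is set. */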
static inline uint8_t get_index_of_highest_bit(uint32_t value)
{
	return value ? (uint8_t)(31 - __builtin_clz(value)) : 0;
}

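/*
 * Initializes the common part of a clock configuration structure:
 * one onoff manager per clock option and the work item that performs
 * configuration updates.
 */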
int clock_config_init(void *clk_cfg, uint8_t onoff_cnt, k_work_handler_t update_work_handler)
{
	struct clock_config_generic *cfg = clk_cfg;

	__ASSERT_NO_MSG(onoff_cnt <= ONOFF_CNT_MAX);

	for (int i = 0; i < onoff_cnt; ++i) {
		static const struct onoff_transitions transitions = {
			.start = onoff_start_option,
			.stop  = onoff_stop_option,
			.reset = onoff_reset_option,
		};
		int rc;

		rc = onoff_manager_init(&cfg->onoff[i].mgr, &transitions);
		if (rc < 0) {
			return rc;
		}

		cfg->onoff[i].idx = (uint8_t)i;
	}

	cfg->onoff_cnt = onoff_cnt;

	k_work_init(&cfg->work, update_work_handler);

	return 0;
}

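/*
 * Requests a clock option via its onoff manager, clearing any error
 * state previously recorded by the service so that the new request is
 * not rejected outright.
 */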
int clock_config_request(struct onoff_manager *mgr, struct onoff_client *cli)
{
	/* If the on-off service has previously recorded an error, its state
	 * must be reset before a new request is made; otherwise the request
	 * would fail immediately.
	 */
	if (onoff_has_error(mgr)) {
		struct onoff_client reset_cli;

		sys_notify_init_spinwait(&reset_cli.notify);
		onoff_reset(mgr, &reset_cli);
	}

	return onoff_request(mgr, cli);
}

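/*
 * To be called from the update work handler. Marks an update as being
 * in progress, takes a snapshot of the option flags, and returns the
 * index of the highest active option (0 when none is active).
 */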
uint8_t clock_config_update_begin(struct k_work *work)
{
	struct clock_config_generic *cfg =
		CONTAINER_OF(work, struct clock_config_generic, work);
	uint32_t active_options;

	(void)atomic_or(&cfg->flags, FLAG_UPDATE_IN_PROGRESS);
	cfg->flags_snapshot = atomic_and(&cfg->flags, ~FLAG_UPDATE_NEEDED);

	active_options = cfg->flags_snapshot & BIT_MASK(ONOFF_CNT_MAX);
	return get_index_of_highest_bit(active_options);
}

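/*
 * To be called by the specific driver once a configuration update is
 * finished. Notifies all onoff services waiting for this update and,
 * if another update was requested in the meantime, resubmits the work
 * item.
 */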
void clock_config_update_end(void *clk_cfg, int status)
{
	struct clock_config_generic *cfg = clk_cfg;
	atomic_val_t prev_flags;

	prev_flags = atomic_and(&cfg->flags, ~FLAG_UPDATE_IN_PROGRESS);
	if (!(prev_flags & FLAG_UPDATE_IN_PROGRESS)) {
		return;
	}

	for (int i = 0; i < cfg->onoff_cnt; ++i) {
		if (cfg->flags_snapshot & BIT(i)) {
			onoff_notify_fn notify = cfg->onoff[i].notify;

			if (notify) {
				/* If an option was to be activated now
				 * (it is waiting for a notification) and
				 * the activation failed, this option's flag
				 * must be cleared (the option can no longer
				 * be considered active).
				 */
				if (status < 0) {
					(void)atomic_and(&cfg->flags, ~BIT(i));
				}

				cfg->onoff[i].notify = NULL;
				notify(&cfg->onoff[i].mgr, status);
			}
		}
	}

	if (prev_flags & FLAG_UPDATE_NEEDED) {
		k_work_submit(&cfg->work);
	}
}

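/* Common stub for drivers that do not implement the plain clock_control
 * on/off API.
 */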
int api_nosys_on_off(const struct device *dev, clock_control_subsys_t sys)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(sys);

	return -ENOSYS;
}

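/* Completion callback for synchronous requests: records the result and
 * wakes the waiting thread.
 */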
static void sync_cb(struct onoff_manager *mgr, struct onoff_client *cli, uint32_t state, int res)
{
	struct sync_req *req = CONTAINER_OF(cli, struct sync_req, cli);

	req->res = res;
	k_sem_give(&req->sem);
}

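/*
 * Blocking variant of nrf_clock_control_request(). Must not be called
 * from an ISR. If the wait times out, the pending request is cancelled
 * (or the clock released, if the request managed to complete).
 *
 * Illustrative usage (a sketch; the device and spec values are
 * hypothetical and depend on the target SoC and devicetree):
 *
 *	const struct device *clk = DEVICE_DT_GET(DT_NODELABEL(hfxo));
 *	struct nrf_clock_spec spec = { .frequency = MHZ(32) };
 *
 *	int err = nrf_clock_control_request_sync(clk, &spec, K_MSEC(100));
 */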
int nrf_clock_control_request_sync(const struct device *dev,
				   const struct nrf_clock_spec *spec,
				   k_timeout_t timeout)
{
	struct sync_req req = {
		.sem = Z_SEM_INITIALIZER(req.sem, 0, 1)
	};
	int err;

	if (k_is_in_isr()) {
		return -EWOULDBLOCK;
	}

	sys_notify_init_callback(&req.cli.notify, sync_cb);

	err = nrf_clock_control_request(dev, spec, &req.cli);
	if (err < 0) {
		return err;
	}

	err = k_sem_take(&req.sem, timeout);
	if (err < 0) {
		nrf_clock_control_cancel_or_release(dev, spec, &req.cli);
		return err;
	}

	return req.res;
}