/*
 * Copyright (c) 2024 Nordic Semiconductor ASA
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nordic_nrf_hsfll_global

#include "clock_control_nrf2_common.h"
#include <zephyr/devicetree.h>
#include <zephyr/drivers/clock_control/nrf_clock_control.h>
#include <nrfs_gdfs.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(clock_control_nrf2, CONFIG_CLOCK_CONTROL_LOG_LEVEL);

#define GLOBAL_HSFLL_CLOCK_FREQUENCIES \
	DT_INST_PROP(0, supported_clock_frequencies)

#define GLOBAL_HSFLL_CLOCK_FREQUENCIES_IDX(idx) \
	DT_INST_PROP_BY_IDX(0, supported_clock_frequencies, idx)

#define GLOBAL_HSFLL_CLOCK_FREQUENCIES_SIZE \
	DT_INST_PROP_LEN(0, supported_clock_frequencies)

#define GLOBAL_HSFLL_FREQ_REQ_TIMEOUT \
	K_MSEC(CONFIG_CLOCK_CONTROL_NRF_HSFLL_GLOBAL_TIMEOUT_MS)

#define GLOBAL_HSFLL_INIT_LOW_REQ \
	CONFIG_CLOCK_CONTROL_NRF_HSFLL_GLOBAL_REQ_LOW_FREQ

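/*
 * The devicetree supported-clock-frequencies list is expected to hold exactly
 * four ascending entries that map one-to-one, in reverse order, onto the GDFS
 * frequency settings, which run from GDFS_FREQ_HIGH (0) down to
 * GDFS_FREQ_LOW (3). The asserts below guard that assumption.
 */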
BUILD_ASSERT(GLOBAL_HSFLL_CLOCK_FREQUENCIES_SIZE == 4);
BUILD_ASSERT(GLOBAL_HSFLL_CLOCK_FREQUENCIES_IDX(0) == 64000000);
BUILD_ASSERT(GLOBAL_HSFLL_CLOCK_FREQUENCIES_IDX(1) == 128000000);
BUILD_ASSERT(GLOBAL_HSFLL_CLOCK_FREQUENCIES_IDX(2) == 256000000);
BUILD_ASSERT(GLOBAL_HSFLL_CLOCK_FREQUENCIES_IDX(3) == 320000000);
BUILD_ASSERT(GDFS_FREQ_COUNT == 4);
BUILD_ASSERT(GDFS_FREQ_HIGH == 0);
BUILD_ASSERT(GDFS_FREQ_MEDHIGH == 1);
BUILD_ASSERT(GDFS_FREQ_MEDLOW == 2);
BUILD_ASSERT(GDFS_FREQ_LOW == 3);

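/*
 * The device config stores the supported frequencies from devicetree, sorted
 * ascending. The device data embeds the common clock configuration state
 * (one onoff manager per supported frequency plus a work item) together with
 * the bookkeeping needed for nrfs GDFS events and request timeouts.
 */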
struct global_hsfll_dev_config {
	uint32_t clock_frequencies[GLOBAL_HSFLL_CLOCK_FREQUENCIES_SIZE];
};

struct global_hsfll_dev_data {
	STRUCT_CLOCK_CONFIG(global_hsfll, GLOBAL_HSFLL_CLOCK_FREQUENCIES_SIZE) clk_cfg;
	const struct device *dev;
	struct k_work evt_work;
	nrfs_gdfs_evt_type_t evt;
	struct k_work_delayable timeout_dwork;

#if GLOBAL_HSFLL_INIT_LOW_REQ
	struct k_sem evt_sem;
#endif /* GLOBAL_HSFLL_INIT_LOW_REQ */
};

static uint32_t global_hsfll_get_max_clock_frequency(const struct device *dev)
{
	const struct global_hsfll_dev_config *dev_config = dev->config;

	return dev_config->clock_frequencies[ARRAY_SIZE(dev_config->clock_frequencies) - 1];
}

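/*
 * Map a clock specification onto an index into clock_frequencies. Accuracy
 * and precision must be left unset, NRF_CLOCK_CONTROL_FREQUENCY_MAX selects
 * the highest supported frequency, and any other value selects the lowest
 * supported frequency that is not below the request.
 */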
static int global_hsfll_resolve_spec_to_idx(const struct device *dev,
					    const struct nrf_clock_spec *req_spec)
{
	const struct global_hsfll_dev_config *dev_config = dev->config;
	uint32_t req_frequency;

	if (req_spec->accuracy || req_spec->precision) {
		LOG_ERR("invalid specification of accuracy or precision");
		return -EINVAL;
	}

	req_frequency = req_spec->frequency == NRF_CLOCK_CONTROL_FREQUENCY_MAX
		      ? global_hsfll_get_max_clock_frequency(dev)
		      : req_spec->frequency;

	for (uint8_t i = 0; i < ARRAY_SIZE(dev_config->clock_frequencies); i++) {
		if (dev_config->clock_frequencies[i] < req_frequency) {
			continue;
		}

		return i;
	}

	LOG_ERR("invalid frequency");
	return -EINVAL;
}

static void global_hsfll_get_spec_by_idx(const struct device *dev,
					 uint8_t idx,
					 struct nrf_clock_spec *spec)
{
	const struct global_hsfll_dev_config *dev_config = dev->config;

	spec->frequency = dev_config->clock_frequencies[idx];
	spec->accuracy = 0;
	spec->precision = 0;
}

static struct onoff_manager *global_hsfll_get_mgr_by_idx(const struct device *dev, uint8_t idx)
{
	struct global_hsfll_dev_data *dev_data = dev->data;

	return &dev_data->clk_cfg.onoff[idx].mgr;
}

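/* A NULL specification is treated as a request for the lowest supported frequency. */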
static struct onoff_manager *global_hsfll_find_mgr_by_spec(const struct device *dev,
							   const struct nrf_clock_spec *spec)
{
	int idx;

	if (!spec) {
		return global_hsfll_get_mgr_by_idx(dev, 0);
	}

	idx = global_hsfll_resolve_spec_to_idx(dev, spec);
	return idx < 0 ? NULL : global_hsfll_get_mgr_by_idx(dev, idx);
}

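/*
 * Clock control API entry points: each resolves the requested specification
 * to the onoff manager of the matching frequency slot and forwards the
 * operation to it.
 */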
static int api_request_global_hsfll(const struct device *dev,
				    const struct nrf_clock_spec *spec,
				    struct onoff_client *cli)
{
	struct onoff_manager *mgr = global_hsfll_find_mgr_by_spec(dev, spec);

	if (mgr) {
		return clock_config_request(mgr, cli);
	}

	return -EINVAL;
}

static int api_release_global_hsfll(const struct device *dev,
				    const struct nrf_clock_spec *spec)
{
	struct onoff_manager *mgr = global_hsfll_find_mgr_by_spec(dev, spec);

	if (mgr) {
		return onoff_release(mgr);
	}

	return -EINVAL;
}

static int api_cancel_or_release_global_hsfll(const struct device *dev,
					      const struct nrf_clock_spec *spec,
					      struct onoff_client *cli)
{
	struct onoff_manager *mgr = global_hsfll_find_mgr_by_spec(dev, spec);

	if (mgr) {
		return onoff_cancel_or_release(mgr, cli);
	}

	return -EINVAL;
}

static int api_resolve_global_hsfll(const struct device *dev,
				    const struct nrf_clock_spec *req_spec,
				    struct nrf_clock_spec *res_spec)
{
	int idx;

	idx = global_hsfll_resolve_spec_to_idx(dev, req_spec);
	if (idx < 0) {
		return -EINVAL;
	}

	global_hsfll_get_spec_by_idx(dev, idx, res_spec);
	return 0;
}

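/*
 * Illustrative consumer-side sketch only (not part of the driver): request
 * 128 MHz through the generic nRF clock control API and spin until the
 * request completes. The hsfll120 node label is an assumption and depends on
 * the SoC devicetree.
 *
 *	const struct device *hsfll = DEVICE_DT_GET(DT_NODELABEL(hsfll120));
 *	const struct nrf_clock_spec spec = { .frequency = MHZ(128) };
 *	struct onoff_client cli;
 *	int res;
 *
 *	sys_notify_init_spinwait(&cli.notify);
 *	nrf_clock_control_request(hsfll, &spec, &cli);
 *	while (sys_notify_fetch_result(&cli.notify, &res) == -EAGAIN) {
 *	}
 */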
static DEVICE_API(nrf_clock_control, driver_api) = {
	.std_api = {
		.on = api_nosys_on_off,
		.off = api_nosys_on_off,
	},
	.request = api_request_global_hsfll,
	.release = api_release_global_hsfll,
	.cancel_or_release = api_cancel_or_release_global_hsfll,
	.resolve = api_resolve_global_hsfll,
};

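/*
 * Frequency indices ascend (lowest frequency first) while GDFS settings
 * descend (GDFS_FREQ_HIGH is 0), so the index is simply mirrored.
 */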
static enum gdfs_frequency_setting global_hsfll_freq_idx_to_nrfs_freq(const struct device *dev,
								      uint8_t freq_idx)
{
	const struct global_hsfll_dev_config *dev_config = dev->config;

	return ARRAY_SIZE(dev_config->clock_frequencies) - 1 - freq_idx;
}

static const char *global_hsfll_gdfs_freq_to_str(enum gdfs_frequency_setting freq)
{
	switch (freq) {
	case GDFS_FREQ_HIGH:
		return "GDFS_FREQ_HIGH";
	case GDFS_FREQ_MEDHIGH:
		return "GDFS_FREQ_MEDHIGH";
	case GDFS_FREQ_MEDLOW:
		return "GDFS_FREQ_MEDLOW";
	case GDFS_FREQ_LOW:
		return "GDFS_FREQ_LOW";
	default:
		break;
	}

	return "UNKNOWN";
}

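/*
 * Work item run by the common clock configuration code whenever the
 * aggregated frequency requirement changes: translate the new index into a
 * GDFS setting, forward the request through the nrfs GDFS service, and arm
 * the request timeout.
 */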
static void global_hsfll_work_handler(struct k_work *work)
{
	struct global_hsfll_dev_data *dev_data =
		CONTAINER_OF(work, struct global_hsfll_dev_data, clk_cfg.work);
	const struct device *dev = dev_data->dev;
	uint8_t freq_idx;
	enum gdfs_frequency_setting target_freq;
	nrfs_err_t err;

	freq_idx = clock_config_update_begin(work);
	target_freq = global_hsfll_freq_idx_to_nrfs_freq(dev, freq_idx);

	LOG_DBG("requesting %s", global_hsfll_gdfs_freq_to_str(target_freq));
	err = nrfs_gdfs_request_freq(target_freq, dev_data);
	if (err != NRFS_SUCCESS) {
		clock_config_update_end(&dev_data->clk_cfg, -EIO);
		return;
	}

	k_work_schedule(&dev_data->timeout_dwork, GLOBAL_HSFLL_FREQ_REQ_TIMEOUT);
}

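/*
 * Work item run after a GDFS response arrives: stop the timeout and complete
 * the pending configuration update, reporting -EIO unless the requested
 * frequency was confirmed.
 */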
static void global_hsfll_evt_handler(struct k_work *work)
{
	struct global_hsfll_dev_data *dev_data =
		CONTAINER_OF(work, struct global_hsfll_dev_data, evt_work);
	int rc;

	k_work_cancel_delayable(&dev_data->timeout_dwork);
	rc = dev_data->evt == NRFS_GDFS_EVT_FREQ_CONFIRMED ? 0 : -EIO;
	clock_config_update_end(&dev_data->clk_cfg, rc);
}

#if GLOBAL_HSFLL_INIT_LOW_REQ
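/* Temporary handler used only for the synchronous low-frequency request at init. */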
static void global_hfsll_nrfs_gdfs_init_evt_handler(nrfs_gdfs_evt_t const *p_evt, void *context)
{
	struct global_hsfll_dev_data *dev_data = context;

	dev_data->evt = p_evt->type;
	k_sem_give(&dev_data->evt_sem);
}
#endif /* GLOBAL_HSFLL_INIT_LOW_REQ */

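/*
 * nrfs GDFS event callback: record the event type and hand processing over
 * to the system work queue, dropping the event if the previous one has not
 * been processed yet.
 */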
static void global_hfsll_nrfs_gdfs_evt_handler(nrfs_gdfs_evt_t const *p_evt, void *context)
{
	struct global_hsfll_dev_data *dev_data = context;

	if (k_work_is_pending(&dev_data->evt_work)) {
		return;
	}

	dev_data->evt = p_evt->type;
	k_work_submit(&dev_data->evt_work);
}

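/* Fail the pending update if no GDFS response arrives within the configured timeout. */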
static void global_hsfll_timeout_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct global_hsfll_dev_data *dev_data =
		CONTAINER_OF(dwork, struct global_hsfll_dev_data, timeout_dwork);

	clock_config_update_end(&dev_data->clk_cfg, -ETIMEDOUT);
}

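/*
 * Driver init: optionally make a blocking request for the lowest frequency
 * (when CONFIG_CLOCK_CONTROL_NRF_HSFLL_GLOBAL_REQ_LOW_FREQ is enabled) using
 * a temporary nrfs handler and a semaphore, then set up the onoff managers
 * and register the regular GDFS event handler.
 */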
static int global_hfsll_init(const struct device *dev)
{
	struct global_hsfll_dev_data *dev_data = dev->data;
	nrfs_err_t err;
	int rc;

	k_work_init_delayable(&dev_data->timeout_dwork, global_hsfll_timeout_handler);
	k_work_init(&dev_data->evt_work, global_hsfll_evt_handler);

#if GLOBAL_HSFLL_INIT_LOW_REQ
	k_sem_init(&dev_data->evt_sem, 0, 1);

	err = nrfs_gdfs_init(global_hfsll_nrfs_gdfs_init_evt_handler);
	if (err != NRFS_SUCCESS) {
		return -EIO;
	}

	LOG_DBG("initial request %s", global_hsfll_gdfs_freq_to_str(GDFS_FREQ_LOW));
	err = nrfs_gdfs_request_freq(GDFS_FREQ_LOW, dev_data);
	if (err != NRFS_SUCCESS) {
		return -EIO;
	}

	rc = k_sem_take(&dev_data->evt_sem, GLOBAL_HSFLL_FREQ_REQ_TIMEOUT);
	if (rc) {
		return -EIO;
	}

	if (dev_data->evt != NRFS_GDFS_EVT_FREQ_CONFIRMED) {
		return -EIO;
	}

	nrfs_gdfs_uninit();
#endif /* GLOBAL_HSFLL_INIT_LOW_REQ */

	rc = clock_config_init(&dev_data->clk_cfg,
			       ARRAY_SIZE(dev_data->clk_cfg.onoff),
			       global_hsfll_work_handler);
	if (rc < 0) {
		return rc;
	}

	err = nrfs_gdfs_init(global_hfsll_nrfs_gdfs_evt_handler);
	if (err != NRFS_SUCCESS) {
		return -EIO;
	}

	return 0;
}

static struct global_hsfll_dev_data driver_data = {
	.dev = DEVICE_DT_INST_GET(0),
};

static const struct global_hsfll_dev_config driver_config = {
	GLOBAL_HSFLL_CLOCK_FREQUENCIES
};

DEVICE_DT_INST_DEFINE(
	0,
	global_hfsll_init,
	NULL,
	&driver_data,
	&driver_config,
	POST_KERNEL,
	CONFIG_CLOCK_CONTROL_NRF_HSFLL_GLOBAL_INIT_PRIORITY,
	&driver_api
);