// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */

#include "tsnep.h"

#include <net/pkt_sched.h>

/* save one operation at the end for additional operation at list change */
#define TSNEP_MAX_GCL_NUM (TSNEP_GCL_COUNT - 1)

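/* check that the schedule is representable in hardware: known cycle time,
 * limited number of entries, only gate mask commands, valid gate mask,
 * minimum interval per entry, consistent cycle time and a cycle time
 * extension shorter than the cycle time
 */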
static int tsnep_validate_gcl(struct tc_taprio_qopt_offload *qopt)
{
	int i;
	u64 cycle_time;

	if (!qopt->cycle_time)
		return -ERANGE;
	if (qopt->num_entries > TSNEP_MAX_GCL_NUM)
		return -EINVAL;
	cycle_time = 0;
	for (i = 0; i < qopt->num_entries; i++) {
		if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
			return -EINVAL;
		if (qopt->entries[i].gate_mask & ~TSNEP_GCL_MASK)
			return -EINVAL;
		if (qopt->entries[i].interval < TSNEP_GCL_MIN_INTERVAL)
			return -EINVAL;
		cycle_time += qopt->entries[i].interval;
	}
	if (qopt->cycle_time != cycle_time)
		return -EINVAL;
	if (qopt->cycle_time_extension >= qopt->cycle_time)
		return -EINVAL;

	return 0;
}

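/* write a single gate control list operation to the shadow copy in memory and
 * to the hardware list; the posted write is optionally flushed with a read
 */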
static void tsnep_write_gcl_operation(struct tsnep_gcl *gcl, int index,
				      u32 properties, u32 interval, bool flush)
{
	void __iomem *addr = gcl->addr +
			     sizeof(struct tsnep_gcl_operation) * index;

	gcl->operation[index].properties = properties;
	gcl->operation[index].interval = interval;

	iowrite32(properties, addr);
	iowrite32(interval, addr + sizeof(u32));

	if (flush) {
		/* flush write with read access */
		ioread32(addr);
	}
}

static u64 tsnep_change_duration(struct tsnep_gcl *gcl, int index)
{
	u64 duration;
	int count;

	/* change needs to be triggered one or two operations before start of
	 * new gate control list
	 * - change is triggered at start of operation (minimum one operation)
	 * - operation with adjusted interval is inserted on demand to exactly
	 *   meet the start of the new gate control list (optional)
	 *
	 * additionally properties are read directly after start of previous
	 * operation
	 *
	 * therefore, three operations need to be considered for the limit
	 */
	duration = 0;
	count = 3;
	while (count) {
		duration += gcl->operation[index].interval;

		index--;
		if (index < 0)
			index = gcl->count - 1;

		count--;
	}

	return duration;
}

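/* write the complete gate control list to hardware and calculate its change
 * limit, i.e., the time needed between enable and start of a new gate control
 * list while this list is active
 */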
static void tsnep_write_gcl(struct tsnep_gcl *gcl,
			    struct tc_taprio_qopt_offload *qopt)
{
	int i;
	u32 properties;
	u64 extend;
	u64 cut;

	gcl->base_time = ktime_to_ns(qopt->base_time);
	gcl->cycle_time = qopt->cycle_time;
	gcl->cycle_time_extension = qopt->cycle_time_extension;

	for (i = 0; i < qopt->num_entries; i++) {
		properties = qopt->entries[i].gate_mask;
		if (i == (qopt->num_entries - 1))
			properties |= TSNEP_GCL_LAST;

		tsnep_write_gcl_operation(gcl, i, properties,
					  qopt->entries[i].interval, true);
	}
	gcl->count = qopt->num_entries;

	/* calculate change limit; i.e., the time needed between enable and
	 * start of new gate control list
	 */

	/* case 1: extend cycle time for change
	 * - change duration of last operation
	 * - cycle time extension
	 */
	extend = tsnep_change_duration(gcl, gcl->count - 1);
	extend += gcl->cycle_time_extension;

	/* case 2: cut cycle time for change
	 * - maximum change duration
	 */
	cut = 0;
	for (i = 0; i < gcl->count; i++)
		cut = max(cut, tsnep_change_duration(gcl, i));

	/* use maximum, because the actual case (extend or cut) can be
	 * determined only after limit is known (chicken-and-egg problem)
	 */
	gcl->change_limit = max(extend, cut);
}

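/* calculate the first cycle start of the gate control list after the given
 * time limit
 */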
static u64 tsnep_gcl_start_after(struct tsnep_gcl *gcl, u64 limit)
{
	u64 start = gcl->base_time;
	u64 n;

	if (start <= limit) {
		n = div64_u64(limit - start, gcl->cycle_time);
		start += (n + 1) * gcl->cycle_time;
	}

	return start;
}

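/* calculate the last cycle start of the gate control list before the given
 * time limit
 */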
static u64 tsnep_gcl_start_before(struct tsnep_gcl *gcl, u64 limit)
{
	u64 start = gcl->base_time;
	u64 n;

	n = div64_u64(limit - start, gcl->cycle_time);
	start += n * gcl->cycle_time;
	if (start == limit)
		start -= gcl->cycle_time;

	return start;
}

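/* let the operation before index trigger the change to the new gate control
 * list; returns the start time of that triggering operation
 */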
static u64 tsnep_set_gcl_change(struct tsnep_gcl *gcl, int index, u64 change,
				bool insert)
{
	/* previous operation triggers change and properties are evaluated at
	 * start of operation
	 */
	if (index == 0)
		index = gcl->count - 1;
	else
		index = index - 1;
	change -= gcl->operation[index].interval;

	/* optionally change to new list with additional operation in between */
	if (insert) {
		void __iomem *addr = gcl->addr +
				     sizeof(struct tsnep_gcl_operation) * index;

		gcl->operation[index].properties |= TSNEP_GCL_INSERT;
		iowrite32(gcl->operation[index].properties, addr);
	}

	return change;
}

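/* undo modifications of the current gate control list made for a list change
 * that timed out
 */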
static void tsnep_clean_gcl(struct tsnep_gcl *gcl)
{
	int i;
	u32 mask = TSNEP_GCL_LAST | TSNEP_GCL_MASK;
	void __iomem *addr;

	/* search for insert operation and reset properties */
	for (i = 0; i < gcl->count; i++) {
		if (gcl->operation[i].properties & ~mask) {
			addr = gcl->addr +
			       sizeof(struct tsnep_gcl_operation) * i;

			gcl->operation[i].properties &= mask;
			iowrite32(gcl->operation[i].properties, addr);

			break;
		}
	}
}

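/* use the reserved last slot of the list to insert an additional operation
 * with the gate state of operation ref; the change to the new list is
 * triggered directly after the inserted operation
 */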
static u64 tsnep_insert_gcl_operation(struct tsnep_gcl *gcl, int ref,
				      u64 change, u32 interval)
{
	u32 properties;

	properties = gcl->operation[ref].properties & TSNEP_GCL_MASK;
	/* change to new list directly after inserted operation */
	properties |= TSNEP_GCL_CHANGE;

	/* last operation of list is reserved for the insert operation */
	tsnep_write_gcl_operation(gcl, TSNEP_GCL_COUNT - 1, properties,
				  interval, false);

	return tsnep_set_gcl_change(gcl, ref, change, true);
}

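/* extend the current cycle that ends at start by the given extension: an
 * operation with adjusted interval takes the place of the last operation so
 * that the current list ends exactly at the start of the new list
 */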
static u64 tsnep_extend_gcl(struct tsnep_gcl *gcl, u64 start, u32 extension)
{
	int ref = gcl->count - 1;
	u32 interval = gcl->operation[ref].interval + extension;

	start -= gcl->operation[ref].interval;

	return tsnep_insert_gcl_operation(gcl, ref, start, interval);
}

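/* cut the cycle of the current list that starts at start down to cycle_time:
 * operations are kept as long as they fit completely, and an operation with
 * adjusted interval is inserted if needed to meet the start of the new list
 * exactly
 */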
static u64 tsnep_cut_gcl(struct tsnep_gcl *gcl, u64 start, u64 cycle_time)
{
	u64 sum = 0;
	int i;

	/* find operation which shall be cut */
	for (i = 0; i < gcl->count; i++) {
		u64 sum_tmp = sum + gcl->operation[i].interval;
		u64 interval;

		/* sum up operations as long as cycle time is not exceeded */
		if (sum_tmp > cycle_time)
			break;

		/* remaining interval must be big enough for hardware */
		interval = cycle_time - sum_tmp;
		if (interval > 0 && interval < TSNEP_GCL_MIN_INTERVAL)
			break;

		sum = sum_tmp;
	}
	if (sum == cycle_time) {
		/* no need to cut operation itself or whole cycle
		 * => change exactly at operation
		 */
		return tsnep_set_gcl_change(gcl, i, start + sum, false);
	}
	return tsnep_insert_gcl_operation(gcl, i, start + sum,
					  cycle_time - sum);
}

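/* determine the start time of the new gate control list and program it; if
 * another list is currently active, its running cycle is cut or extended so
 * that the change happens exactly at the start of the new list
 */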
static int tsnep_enable_gcl(struct tsnep_adapter *adapter,
			    struct tsnep_gcl *gcl, struct tsnep_gcl *curr)
{
	u64 system_time;
	u64 timeout;
	u64 limit;

	/* estimate the timeout limit after the timeout has been enabled; the
	 * actual timeout limit in hardware will be earlier than this estimate,
	 * so we are on the safe side
	 */
	tsnep_get_system_time(adapter, &system_time);
	timeout = system_time + TSNEP_GC_TIMEOUT;

	if (curr)
		limit = timeout + curr->change_limit;
	else
		limit = timeout;

	gcl->start_time = tsnep_gcl_start_after(gcl, limit);

	/* the gate control time register is only 32 bit, so the start time
	 * must be in the near future (no driver support for the far future
	 * implemented)
	 */
	if ((gcl->start_time - system_time) >= U32_MAX)
		return -EAGAIN;

	if (curr) {
		/* change gate control list */
		u64 last;
		u64 change;

		last = tsnep_gcl_start_before(curr, gcl->start_time);
		if ((last + curr->cycle_time) == gcl->start_time)
			change = tsnep_cut_gcl(curr, last,
					       gcl->start_time - last);
		else if (((gcl->start_time - last) <=
			  curr->cycle_time_extension) ||
			 ((gcl->start_time - last) <= TSNEP_GCL_MIN_INTERVAL))
			change = tsnep_extend_gcl(curr, last,
						  gcl->start_time - last);
		else
			change = tsnep_cut_gcl(curr, last,
					       gcl->start_time - last);

		WARN_ON(change <= timeout);
		gcl->change = true;
		iowrite32(change & 0xFFFFFFFF, adapter->addr + TSNEP_GC_CHANGE);
	} else {
		/* start gate control list */
		WARN_ON(gcl->start_time <= timeout);
		gcl->change = false;
		iowrite32(gcl->start_time & 0xFFFFFFFF,
			  adapter->addr + TSNEP_GC_TIME);
	}

	return 0;
}

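/* apply or remove the taprio schedule: the new gate control list is written to
 * the currently unused list and then enabled; on timeout the enable is retried
 * with a new start/change time
 */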
static int tsnep_taprio(struct tsnep_adapter *adapter,
			struct tc_taprio_qopt_offload *qopt)
{
	struct tsnep_gcl *gcl;
	struct tsnep_gcl *curr;
	int retval;

	if (!adapter->gate_control)
		return -EOPNOTSUPP;

	if (!qopt->enable) {
		/* disable gate control if active */
		mutex_lock(&adapter->gate_control_lock);

		if (adapter->gate_control_active) {
			iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
			adapter->gate_control_active = false;
		}

		mutex_unlock(&adapter->gate_control_lock);

		return 0;
	}

	retval = tsnep_validate_gcl(qopt);
	if (retval)
		return retval;

	mutex_lock(&adapter->gate_control_lock);

	gcl = &adapter->gcl[adapter->next_gcl];
	tsnep_write_gcl(gcl, qopt);

	/* select current gate control list if active */
	if (adapter->gate_control_active) {
		if (adapter->next_gcl == 0)
			curr = &adapter->gcl[1];
		else
			curr = &adapter->gcl[0];
	} else {
		curr = NULL;
	}

	for (;;) {
		/* start timeout which discards a late enable; this helps to
		 * ensure that start/change time is in the future at enable
		 */
		iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);

		retval = tsnep_enable_gcl(adapter, gcl, curr);
		if (retval) {
			mutex_unlock(&adapter->gate_control_lock);

			return retval;
		}

		/* enable gate control list */
		if (adapter->next_gcl == 0)
			iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
		else
			iowrite8(TSNEP_GC_ENABLE_B, adapter->addr + TSNEP_GC);

		/* done if timeout did not happen */
		if (!(ioread32(adapter->addr + TSNEP_GC) &
		      TSNEP_GC_TIMEOUT_SIGNAL))
			break;

		/* timeout is acknowledged with any enable */
		iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);

		if (curr)
			tsnep_clean_gcl(curr);

		/* retry because of timeout */
	}

	adapter->gate_control_active = true;

	if (adapter->next_gcl == 0)
		adapter->next_gcl = 1;
	else
		adapter->next_gcl = 0;

	mutex_unlock(&adapter->gate_control_lock);

	return 0;
}

static int tsnep_tc_query_caps(struct tsnep_adapter *adapter,
			       struct tc_query_caps_base *base)
{
	switch (base->type) {
	case TC_SETUP_QDISC_TAPRIO: {
		struct tc_taprio_caps *caps = base->caps;

		if (!adapter->gate_control)
			return -EOPNOTSUPP;

		caps->gate_mask_per_txq = true;

		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
		   void *type_data)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);

	switch (type) {
	case TC_QUERY_CAPS:
		return tsnep_tc_query_caps(adapter, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return tsnep_taprio(adapter, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int tsnep_tc_init(struct tsnep_adapter *adapter)
{
	if (!adapter->gate_control)
		return 0;

	/* open all gates */
	iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
	iowrite32(TSNEP_GC_OPEN | TSNEP_GC_NEXT_OPEN, adapter->addr + TSNEP_GC);

	adapter->gcl[0].addr = adapter->addr + TSNEP_GCL_A;
	adapter->gcl[1].addr = adapter->addr + TSNEP_GCL_B;

	return 0;
}

void tsnep_tc_cleanup(struct tsnep_adapter *adapter)
{
	if (!adapter->gate_control)
		return;

	if (adapter->gate_control_active) {
		iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
		adapter->gate_control_active = false;
	}
}