// SPDX-License-Identifier: GPL-2.0
/*
 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
 *
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include "fec.h"

/* FEC 1588 register bits */
#define FEC_T_CTRL_SLAVE 0x00002000
#define FEC_T_CTRL_CAPTURE 0x00000800
#define FEC_T_CTRL_RESTART 0x00000200
#define FEC_T_CTRL_PERIOD_RST 0x00000030
#define FEC_T_CTRL_PERIOD_EN 0x00000010
#define FEC_T_CTRL_ENABLE 0x00000001

#define FEC_T_INC_MASK 0x0000007f
#define FEC_T_INC_OFFSET 0
#define FEC_T_INC_CORR_MASK 0x00007f00
#define FEC_T_INC_CORR_OFFSET 8

#define FEC_T_CTRL_PINPER 0x00000080
#define FEC_T_TF0_MASK 0x00000001
#define FEC_T_TF0_OFFSET 0
#define FEC_T_TF1_MASK 0x00000002
#define FEC_T_TF1_OFFSET 1
#define FEC_T_TF2_MASK 0x00000004
#define FEC_T_TF2_OFFSET 2
#define FEC_T_TF3_MASK 0x00000008
#define FEC_T_TF3_OFFSET 3
#define FEC_T_TDRE_MASK 0x00000001
#define FEC_T_TDRE_OFFSET 0
#define FEC_T_TMODE_MASK 0x0000003C
#define FEC_T_TMODE_OFFSET 2
#define FEC_T_TIE_MASK 0x00000040
#define FEC_T_TIE_OFFSET 6
#define FEC_T_TF_MASK 0x00000080
#define FEC_T_TF_OFFSET 7

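/* 1588 timer register offsets, relative to the ENET register base (fep->hwp) */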
#define FEC_ATIME_CTRL 0x400
#define FEC_ATIME 0x404
#define FEC_ATIME_EVT_OFFSET 0x408
#define FEC_ATIME_EVT_PERIOD 0x40c
#define FEC_ATIME_CORR 0x410
#define FEC_ATIME_INC 0x414
#define FEC_TS_TIMESTAMP 0x418

#define FEC_TGSR 0x604
#define FEC_TCSR(n) (0x608 + n * 0x08)
#define FEC_TCCR(n) (0x60C + n * 0x08)
#define MAX_TIMER_CHANNEL 3
#define FEC_TMODE_TOGGLE 0x05
#define FEC_HIGH_PULSE 0x0F

#define FEC_CC_MULT (1 << 31)
#define FEC_COUNTER_PERIOD (1 << 31)
#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC
#define FEC_CHANNLE_0 0
#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0

#define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL
#define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL

/**
 * fec_ptp_enable_pps - enable or disable the PPS output
 * @fep: the fec_enet_private structure handle
 * @enable: enable the channel pps output
 *
 * This function enables the PPS output on the timer channel.
 */
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
	unsigned long flags;
	u32 val, tempval;
	struct timespec64 ts;
	u64 ns;

	if (fep->pps_enable == enable)
		return 0;

	fep->pps_channel = DEFAULT_PPS_CHANNEL;
	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	if (enable) {
		/* clear capture or output compare interrupt status if any.
		 */
		writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* It is recommended to double check that the TMODE field in
		 * the TCSR register is cleared before the first compare
		 * counter is written into the TCCR register. Just add a
		 * double check.
		 */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		do {
			val &= ~(FEC_T_TMODE_MASK);
			writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
			val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		} while (val & FEC_T_TMODE_MASK);

		/* Dummy read counter to update the counter */
		timecounter_read(&fep->tc);
		/* We want to find the first compare event at the next
		 * second boundary. So we need to know what the ptp time
		 * is now and how many nanoseconds are left before the next
		 * second. The remaining nanoseconds before the next second
		 * are NSEC_PER_SEC - ts.tv_nsec; adding them to the current
		 * timer value gives the next second.
		 */
		tempval = fep->cc.read(&fep->cc);
		/* Convert the ptp local counter to 1588 timestamp */
		ns = timecounter_cyc2time(&fep->tc, tempval);
		ts = ns_to_timespec64(ns);

		/* The tempval is less than 3 seconds, and so val is less than
		 * 4 seconds. No overflow for 32bit calculation.
		 */
		val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;

		/* Need to consider the situation that the current time is
		 * very close to the second boundary, which means NSEC_PER_SEC
		 * - ts.tv_nsec is close to zero (for example 20 ns); since the
		 * timer is still running while we calculate the first compare
		 * event, it is possible that the remaining nanoseconds run out
		 * before the compare counter is calculated and written into
		 * the TCCR register. To avoid this possibility, set the
		 * compare event to the second after next. The current setting
		 * is a 31-bit timer that wraps around after more than 2
		 * seconds, so it is okay to target the second after next.
		 */
		val += NSEC_PER_SEC;

		/* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
		 * ptp counter, which may cause a 32-bit wrap. Since
		 * (NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 seconds,
		 * the wrap cannot cause a problem. Only an offset bigger
		 * than fep->cc.mask would be an error.
		 */
		val &= fep->cc.mask;
		writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));

		/* Calculate the second compare event timestamp */
		fep->next_counter = (val + fep->reload_period) & fep->cc.mask;

		/* Enable compare event when overflow */
		val = readl(fep->hwp + FEC_ATIME_CTRL);
		val |= FEC_T_CTRL_PINPER;
		writel(val, fep->hwp + FEC_ATIME_CTRL);

		/* Compare channel setting. */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
		val &= ~(1 << FEC_T_TDRE_OFFSET);
		val &= ~(FEC_T_TMODE_MASK);
		val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
		writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* Write the second compare event timestamp and calculate
		 * the third timestamp. Refer to the TCCR register details
		 * in the spec.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
		fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	} else {
		writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
	}

	fep->pps_enable = enable;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

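/**
 * fec_ptp_pps_perout - arm the periodic output compare channel
 * @fep: the fec_enet_private structure handle
 *
 * Program the first two compare events for the periodic output signal
 * from the previously stored start time (fep->perout_stime). Returns a
 * negative value if the start time is already too close to the current
 * 1588 time to be programmed safely.
 */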
static int fec_ptp_pps_perout(struct fec_enet_private *fep)
{
	u32 compare_val, ptp_hc, temp_val;
	u64 curr_time;
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* Update time counter */
	timecounter_read(&fep->tc);

	/* Get the current ptp hardware time counter */
	temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
	temp_val |= FEC_T_CTRL_CAPTURE;
	writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
	if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
		udelay(1);

	ptp_hc = readl(fep->hwp + FEC_ATIME);

	/* Convert the ptp local counter to 1588 timestamp */
	curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);

	/* If the pps start time is less than the current time plus 100 ms,
	 * just return an error. The software might not be able to write the
	 * comparison value into the FEC_TCCR register in time and would miss
	 * the start time.
	 */
	if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
		dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
		return -1;
	}

	compare_val = fep->perout_stime - curr_time + ptp_hc;
	compare_val &= fep->cc.mask;

	writel(compare_val, fep->hwp + FEC_TCCR(fep->pps_channel));
	fep->next_counter = (compare_val + fep->reload_period) & fep->cc.mask;

	/* Enable compare event when overflow */
	temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
	temp_val |= FEC_T_CTRL_PINPER;
	writel(temp_val, fep->hwp + FEC_ATIME_CTRL);

	/* Compare channel setting. */
	temp_val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
	temp_val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
	temp_val &= ~(1 << FEC_T_TDRE_OFFSET);
	temp_val &= ~(FEC_T_TMODE_MASK);
	temp_val |= (FEC_TMODE_TOGGLE << FEC_T_TMODE_OFFSET);
	writel(temp_val, fep->hwp + FEC_TCSR(fep->pps_channel));

	/* Write the second compare event timestamp and calculate
	 * the third timestamp. Refer to the TCCR register details
	 * in the spec.
	 */
	writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
	fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

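/* hrtimer callback used when the periodic output start time is too far in
 * the future to fit in the 31-bit compare register; it arms the compare
 * channel about one second before the start time (see fec_ptp_enable()).
 */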
static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
{
	struct fec_enet_private *fep = container_of(timer,
					struct fec_enet_private, perout_timer);

	fec_ptp_pps_perout(fep);

	return HRTIMER_NORESTART;
}

/**
 * fec_ptp_read - read raw cycle counter (to be used by time counter)
 * @cc: the cyclecounter structure
 *
 * This function reads the cyclecounter registers and is called by the
 * cyclecounter structure used to construct a ns counter from the
 * arbitrary fixed point registers
 */
static u64 fec_ptp_read(const struct cyclecounter *cc)
{
	struct fec_enet_private *fep =
		container_of(cc, struct fec_enet_private, cc);
	u32 tempval;

	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
	tempval |= FEC_T_CTRL_CAPTURE;
	writel(tempval, fep->hwp + FEC_ATIME_CTRL);

	if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
		udelay(1);

	return readl(fep->hwp + FEC_ATIME);
}

/**
 * fec_ptp_start_cyclecounter - create the cycle counter from hw
 * @ndev: network device
 *
 * This function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point cycles registers in the hardware.
 */
void fec_ptp_start_cyclecounter(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned long flags;
	int inc;

	inc = 1000000000 / fep->cycle_speed;

	/* grab the ptp lock */
	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* 1ns counter */
	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);

	/* use 31-bit timer counter */
	writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);

	writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
	       fep->hwp + FEC_ATIME_CTRL);

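	/* With mult = 2^31 and shift = 31 below, cyclecounter_cyc2ns() maps
	 * one timer tick to exactly one nanosecond, matching the counter
	 * above, which FEC_ATIME_INC programs to advance in nanoseconds.
	 */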
	memset(&fep->cc, 0, sizeof(fep->cc));
	fep->cc.read = fec_ptp_read;
	fep->cc.mask = CLOCKSOURCE_MASK(31);
	fep->cc.shift = 31;
	fep->cc.mult = FEC_CC_MULT;

	/* reset the ns time counter */
	timecounter_init(&fep->tc, &fep->cc, 0);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}

/**
 * fec_ptp_adjfine - adjust ptp cycle frequency
 * @ptp: the ptp clock structure
 * @scaled_ppm: scaled parts per million adjustment from base
 *
 * Adjust the frequency of the ptp cycle counter by the
 * indicated amount from the base frequency.
 *
 * Scaled parts per million is ppm with a 16-bit binary fractional field.
 *
 * Because the ENET hardware frequency adjustment is complex,
 * a software method is used to do it.
 */
static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	unsigned long flags;
	int neg_adj = 0;
	u32 i, tmp;
	u32 corr_inc, corr_period;
	u32 corr_ns;
	u64 lhs, rhs;

	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);

	if (ppb == 0)
		return 0;

	if (ppb < 0) {
		ppb = -ppb;
		neg_adj = 1;
	}

	/* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC;
	 * try to find a corr_inc between 1 and fep->ptp_inc that
	 * meets the adjustment requirement.
	 */
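	/* As a purely illustrative example (the numbers are not from any
	 * datasheet): with ptp_inc = 40 ns and ppb = 100, rhs = 4000, so the
	 * loop picks corr_inc = 1 and corr_period = 250000, i.e. one extra
	 * (or missing) nanosecond every 250000 ticks of 40 ns, which is
	 * 1 ns per 10 ms = 100 ppb.
	 */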
	lhs = NSEC_PER_SEC;
	rhs = (u64)ppb * (u64)fep->ptp_inc;
	for (i = 1; i <= fep->ptp_inc; i++) {
		if (lhs >= rhs) {
			corr_inc = i;
			corr_period = div_u64(lhs, rhs);
			break;
		}
		lhs += NSEC_PER_SEC;
	}
	/* Not found? Set it to a high value - double speed
	 * correction in every clock step.
	 */
	if (i > fep->ptp_inc) {
		corr_inc = fep->ptp_inc;
		corr_period = 1;
	}

	if (neg_adj)
		corr_ns = fep->ptp_inc - corr_inc;
	else
		corr_ns = fep->ptp_inc + corr_inc;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
	tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
	writel(tmp, fep->hwp + FEC_ATIME_INC);
	corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
	writel(corr_period, fep->hwp + FEC_ATIME_CORR);
	/* dummy read to update the timer. */
	timecounter_read(&fep->tc);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the cycle counter by
 *
 * adjust the timer by resetting the timecounter structure.
 */
static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	timecounter_adjtime(&fep->tc, delta);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_gettime
 * @ptp: the ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * read the timecounter and return the correct value in ns,
 * after converting it into a struct timespec.
 */
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct fec_enet_private *adapter =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	u64 ns;
	unsigned long flags;

	mutex_lock(&adapter->ptp_clk_mutex);
	/* Check the ptp clock */
	if (!adapter->ptp_clk_on) {
		mutex_unlock(&adapter->ptp_clk_mutex);
		return -EINVAL;
	}
	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_read(&adapter->tc);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
	mutex_unlock(&adapter->ptp_clk_mutex);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * fec_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec containing the new time for the cycle counter
 *
 * reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 */
static int fec_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);

	u64 ns;
	unsigned long flags;
	u32 counter;

	mutex_lock(&fep->ptp_clk_mutex);
	/* Check the ptp clock */
	if (!fep->ptp_clk_on) {
		mutex_unlock(&fep->ptp_clk_mutex);
		return -EINVAL;
	}

	ns = timespec64_to_ns(ts);
	/* Get the timer value based on timestamp.
	 * Update the counter with the masked value.
	 */
	counter = ns & fep->cc.mask;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	writel(counter, fep->hwp + FEC_ATIME);
	timecounter_init(&fep->tc, &fep->cc, ns);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	mutex_unlock(&fep->ptp_clk_mutex);
	return 0;
}

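/**
 * fec_ptp_pps_disable - stop pulse/periodic output on a timer channel
 * @fep: the fec_enet_private structure handle
 * @channel: the timer channel to shut down
 *
 * Clearing TCSR disables the compare output and its interrupt.
 */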
static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
{
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	writel(0, fep->hwp + FEC_TCSR(channel));
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 *
 */
static int fec_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	ktime_t timeout;
	struct timespec64 start_time, period;
	u64 curr_time, delta, period_ns;
	unsigned long flags;
	int ret = 0;

	if (rq->type == PTP_CLK_REQ_PPS) {
		ret = fec_ptp_enable_pps(fep, on);

		return ret;
	} else if (rq->type == PTP_CLK_REQ_PEROUT) {
		/* Reject requests with unsupported flags */
		if (rq->perout.flags)
			return -EOPNOTSUPP;

		if (rq->perout.index != DEFAULT_PPS_CHANNEL)
			return -EOPNOTSUPP;

		fep->pps_channel = DEFAULT_PPS_CHANNEL;
		period.tv_sec = rq->perout.period.sec;
		period.tv_nsec = rq->perout.period.nsec;
		period_ns = timespec64_to_ns(&period);

		/* The FEC PTP timer only has 31 bits, so periods that
		 * exceed 4 s are not supported.
		 */
		if (period_ns > FEC_PTP_MAX_NSEC_PERIOD) {
			dev_err(&fep->pdev->dev, "The period must equal to or less than 4s!\n");
			return -EOPNOTSUPP;
		}

		fep->reload_period = div_u64(period_ns, 2);
		if (on && fep->reload_period) {
			/* Convert 1588 timestamp to ns */
			start_time.tv_sec = rq->perout.start.sec;
			start_time.tv_nsec = rq->perout.start.nsec;
			fep->perout_stime = timespec64_to_ns(&start_time);

			mutex_lock(&fep->ptp_clk_mutex);
			if (!fep->ptp_clk_on) {
				dev_err(&fep->pdev->dev, "Error: PTP clock is closed!\n");
				mutex_unlock(&fep->ptp_clk_mutex);
				return -EOPNOTSUPP;
			}
			spin_lock_irqsave(&fep->tmreg_lock, flags);
			/* Read current timestamp */
			curr_time = timecounter_read(&fep->tc);
			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			mutex_unlock(&fep->ptp_clk_mutex);

			/* Calculate time difference */
			delta = fep->perout_stime - curr_time;

			if (fep->perout_stime <= curr_time) {
				dev_err(&fep->pdev->dev, "Start time must larger than current time!\n");
				return -EINVAL;
			}

			/* Because the FEC timer counter only has 31 bits, only
			 * the low 31 bits of the time comparison register
			 * FEC_TCCR can be set. If the start time of the pps
			 * signal is more than 0x80000000 ns ahead of the
			 * current time, a software timer is used instead; it
			 * expires about 1 second before the start time so that
			 * FEC_TCCR can then be set.
			 */
			if (delta > FEC_PTP_MAX_NSEC_COUNTER) {
				timeout = ns_to_ktime(delta - NSEC_PER_SEC);
				hrtimer_start(&fep->perout_timer, timeout, HRTIMER_MODE_REL);
			} else {
				return fec_ptp_pps_perout(fep);
			}
		} else {
			fec_ptp_pps_disable(fep, fep->pps_channel);
		}

		return 0;
	} else {
		return -EOPNOTSUPP;
	}
}

/**
 * fec_ptp_disable_hwts - disable hardware time stamping
 * @ndev: pointer to net_device
 */
void fec_ptp_disable_hwts(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fep->hwts_tx_en = 0;
	fep->hwts_rx_en = 0;
}

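/**
 * fec_ptp_set - apply a hardware timestamping configuration from user space
 * @ndev: network device
 * @ifr: user request carrying a struct hwtstamp_config
 *
 * Enable or disable timestamping of transmitted and received frames and
 * report the rx filter actually applied back to user space.
 */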
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		fep->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		fep->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		fep->hwts_rx_en = 0;
		break;

	default:
		fep->hwts_rx_en = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

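/**
 * fec_ptp_get - report the current hardware timestamping configuration
 * @ndev: network device
 * @ifr: user request to fill with a struct hwtstamp_config
 */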
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (fep->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

/*
 * fec_time_keep - call timecounter_read every second to avoid timer overrun
 * because ENET only supports a 32bit counter, which would overflow within 4s
 */
static void fec_time_keep(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
	unsigned long flags;

	mutex_lock(&fep->ptp_clk_mutex);
	if (fep->ptp_clk_on) {
		spin_lock_irqsave(&fep->tmreg_lock, flags);
		timecounter_read(&fep->tc);
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	}
	mutex_unlock(&fep->ptp_clk_mutex);

	schedule_delayed_work(&fep->time_keep, HZ);
}

/* This function checks the pps event and reloads the timer compare counter. */
static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u8 channel = fep->pps_channel;
	struct ptp_clock_event event;

	val = readl(fep->hwp + FEC_TCSR(channel));
	if (val & FEC_T_TF_MASK) {
		/* Write the next-next compare value (not the next one,
		 * according to the spec) to the register
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
		do {
			writel(val, fep->hwp + FEC_TCSR(channel));
		} while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);

		/* Update the counter */
		fep->next_counter = (fep->next_counter + fep->reload_period) &
				    fep->cc.mask;

		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(fep->ptp_clock, &event);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/**
 * fec_ptp_init
 * @pdev: The FEC network adapter
 * @irq_idx: the interrupt index
 *
 * This function performs the required steps for enabling ptp
 * support. If ptp support has already been loaded it simply calls the
 * cyclecounter init routine and exits.
 */

void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int irq;
	int ret;

	fep->ptp_caps.owner = THIS_MODULE;
	strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));

	fep->ptp_caps.max_adj = 250000000;
	fep->ptp_caps.n_alarm = 0;
	fep->ptp_caps.n_ext_ts = 0;
	fep->ptp_caps.n_per_out = 1;
	fep->ptp_caps.n_pins = 0;
	fep->ptp_caps.pps = 1;
	fep->ptp_caps.adjfine = fec_ptp_adjfine;
	fep->ptp_caps.adjtime = fec_ptp_adjtime;
	fep->ptp_caps.gettime64 = fec_ptp_gettime;
	fep->ptp_caps.settime64 = fec_ptp_settime;
	fep->ptp_caps.enable = fec_ptp_enable;

	fep->cycle_speed = clk_get_rate(fep->clk_ptp);
	if (!fep->cycle_speed) {
		fep->cycle_speed = NSEC_PER_SEC;
		dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
	}
	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;

	spin_lock_init(&fep->tmreg_lock);

	fec_ptp_start_cyclecounter(ndev);

	INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);

	hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
	fep->perout_timer.function = fec_ptp_pps_perout_handler;

	irq = platform_get_irq_byname_optional(pdev, "pps");
	if (irq < 0)
		irq = platform_get_irq_optional(pdev, irq_idx);
	/* Failure to get an irq is not fatal,
	 * only the PTP_CLOCK_PPS clock events should stop
	 */
	if (irq >= 0) {
		ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
				       0, pdev->name, ndev);
		if (ret < 0)
			dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
				 ret);
	}

	fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
	if (IS_ERR(fep->ptp_clock)) {
		fep->ptp_clock = NULL;
		dev_err(&pdev->dev, "ptp_clock_register failed\n");
	}

	schedule_delayed_work(&fep->time_keep, HZ);
}

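/**
 * fec_ptp_stop - tear down PTP support
 * @pdev: the FEC network adapter
 *
 * Cancel the time-keeping work and the periodic output hrtimer, then
 * unregister the PTP clock.
 */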
void fec_ptp_stop(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	cancel_delayed_work_sync(&fep->time_keep);
	hrtimer_cancel(&fep->perout_timer);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);
}