1
2
3
4
5
6
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/ptrace.h>
14#include <linux/errno.h>
15#include <linux/ioport.h>
16#include <linux/slab.h>
17#include <linux/interrupt.h>
18#include <linux/pci.h>
19#include <linux/delay.h>
20#include <linux/netdevice.h>
21#include <linux/etherdevice.h>
22#include <linux/skbuff.h>
23#include <linux/spinlock.h>
24#include <linux/workqueue.h>
25#include <linux/bitops.h>
26#include <linux/io.h>
27#include <linux/irq.h>
28#include <linux/clk.h>
29#include <linux/platform_device.h>
30#include <linux/phy.h>
31#include <linux/fec.h>
32#include <linux/of.h>
33#include <linux/of_device.h>
34#include <linux/of_gpio.h>
35#include <linux/of_net.h>
36
37#include "fec.h"
38
39
40#define FEC_T_CTRL_SLAVE 0x00002000
41#define FEC_T_CTRL_CAPTURE 0x00000800
42#define FEC_T_CTRL_RESTART 0x00000200
43#define FEC_T_CTRL_PERIOD_RST 0x00000030
44#define FEC_T_CTRL_PERIOD_EN 0x00000010
45#define FEC_T_CTRL_ENABLE 0x00000001
46
47#define FEC_T_INC_MASK 0x0000007f
48#define FEC_T_INC_OFFSET 0
49#define FEC_T_INC_CORR_MASK 0x00007f00
50#define FEC_T_INC_CORR_OFFSET 8
51
52#define FEC_T_CTRL_PINPER 0x00000080
53#define FEC_T_TF0_MASK 0x00000001
54#define FEC_T_TF0_OFFSET 0
55#define FEC_T_TF1_MASK 0x00000002
56#define FEC_T_TF1_OFFSET 1
57#define FEC_T_TF2_MASK 0x00000004
58#define FEC_T_TF2_OFFSET 2
59#define FEC_T_TF3_MASK 0x00000008
60#define FEC_T_TF3_OFFSET 3
61#define FEC_T_TDRE_MASK 0x00000001
62#define FEC_T_TDRE_OFFSET 0
63#define FEC_T_TMODE_MASK 0x0000003C
64#define FEC_T_TMODE_OFFSET 2
65#define FEC_T_TIE_MASK 0x00000040
66#define FEC_T_TIE_OFFSET 6
67#define FEC_T_TF_MASK 0x00000080
68#define FEC_T_TF_OFFSET 7
69
70#define FEC_ATIME_CTRL 0x400
71#define FEC_ATIME 0x404
72#define FEC_ATIME_EVT_OFFSET 0x408
73#define FEC_ATIME_EVT_PERIOD 0x40c
74#define FEC_ATIME_CORR 0x410
75#define FEC_ATIME_INC 0x414
76#define FEC_TS_TIMESTAMP 0x418
77
78#define FEC_TGSR 0x604
79#define FEC_TCSR(n) (0x608 + n * 0x08)
80#define FEC_TCCR(n) (0x60C + n * 0x08)
81#define MAX_TIMER_CHANNEL 3
82#define FEC_TMODE_TOGGLE 0x05
83#define FEC_HIGH_PULSE 0x0F
84
85#define FEC_CC_MULT (1 << 31)
86#define FEC_COUNTER_PERIOD (1 << 31)
87#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC
88#define FEC_CHANNLE_0 0
89#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0
90
91
92
93
94
95
96
97
/* fec_ptp_enable_pps - start or stop the hardware 1PPS output
 * @fep: the FEC private structure
 * @enable: nonzero to enable PPS generation, zero to disable it
 *
 * Programs timer channel DEFAULT_PPS_CHANNEL to generate a pulse every
 * fep->reload_period (1 s) of counter time, aligned to the next full
 * second of PTP time.  The compare interrupt (fec_pps_interrupt) reloads
 * the compare register to keep the train of pulses running.
 *
 * Returns 0 on success, -EINVAL if hardware timestamping is not active.
 */
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
	unsigned long flags;
	u32 val, tempval;
	int inc;
	struct timespec64 ts;
	u64 ns;
	val = 0;

	/* PPS output is only meaningful while the PTP stack has hardware
	 * timestamping enabled in at least one direction.
	 */
	if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
		dev_err(&fep->pdev->dev, "No ptp stack is running\n");
		return -EINVAL;
	}

	/* Nothing to do if we are already in the requested state */
	if (fep->pps_enable == enable)
		return 0;

	fep->pps_channel = DEFAULT_PPS_CHANNEL;
	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
	inc = fep->ptp_inc;	/* NOTE(review): read but not used below */

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	if (enable) {
		/* Clear any stale transfer flag on the channel (TF appears to
		 * be write-1-to-clear; see the clear loop in
		 * fec_pps_interrupt).
		 */
		writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* Force TMODE to 0 (channel disabled) before programming the
		 * first compare value, and read back until the hardware
		 * reports the field as cleared.
		 */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		do {
			val &= ~(FEC_T_TMODE_MASK);
			writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
			val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		} while (val & FEC_T_TMODE_MASK);

		/* Dummy read to refresh the timecounter state before the
		 * capture below; return value intentionally discarded.
		 */
		timecounter_read(&fep->tc);

		/* Latch the free-running counter into FEC_ATIME via the
		 * CAPTURE control bit, then read the captured value.
		 */
		tempval = readl(fep->hwp + FEC_ATIME_CTRL);
		tempval |= FEC_T_CTRL_CAPTURE;
		writel(tempval, fep->hwp + FEC_ATIME_CTRL);

		tempval = readl(fep->hwp + FEC_ATIME);

		/* Convert the captured counter value to a PTP timestamp so we
		 * can find the counter value of the next 1-second boundary.
		 */
		ns = timecounter_cyc2time(&fep->tc, tempval);
		ts = ns_to_timespec64(ns);

		/* Counter value at the next full second:
		 * current counter + time remaining in this second.
		 */
		val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;

		/* Skip one extra second: if the current time is very close to
		 * the second boundary, the remaining nanoseconds could elapse
		 * before the compare value is written to TCCR, and the first
		 * pulse would be missed.  Targeting the second-after-next is
		 * safe because it still fits within the counter wrap (the
		 * counter is masked with fep->cc.mask below).
		 */
		val += NSEC_PER_SEC;

		/* Fold into the counter width and arm the first compare */
		val &= fep->cc.mask;
		writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));

		/* Precompute the compare value for the following pulse */
		fep->next_counter = (val + fep->reload_period) & fep->cc.mask;

		/* Enable the timer pin period feature */
		val = readl(fep->hwp + FEC_ATIME_CTRL);
		val |= FEC_T_CTRL_PINPER;
		writel(val, fep->hwp + FEC_ATIME_CTRL);

		/* Configure the channel: set TF and TIE (interrupt enable),
		 * clear TDRE, and select the pulse output mode
		 * (FEC_HIGH_PULSE) in TMODE.
		 */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
		val &= ~(1 << FEC_T_TDRE_OFFSET);
		val &= ~(FEC_T_TMODE_MASK);
		val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
		writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* Write the second compare event now that the channel is
		 * armed, then advance next_counter for the interrupt handler.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
		fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	} else {
		/* Disable the channel entirely (TMODE = 0, interrupts off) */
		writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
	}

	fep->pps_enable = enable;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}
212
213
214
215
216
217
218
219
220
221static u64 fec_ptp_read(const struct cyclecounter *cc)
222{
223 struct fec_enet_private *fep =
224 container_of(cc, struct fec_enet_private, cc);
225 const struct platform_device_id *id_entry =
226 platform_get_device_id(fep->pdev);
227 u32 tempval;
228
229 tempval = readl(fep->hwp + FEC_ATIME_CTRL);
230 tempval |= FEC_T_CTRL_CAPTURE;
231 writel(tempval, fep->hwp + FEC_ATIME_CTRL);
232
233 if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
234 udelay(1);
235
236 return readl(fep->hwp + FEC_ATIME);
237}
238
239
240
241
242
243
244
245
246
/* fec_ptp_start_cyclecounter - program the FEC timer and (re)start timekeeping
 * @ndev: network device
 *
 * Sets the timer increment to one nanosecond's worth of ticks
 * (10^9 / fep->cycle_speed), programs the wrap period, enables the timer
 * with periodic reset, and rebuilds the 31-bit cyclecounter/timecounter
 * pair on top of it, seeding the timecounter from the current wall clock.
 */
void fec_ptp_start_cyclecounter(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned long flags;
	int inc;

	/* Nanoseconds added to the counter per timer clock tick */
	inc = 1000000000 / fep->cycle_speed;

	/* All timer register and timecounter updates under tmreg_lock */
	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* Program the per-tick increment */
	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);

	/* Counter wraps at FEC_COUNTER_PERIOD (2^31) */
	writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);

	/* Start the timer with periodic reset at the event period */
	writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
	       fep->hwp + FEC_ATIME_CTRL);

	/* Rebuild the cyclecounter: 31-bit mask matching the wrap period,
	 * mult/shift chosen so 1 counter unit == 1 ns (2^31 / 2^31).
	 */
	memset(&fep->cc, 0, sizeof(fep->cc));
	fep->cc.read = fec_ptp_read;
	fep->cc.mask = CLOCKSOURCE_MASK(31);
	fep->cc.shift = 31;
	fep->cc.mult = FEC_CC_MULT;

	/* Seed the software timecounter with the current wall-clock time */
	timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
278
279
280
281
282
283
284
285
286
287
288
289
290static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
291{
292 unsigned long flags;
293 int neg_adj = 0;
294 u32 i, tmp;
295 u32 corr_inc, corr_period;
296 u32 corr_ns;
297 u64 lhs, rhs;
298
299 struct fec_enet_private *fep =
300 container_of(ptp, struct fec_enet_private, ptp_caps);
301
302 if (ppb == 0)
303 return 0;
304
305 if (ppb < 0) {
306 ppb = -ppb;
307 neg_adj = 1;
308 }
309
310
311
312
313
314 lhs = NSEC_PER_SEC;
315 rhs = (u64)ppb * (u64)fep->ptp_inc;
316 for (i = 1; i <= fep->ptp_inc; i++) {
317 if (lhs >= rhs) {
318 corr_inc = i;
319 corr_period = div_u64(lhs, rhs);
320 break;
321 }
322 lhs += NSEC_PER_SEC;
323 }
324
325
326
327 if (i > fep->ptp_inc) {
328 corr_inc = fep->ptp_inc;
329 corr_period = 1;
330 }
331
332 if (neg_adj)
333 corr_ns = fep->ptp_inc - corr_inc;
334 else
335 corr_ns = fep->ptp_inc + corr_inc;
336
337 spin_lock_irqsave(&fep->tmreg_lock, flags);
338
339 tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
340 tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
341 writel(tmp, fep->hwp + FEC_ATIME_INC);
342 corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
343 writel(corr_period, fep->hwp + FEC_ATIME_CORR);
344
345 timecounter_read(&fep->tc);
346
347 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
348
349 return 0;
350}
351
352
353
354
355
356
357
358
359static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
360{
361 struct fec_enet_private *fep =
362 container_of(ptp, struct fec_enet_private, ptp_caps);
363 unsigned long flags;
364
365 spin_lock_irqsave(&fep->tmreg_lock, flags);
366 timecounter_adjtime(&fep->tc, delta);
367 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
368
369 return 0;
370}
371
372
373
374
375
376
377
378
379
380static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
381{
382 struct fec_enet_private *adapter =
383 container_of(ptp, struct fec_enet_private, ptp_caps);
384 u64 ns;
385 unsigned long flags;
386
387 spin_lock_irqsave(&adapter->tmreg_lock, flags);
388 ns = timecounter_read(&adapter->tc);
389 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
390
391 *ts = ns_to_timespec64(ns);
392
393 return 0;
394}
395
396
397
398
399
400
401
402
403
404static int fec_ptp_settime(struct ptp_clock_info *ptp,
405 const struct timespec64 *ts)
406{
407 struct fec_enet_private *fep =
408 container_of(ptp, struct fec_enet_private, ptp_caps);
409
410 u64 ns;
411 unsigned long flags;
412 u32 counter;
413
414 mutex_lock(&fep->ptp_clk_mutex);
415
416 if (!fep->ptp_clk_on) {
417 mutex_unlock(&fep->ptp_clk_mutex);
418 return -EINVAL;
419 }
420
421 ns = timespec64_to_ns(ts);
422
423
424
425 counter = ns & fep->cc.mask;
426
427 spin_lock_irqsave(&fep->tmreg_lock, flags);
428 writel(counter, fep->hwp + FEC_ATIME);
429 timecounter_init(&fep->tc, &fep->cc, ns);
430 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
431 mutex_unlock(&fep->ptp_clk_mutex);
432 return 0;
433}
434
435
436
437
438
439
440
441
442static int fec_ptp_enable(struct ptp_clock_info *ptp,
443 struct ptp_clock_request *rq, int on)
444{
445 struct fec_enet_private *fep =
446 container_of(ptp, struct fec_enet_private, ptp_caps);
447 int ret = 0;
448
449 if (rq->type == PTP_CLK_REQ_PPS) {
450 ret = fec_ptp_enable_pps(fep, on);
451
452 return ret;
453 }
454 return -EOPNOTSUPP;
455}
456
457
458
459
460
461
462
463int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
464{
465 struct fec_enet_private *fep = netdev_priv(ndev);
466
467 struct hwtstamp_config config;
468
469 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
470 return -EFAULT;
471
472
473 if (config.flags)
474 return -EINVAL;
475
476 switch (config.tx_type) {
477 case HWTSTAMP_TX_OFF:
478 fep->hwts_tx_en = 0;
479 break;
480 case HWTSTAMP_TX_ON:
481 fep->hwts_tx_en = 1;
482 break;
483 default:
484 return -ERANGE;
485 }
486
487 switch (config.rx_filter) {
488 case HWTSTAMP_FILTER_NONE:
489 if (fep->hwts_rx_en)
490 fep->hwts_rx_en = 0;
491 config.rx_filter = HWTSTAMP_FILTER_NONE;
492 break;
493
494 default:
495 fep->hwts_rx_en = 1;
496 config.rx_filter = HWTSTAMP_FILTER_ALL;
497 break;
498 }
499
500 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
501 -EFAULT : 0;
502}
503
504int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
505{
506 struct fec_enet_private *fep = netdev_priv(ndev);
507 struct hwtstamp_config config;
508
509 config.flags = 0;
510 config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
511 config.rx_filter = (fep->hwts_rx_en ?
512 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
513
514 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
515 -EFAULT : 0;
516}
517
518
519
520
521
522static void fec_time_keep(struct work_struct *work)
523{
524 struct delayed_work *dwork = to_delayed_work(work);
525 struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
526 u64 ns;
527 unsigned long flags;
528
529 mutex_lock(&fep->ptp_clk_mutex);
530 if (fep->ptp_clk_on) {
531 spin_lock_irqsave(&fep->tmreg_lock, flags);
532 ns = timecounter_read(&fep->tc);
533 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
534 }
535 mutex_unlock(&fep->ptp_clk_mutex);
536
537 schedule_delayed_work(&fep->time_keep, HZ);
538}
539
540
/* fec_pps_interrupt - timer-compare interrupt for the PPS channel
 * @irq: interrupt number
 * @dev_id: the net_device registered as the irq cookie
 *
 * On a compare match (TF set): reloads the compare register with the
 * precomputed next_counter, clears the flag, advances next_counter by
 * one reload period, and reports a PPS event to the PTP core.
 *
 * Returns IRQ_HANDLED if the channel fired, IRQ_NONE otherwise.
 */
static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u8 channel = fep->pps_channel;
	struct ptp_clock_event event;

	val = readl(fep->hwp + FEC_TCSR(channel));
	if (val & FEC_T_TF_MASK) {
		/* Arm the next compare event first so the channel keeps
		 * firing periodically even if flag clearing takes a while.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
		/* Clear TF by writing the read value back (TF appears to be
		 * write-1-to-clear); repeat until it reads back as cleared.
		 */
		do {
			writel(val, fep->hwp + FEC_TCSR(channel));
		} while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);

		/* Precompute the compare value for the event after next,
		 * folded into the counter width.
		 */
		fep->next_counter = (fep->next_counter + fep->reload_period) &
				fep->cc.mask;

		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(fep->ptp_clock, &event);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
570
571
572
573
574
575
576
577
578
579
580void fec_ptp_init(struct platform_device *pdev, int irq_idx)
581{
582 struct net_device *ndev = platform_get_drvdata(pdev);
583 struct fec_enet_private *fep = netdev_priv(ndev);
584 int irq;
585 int ret;
586
587 fep->ptp_caps.owner = THIS_MODULE;
588 snprintf(fep->ptp_caps.name, 16, "fec ptp");
589
590 fep->ptp_caps.max_adj = 250000000;
591 fep->ptp_caps.n_alarm = 0;
592 fep->ptp_caps.n_ext_ts = 0;
593 fep->ptp_caps.n_per_out = 0;
594 fep->ptp_caps.n_pins = 0;
595 fep->ptp_caps.pps = 1;
596 fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
597 fep->ptp_caps.adjtime = fec_ptp_adjtime;
598 fep->ptp_caps.gettime64 = fec_ptp_gettime;
599 fep->ptp_caps.settime64 = fec_ptp_settime;
600 fep->ptp_caps.enable = fec_ptp_enable;
601
602 fep->cycle_speed = clk_get_rate(fep->clk_ptp);
603 fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
604
605 spin_lock_init(&fep->tmreg_lock);
606
607 fec_ptp_start_cyclecounter(ndev);
608
609 INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
610
611 irq = platform_get_irq_byname(pdev, "pps");
612 if (irq < 0)
613 irq = platform_get_irq(pdev, irq_idx);
614
615
616
617 if (irq >= 0) {
618 ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
619 0, pdev->name, ndev);
620 if (ret < 0)
621 dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
622 ret);
623 }
624
625 fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
626 if (IS_ERR(fep->ptp_clock)) {
627 fep->ptp_clock = NULL;
628 pr_err("ptp_clock_register failed\n");
629 }
630
631 schedule_delayed_work(&fep->time_keep, HZ);
632}
633
634void fec_ptp_stop(struct platform_device *pdev)
635{
636 struct net_device *ndev = platform_get_drvdata(pdev);
637 struct fec_enet_private *fep = netdev_priv(ndev);
638
639 cancel_delayed_work_sync(&fep->time_keep);
640 if (fep->ptp_clock)
641 ptp_clock_unregister(fep->ptp_clock);
642}
643