1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/clocksource.h>
34#include <linux/highmem.h>
35#include <rdma/mlx5-abi.h>
36#include "en.h"
37#include "clock.h"
38
/* Shift for the cyclecounter mult/shift pair used to convert free-running
 * device cycles to nanoseconds (see mlx5_init_clock()).
 */
enum {
	MLX5_CYCLES_SHIFT	= 23
};
42
/* Pin direction programmed into the MTPPS register's pin_mode field. */
enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};
47
/* Output waveform pattern for a pin in MLX5_PIN_MODE_OUT. */
enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};
52
/* Event generation mode passed to mlx5_set_mtppse().
 * (The "REPETETIVE" spelling is kept as-is — presumably it mirrors the
 * firmware/PRM naming; confirm before renaming.)
 */
enum {
	MLX5_EVENT_MODE_DISABLE	= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};
58
/* field_select bits for an MTPPS register write: each bit tells firmware
 * which of the fields in the payload to actually apply.
 * Bits 1 and 6 are not used by this driver.
 */
enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
};
67
68static u64 read_internal_timer(const struct cyclecounter *cc)
69{
70 struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
71 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
72 clock);
73
74 return mlx5_read_internal_timer(mdev) & cc->mask;
75}
76
/* Publish the current timecounter state to the page shared read-only with
 * userspace (RDMA ABI) so user code can expand HW timestamps itself.
 *
 * Writer side of a seqcount-like protocol: set the KERNEL_UPDATING bit in
 * ->sign before touching the data and advance ->sign past it afterwards,
 * so a reader observing an in-progress or changed sign retries.
 * Every caller in this file holds clock->lock, serializing writers.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	u32 sign;

	/* Page may not have been allocated/mapped at init time. */
	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	/* Full barrier: "update in progress" is visible before the data
	 * below changes.
	 */
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	clock_info->cycles = clock->tc.cycle_last;
	clock_info->mult = clock->cycles.mult;
	clock_info->nsec = clock->tc.nsec;
	clock_info->frac = clock->tc.frac;

	/* Release: new data is visible before sign becomes "even" again. */
	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
98
/* Work item: program into MTPPS the one-shot output timestamps that
 * mlx5_pps_event() armed, one register write per pending pin.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		/* Read-and-clear under the lock so the event handler can
		 * re-arm the pin while we talk to the hardware below.
		 */
		write_lock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_unlock_irqrestore(&clock->lock, flags);
		/* Zero means "nothing pending" for this pin. */
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		/* Best effort: write errors are intentionally ignored here. */
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}
127
/* Periodic work: read the timecounter often enough that the free-running
 * cycle counter can never wrap undetected between two reads (period is
 * computed in mlx5_init_clock()), refresh the userspace clock-info page,
 * then re-arm itself.
 */
static void mlx5_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
						overflow_work);
	unsigned long flags;

	write_lock_irqsave(&clock->lock, flags);
	timecounter_read(&clock->tc);
	mlx5_update_clock_info_page(clock->mdev);
	write_unlock_irqrestore(&clock->lock, flags);
	schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
141
142static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
143 const struct timespec64 *ts)
144{
145 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
146 ptp_info);
147 u64 ns = timespec64_to_ns(ts);
148 unsigned long flags;
149
150 write_lock_irqsave(&clock->lock, flags);
151 timecounter_init(&clock->tc, &clock->cycles, ns);
152 mlx5_update_clock_info_page(clock->mdev);
153 write_unlock_irqrestore(&clock->lock, flags);
154
155 return 0;
156}
157
158static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
159{
160 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
161 ptp_info);
162 u64 ns;
163 unsigned long flags;
164
165 write_lock_irqsave(&clock->lock, flags);
166 ns = timecounter_read(&clock->tc);
167 write_unlock_irqrestore(&clock->lock, flags);
168
169 *ts = ns_to_timespec64(ns);
170
171 return 0;
172}
173
174static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
175{
176 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
177 ptp_info);
178 unsigned long flags;
179
180 write_lock_irqsave(&clock->lock, flags);
181 timecounter_adjtime(&clock->tc, delta);
182 mlx5_update_clock_info_page(clock->mdev);
183 write_unlock_irqrestore(&clock->lock, flags);
184
185 return 0;
186}
187
188static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
189{
190 u64 adj;
191 u32 diff;
192 unsigned long flags;
193 int neg_adj = 0;
194 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
195 ptp_info);
196
197 if (delta < 0) {
198 neg_adj = 1;
199 delta = -delta;
200 }
201
202 adj = clock->nominal_c_mult;
203 adj *= delta;
204 diff = div_u64(adj, 1000000000ULL);
205
206 write_lock_irqsave(&clock->lock, flags);
207 timecounter_read(&clock->tc);
208 clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
209 clock->nominal_c_mult + diff;
210 mlx5_update_clock_info_page(clock->mdev);
211 write_unlock_irqrestore(&clock->lock, flags);
212
213 return 0;
214}
215
/* PTP callback: enable/disable external-timestamp capture on a pin.
 *
 * Programs the pin as an input through MTPPS, then arms (or disarms)
 * repetitive event generation through MTPPSE; resulting events arrive in
 * mlx5_pps_event().  Returns 0 or a negative errno.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	if (on) {
		/* -EBUSY if the pin is assigned to another function. */
		pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
		if (pin < 0)
			return -EBUSY;
		pin_mode = MLX5_PIN_MODE_IN;
		/* pattern 1 <=> caller requested the falling edge. */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		/* Disable: only the enable field is applied. */
		pin = rq->extts.index;
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* REPETETIVE (1) when enabling, DISABLE (0) when on == 0. */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
264
/* PTP callback: enable/disable a periodic output signal on a pin.
 *
 * Only a 1-second period is accepted (the ns >> 1 comparison tolerates an
 * odd nanosecond).  The requested start time is translated from wall time
 * to a device-cycle timestamp using the current timecounter state, and
 * the pin is programmed through MTPPS/MTPPSE.  Returns 0 or a negative
 * errno.
 */
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
				 struct ptp_clock_request *rq,
				 int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u64 nsec_now, nsec_delta, time_stamp = 0;
	u64 cycles_now, cycles_delta;
	struct timespec64 ts;
	unsigned long flags;
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;
	s64 ns;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	if (rq->perout.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	if (on) {
		/* -EBUSY if the pin is assigned to another function. */
		pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
				   rq->perout.index);
		if (pin < 0)
			return -EBUSY;

		pin_mode = MLX5_PIN_MODE_OUT;
		pattern = MLX5_OUT_PATTERN_PERIODIC;
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		ns = timespec64_to_ns(&ts);

		/* Hardware supports only a 1 Hz output. */
		if ((ns >> 1) != 500000000LL)
			return -EINVAL;

		ts.tv_sec = rq->perout.start.sec;
		ts.tv_nsec = rq->perout.start.nsec;
		ns = timespec64_to_ns(&ts);
		/* Convert the wall-clock start time into a device-cycle
		 * timestamp: cycles_delta = (ns - now_ns) * mult^-1, done
		 * via the inverse of the cyclecounter scaling.
		 */
		cycles_now = mlx5_read_internal_timer(mdev);
		write_lock_irqsave(&clock->lock, flags);
		nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
		nsec_delta = ns - nsec_now;
		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
					 clock->cycles.mult);
		write_unlock_irqrestore(&clock->lock, flags);
		time_stamp = cycles_now + cycles_delta;
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE |
			       MLX5_MTPPS_FS_TIME_STAMP;
	} else {
		/* Disable: only the enable field is applied. */
		pin = rq->perout.index;
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* REPETETIVE (1) when enabling, DISABLE (0) when on == 0. */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
340
341static int mlx5_pps_configure(struct ptp_clock_info *ptp,
342 struct ptp_clock_request *rq,
343 int on)
344{
345 struct mlx5_clock *clock =
346 container_of(ptp, struct mlx5_clock, ptp_info);
347
348 clock->pps_info.enabled = !!on;
349 return 0;
350}
351
352static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
353 struct ptp_clock_request *rq,
354 int on)
355{
356 switch (rq->type) {
357 case PTP_CLK_REQ_EXTTS:
358 return mlx5_extts_configure(ptp, rq, on);
359 case PTP_CLK_REQ_PEROUT:
360 return mlx5_perout_configure(ptp, rq, on);
361 case PTP_CLK_REQ_PPS:
362 return mlx5_pps_configure(ptp, rq, on);
363 default:
364 return -EOPNOTSUPP;
365 }
366 return 0;
367}
368
369static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
370 enum ptp_pin_function func, unsigned int chan)
371{
372 return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
373}
374
/* Template ptp_clock_info, copied into clock->ptp_info by
 * mlx5_init_clock().  The pin counts and the enable/verify/pps fields are
 * left zero/NULL here and filled in at init time when the device reports
 * PPS capability (mlx5_get_pps_caps() / mlx5_init_pin_config()).
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_p2p",
	.max_adj	= 100000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= mlx5_ptp_adjfreq,
	.adjtime	= mlx5_ptp_adjtime,
	.gettime64	= mlx5_ptp_gettime,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};
391
392static int mlx5_init_pin_config(struct mlx5_clock *clock)
393{
394 int i;
395
396 clock->ptp_info.pin_config =
397 kcalloc(clock->ptp_info.n_pins,
398 sizeof(*clock->ptp_info.pin_config),
399 GFP_KERNEL);
400 if (!clock->ptp_info.pin_config)
401 return -ENOMEM;
402 clock->ptp_info.enable = mlx5_ptp_enable;
403 clock->ptp_info.verify = mlx5_ptp_verify;
404 clock->ptp_info.pps = 1;
405
406 for (i = 0; i < clock->ptp_info.n_pins; i++) {
407 snprintf(clock->ptp_info.pin_config[i].name,
408 sizeof(clock->ptp_info.pin_config[i].name),
409 "mlx5_pps%d", i);
410 clock->ptp_info.pin_config[i].index = i;
411 clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
412 clock->ptp_info.pin_config[i].chan = i;
413 }
414
415 return 0;
416}
417
/* Query the MTPPS register for PPS capabilities: total pin count, how
 * many pins can be inputs/outputs, and the per-pin mode capability for
 * pins 0-7 (MLX5_GET field names are compile-time, hence the unrolled
 * reads).  Results are stored in clock->ptp_info / clock->pps_info.
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
441
/* Handle a PPS hardware event (EQE) for the pin in eqe->data.pps.pin.
 *
 * EXTTS pin: convert the HW cycle timestamp to ns and report it to the
 * PTP core — as a PPSUSR event when PPS reporting is enabled, otherwise
 * as a plain EXTTS event.
 * PEROUT pin: compute the device-cycle timestamp of the next second
 * boundary, store it in pps_info.start[pin] and kick out_work to program
 * it (mlx5_pps_out()).
 *
 * NOTE(review): pin_config is dereferenced unconditionally — presumably
 * events only fire for pins that exist (n_pins != 0); verify against the
 * EQ setup path.
 */
void mlx5_pps_event(struct mlx5_core_dev *mdev,
		    struct mlx5_eqe *eqe)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct ptp_clock_event ptp_event;
	struct timespec64 ts;
	u64 nsec_now, nsec_delta;
	u64 cycles_now, cycles_delta;
	int pin = eqe->data.pps.pin;
	s64 ns;
	unsigned long flags;

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		/* NOTE(review): timecounter_cyc2time() is called here
		 * without clock->lock — looks racy against concurrent
		 * writers (adjfreq/settime); confirm.
		 */
		ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
					be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		/* Aim for the start of the next whole second. */
		mlx5_ptp_gettime(&clock->ptp_info, &ts);
		cycles_now = mlx5_read_internal_timer(mdev);
		ts.tv_sec += 1;
		ts.tv_nsec = 0;
		ns = timespec64_to_ns(&ts);
		write_lock_irqsave(&clock->lock, flags);
		nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
		nsec_delta = ns - nsec_now;
		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
					 clock->cycles.mult);
		clock->pps_info.start[pin] = cycles_now + cycles_delta;
		/* Arm out_work while still holding the lock so it cannot
		 * observe a half-written start[] entry.
		 */
		schedule_work(&clock->pps_info.out_work);
		write_unlock_irqrestore(&clock->lock, flags);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled event\n");
	}
}
487
/* One-time HW clock initialization: set up the cyclecounter/timecounter,
 * the page shared with userspace, the overflow work, the PPS pin table,
 * and finally register the PTP clock device.
 */
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 overflow_cycles;
	u64 ns;
	u64 frac = 0;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	if (!dev_freq) {
		/* No usable frequency cap: leave the clock uninitialized;
		 * mlx5_cleanup_clock() checks the same cap and bails too.
		 */
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}
	rwlock_init(&clock->lock);
	clock->cycles.read = read_internal_timer;
	clock->cycles.shift = MLX5_CYCLES_SHIFT;
	clock->cycles.mult = clocksource_khz2mult(dev_freq,
						  clock->cycles.shift);
	/* Keep the unadjusted mult; adjfreq derives offsets from it. */
	clock->nominal_c_mult = clock->cycles.mult;
	clock->cycles.mask = CLOCKSOURCE_MASK(41);
	clock->mdev = mdev;

	timecounter_init(&clock->tc, &clock->cycles,
			 ktime_to_ns(ktime_get_real()));

	/* Overflow period: schedule the timecounter refresh well before
	 * either (a) cycles * mult can overflow 63 bits in the cyc2ns
	 * math, or (b) the 41-bit counter wraps — whichever bound is
	 * smaller, halved for safety margin, then converted ns -> jiffies.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
	overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);

	ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
				 frac, &frac);
	do_div(ns, NSEC_PER_SEC / HZ);
	clock->overflow_period = ns;

	/* Page mapped into userspace (RDMA ABI) so applications can read
	 * timecounter state lock-free; failure here is non-fatal.
	 */
	mdev->clock_info_page = alloc_page(GFP_KERNEL);
	if (mdev->clock_info_page) {
		mdev->clock_info = kmap(mdev->clock_info_page);
		if (!mdev->clock_info) {
			__free_page(mdev->clock_info_page);
			mlx5_core_warn(mdev, "failed to map clock page\n");
		} else {
			mdev->clock_info->sign = 0;
			mdev->clock_info->nsec = clock->tc.nsec;
			mdev->clock_info->cycles = clock->tc.cycle_last;
			mdev->clock_info->mask = clock->cycles.mask;
			mdev->clock_info->mult = clock->nominal_c_mult;
			mdev->clock_info->shift = clock->cycles.shift;
			mdev->clock_info->frac = clock->tc.frac;
			mdev->clock_info->overflow_period =
						clock->overflow_period;
		}
	}

	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
	INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
	if (clock->overflow_period)
		schedule_delayed_work(&clock->overflow_work, 0);
	else
		mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");

	/* Configure the PHC from the template, then fill in PPS pins. */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Initialize 1PPS data structures */
	if (MLX5_PPS_CAP(mdev))
		mlx5_get_pps_caps(mdev);
	if (clock->ptp_info.n_pins)
		mlx5_init_pin_config(clock);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		/* Run without a PHC rather than failing device init. */
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}
}
571
/* Tear down everything mlx5_init_clock() created, in reverse dependency
 * order: unregister the PTP clock first (stops new callbacks), then flush
 * the work items, then release the userspace page and the pin table.
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	/* Init bailed out early on this same condition — nothing to undo. */
	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->overflow_work);

	if (mdev->clock_info) {
		kunmap(mdev->clock_info_page);
		__free_page(mdev->clock_info_page);
		mdev->clock_info = NULL;
	}

	/* kfree(NULL) is a no-op when pin_config was never allocated. */
	kfree(clock->ptp_info.pin_config);
}
595