1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/clocksource.h>
34#include <linux/highmem.h>
35#include <rdma/mlx5-abi.h>
36#include "en.h"
37
38
/* Compatibility fallback for older kernels that predate smp_store_mb():
 * map it onto the legacy set_mb() (store followed by a full barrier).
 */
#ifndef smp_store_mb
#define smp_store_mb(var, value) set_mb(var, value)
#endif
42
/* Fixed-point shift for the cyclecounter mult/shift pair; assigned to
 * clock->cycles.shift in mlx5_init_clock().
 */
enum {
	MLX5_CYCLES_SHIFT = 23
};
46
/* MTPPS pin_mode values: a pin either captures external events (IN)
 * or drives a signal (OUT).
 */
enum {
	MLX5_PIN_MODE_IN = 0x0,
	MLX5_PIN_MODE_OUT = 0x1,
};
51
/* MTPPS output pattern: single pulse vs. periodic signal. */
enum {
	MLX5_OUT_PATTERN_PULSE = 0x0,
	MLX5_OUT_PATTERN_PERIODIC = 0x1,
};
56
/* MTPPSE event-generation modes (note: "REPETETIVE" spelling is kept
 * as-is; the identifier is used by the configure helpers below).
 */
enum {
	MLX5_EVENT_MODE_DISABLE = 0x0,
	MLX5_EVENT_MODE_REPETETIVE = 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
};
62
/* Bits for the MTPPS register's field_select: they select which fields
 * of the register a SET access actually updates.
 */
enum {
	MLX5_MTPPS_FS_ENABLE = BIT(0x0),
	MLX5_MTPPS_FS_PATTERN = BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
};
71
72static u64 read_internal_timer(const struct cyclecounter *cc)
73{
74 struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
75 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
76 clock);
77
78 return mlx5_read_internal_timer(mdev) & cc->mask;
79}
80
/* Mirror the current timecounter state into the clock-info page shared
 * with userspace (struct mlx5_ib_clock_info).
 *
 * Writer-side torn-read protection: ->sign is first stored (with a full
 * barrier via smp_store_mb) as sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING
 * before the fields are written, then released as
 * sign + 2 * MLX5_IB_CLOCK_INFO_KERNEL_UPDATING afterwards, so readers
 * can detect an in-progress update and retry.
 *
 * NOTE(review): all visible callers hold clock->lock for writing
 * (settime/adjtime/adjfreq/overflow work) - confirm no unlocked caller
 * exists elsewhere.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	u32 sign;

	/* Device may have no shared page (allocation/map failed at init). */
	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	clock_info->cycles = clock->tc.cycle_last;
	clock_info->mult = clock->cycles.mult;
	clock_info->nsec = clock->tc.nsec;
	clock_info->frac = clock->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
102
103static void mlx5_pps_out(struct work_struct *work)
104{
105 struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
106 out_work);
107 struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
108 pps_info);
109 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
110 clock);
111 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
112 unsigned long flags;
113 int i;
114
115 for (i = 0; i < clock->ptp_info.n_pins; i++) {
116 u64 tstart;
117
118 write_lock_irqsave(&clock->lock, flags);
119 tstart = clock->pps_info.start[i];
120 clock->pps_info.start[i] = 0;
121 write_unlock_irqrestore(&clock->lock, flags);
122 if (!tstart)
123 continue;
124
125 MLX5_SET(mtpps_reg, in, pin, i);
126 MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
127 MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
128 mlx5_set_mtpps(mdev, in, sizeof(in));
129 }
130}
131
132static void mlx5_timestamp_overflow(struct work_struct *work)
133{
134 struct delayed_work *dwork = to_delayed_work(work);
135 struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
136 overflow_work);
137 unsigned long flags;
138
139 write_lock_irqsave(&clock->lock, flags);
140 timecounter_read(&clock->tc);
141 mlx5_update_clock_info_page(clock->mdev);
142 write_unlock_irqrestore(&clock->lock, flags);
143 schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
144}
145
146static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
147 const struct timespec64 *ts)
148{
149 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
150 ptp_info);
151 u64 ns = timespec64_to_ns(ts);
152 unsigned long flags;
153
154 write_lock_irqsave(&clock->lock, flags);
155 timecounter_init(&clock->tc, &clock->cycles, ns);
156 mlx5_update_clock_info_page(clock->mdev);
157 write_unlock_irqrestore(&clock->lock, flags);
158
159 return 0;
160}
161
162static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
163{
164 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
165 ptp_info);
166 u64 ns;
167 unsigned long flags;
168
169 write_lock_irqsave(&clock->lock, flags);
170 ns = timecounter_read(&clock->tc);
171 write_unlock_irqrestore(&clock->lock, flags);
172
173 *ts = ns_to_timespec64(ns);
174
175 return 0;
176}
177
178static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
179{
180 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
181 ptp_info);
182 unsigned long flags;
183
184 write_lock_irqsave(&clock->lock, flags);
185 timecounter_adjtime(&clock->tc, delta);
186 mlx5_update_clock_info_page(clock->mdev);
187 write_unlock_irqrestore(&clock->lock, flags);
188
189 return 0;
190}
191
192static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
193{
194 u64 adj;
195 u32 diff;
196 unsigned long flags;
197 int neg_adj = 0;
198 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
199 ptp_info);
200
201 if (delta < 0) {
202 neg_adj = 1;
203 delta = -delta;
204 }
205
206 adj = clock->nominal_c_mult;
207 adj *= delta;
208 diff = div_u64(adj, 1000000000ULL);
209
210 write_lock_irqsave(&clock->lock, flags);
211 timecounter_read(&clock->tc);
212 clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
213 clock->nominal_c_mult + diff;
214 mlx5_update_clock_info_page(clock->mdev);
215 write_unlock_irqrestore(&clock->lock, flags);
216
217 return 0;
218}
219
220static int mlx5_extts_configure(struct ptp_clock_info *ptp,
221 struct ptp_clock_request *rq,
222 int on)
223{
224 struct mlx5_clock *clock =
225 container_of(ptp, struct mlx5_clock, ptp_info);
226 struct mlx5_core_dev *mdev =
227 container_of(clock, struct mlx5_core_dev, clock);
228 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
229 u32 field_select = 0;
230 u8 pin_mode = 0;
231 u8 pattern = 0;
232 int pin = -1;
233 int err = 0;
234
235 if (!MLX5_PPS_CAP(mdev))
236 return -EOPNOTSUPP;
237
238 if (rq->extts.index >= clock->ptp_info.n_pins)
239 return -EINVAL;
240
241 if (on) {
242 pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
243 if (pin < 0)
244 return -EBUSY;
245 pin_mode = MLX5_PIN_MODE_IN;
246 pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
247 field_select = MLX5_MTPPS_FS_PIN_MODE |
248 MLX5_MTPPS_FS_PATTERN |
249 MLX5_MTPPS_FS_ENABLE;
250 } else {
251 pin = rq->extts.index;
252 field_select = MLX5_MTPPS_FS_ENABLE;
253 }
254
255 MLX5_SET(mtpps_reg, in, pin, pin);
256 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
257 MLX5_SET(mtpps_reg, in, pattern, pattern);
258 MLX5_SET(mtpps_reg, in, enable, on);
259 MLX5_SET(mtpps_reg, in, field_select, field_select);
260
261 err = mlx5_set_mtpps(mdev, in, sizeof(in));
262 if (err)
263 return err;
264
265 return mlx5_set_mtppse(mdev, pin, 0,
266 MLX5_EVENT_MODE_REPETETIVE & on);
267}
268
/* Configure a pin as a periodic output.
 *
 * Only a period of exactly one second is supported: (ns >> 1) must equal
 * 500000000, i.e. ns == 1s (the low bit is ignored by the shift).  The
 * requested start time is translated from wall-clock nanoseconds into a
 * raw device-cycle timestamp for the MTPPS register.
 */
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
				 struct ptp_clock_request *rq,
				 int on)
{
	struct mlx5_clock *clock =
		container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
		container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u64 nsec_now, nsec_delta, time_stamp = 0;
	u64 cycles_now, cycles_delta;
	struct timespec64 ts;
	unsigned long flags;
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;
	s64 ns;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	if (rq->perout.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	if (on) {
		pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
				   rq->perout.index);
		if (pin < 0)
			return -EBUSY;

		pin_mode = MLX5_PIN_MODE_OUT;
		pattern = MLX5_OUT_PATTERN_PERIODIC;
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		ns = timespec64_to_ns(&ts);

		/* HW only supports a 1-second period. */
		if ((ns >> 1) != 500000000LL)
			return -EINVAL;

		ts.tv_sec = rq->perout.start.sec;
		ts.tv_nsec = rq->perout.start.nsec;
		ns = timespec64_to_ns(&ts);
		/* Sample the raw counter outside the lock; the ns<->cycle
		 * conversion below must use a consistent tc/mult snapshot,
		 * hence the lock around it.
		 */
		cycles_now = mlx5_read_internal_timer(mdev);
		write_lock_irqsave(&clock->lock, flags);
		nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
		nsec_delta = ns - nsec_now;
		/* cycles = ns * 2^shift / mult (inverse of cyc2ns). */
		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
					 clock->cycles.mult);
		write_unlock_irqrestore(&clock->lock, flags);
		time_stamp = cycles_now + cycles_delta;
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE |
			       MLX5_MTPPS_FS_TIME_STAMP;
	} else {
		/* Disable: only the enable field is updated. */
		pin = rq->perout.index;
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* on != 0 arms repetitive events; on == 0 yields DISABLE. */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
344
345static int mlx5_pps_configure(struct ptp_clock_info *ptp,
346 struct ptp_clock_request *rq,
347 int on)
348{
349 struct mlx5_clock *clock =
350 container_of(ptp, struct mlx5_clock, ptp_info);
351
352 clock->pps_info.enabled = !!on;
353 return 0;
354}
355
356static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
357 struct ptp_clock_request *rq,
358 int on)
359{
360 switch (rq->type) {
361 case PTP_CLK_REQ_EXTTS:
362 return mlx5_extts_configure(ptp, rq, on);
363 case PTP_CLK_REQ_PEROUT:
364 return mlx5_perout_configure(ptp, rq, on);
365 case PTP_CLK_REQ_PPS:
366 return mlx5_pps_configure(ptp, rq, on);
367 default:
368 return -EOPNOTSUPP;
369 }
370 return 0;
371}
372
373static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
374 enum ptp_pin_function func, unsigned int chan)
375{
376 return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
377}
378
/* Template ptp_clock_info, copied into clock->ptp_info at init time.
 * n_pins/n_ext_ts/n_per_out are overwritten from MTPPS capabilities
 * (mlx5_get_pps_caps), and .enable/.verify/.pps are filled in by
 * mlx5_init_pin_config() when pins exist.
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner = THIS_MODULE,
	.name = "mlx5_p2p",
	.max_adj = 100000000,
	.n_alarm = 0,
	.n_ext_ts = 0,
	.n_per_out = 0,
	.n_pins = 0,
	.pps = 0,
	.adjfreq = mlx5_ptp_adjfreq,
	.adjtime = mlx5_ptp_adjtime,
	.gettime64 = mlx5_ptp_gettime,
	.settime64 = mlx5_ptp_settime,
	.enable = NULL,
	.verify = NULL,
};
395
396static int mlx5_init_pin_config(struct mlx5_clock *clock)
397{
398 int i;
399
400 clock->ptp_info.pin_config =
401 kzalloc(sizeof(*clock->ptp_info.pin_config) *
402 clock->ptp_info.n_pins, GFP_KERNEL);
403 if (!clock->ptp_info.pin_config)
404 return -ENOMEM;
405 clock->ptp_info.enable = mlx5_ptp_enable;
406 clock->ptp_info.verify = mlx5_ptp_verify;
407 clock->ptp_info.pps = 1;
408
409 for (i = 0; i < clock->ptp_info.n_pins; i++) {
410 snprintf(clock->ptp_info.pin_config[i].name,
411 sizeof(clock->ptp_info.pin_config[i].name),
412 "mlx5_pps%d", i);
413 clock->ptp_info.pin_config[i].index = i;
414 clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
415 clock->ptp_info.pin_config[i].chan = i;
416 }
417
418 return 0;
419}
420
/* Query the MTPPS register and cache the device's PPS capabilities:
 * total pin count, how many pins can be inputs/outputs, and the per-pin
 * mode capability bits.
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	/* MLX5_GET() takes literal field names, so each pin's mode caps
	 * must be read out individually.
	 */
	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
444
/* Handle a PPS event EQE from firmware.
 *
 * EXTTS pins: convert the HW cycle timestamp to nanoseconds and forward
 * it to the PTP layer - as a PPSUSR event when PPS reporting is enabled
 * (mlx5_pps_configure), otherwise as a plain EXTTS event.
 *
 * PEROUT pins: compute the device-cycle timestamp of the next whole
 * second and queue mlx5_pps_out() to re-arm the pin with it.
 *
 * NOTE(review): pin_config is dereferenced unconditionally - this
 * assumes events only arrive on devices where mlx5_init_pin_config()
 * ran (n_pins > 0); confirm firmware cannot raise the event otherwise.
 */
void mlx5_pps_event(struct mlx5_core_dev *mdev,
		    struct mlx5_eqe *eqe)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct ptp_clock_event ptp_event;
	struct timespec64 ts;
	u64 nsec_now, nsec_delta;
	u64 cycles_now, cycles_delta;
	int pin = eqe->data.pps.pin;
	s64 ns;
	unsigned long flags;

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
					be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		/* Target the start of the next second (tv_sec + 1, nsec 0)
		 * and convert it to raw cycles: cycles = ns * 2^shift / mult.
		 */
		mlx5_ptp_gettime(&clock->ptp_info, &ts);
		cycles_now = mlx5_read_internal_timer(mdev);
		ts.tv_sec += 1;
		ts.tv_nsec = 0;
		ns = timespec64_to_ns(&ts);
		write_lock_irqsave(&clock->lock, flags);
		nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
		nsec_delta = ns - nsec_now;
		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
					 clock->cycles.mult);
		clock->pps_info.start[pin] = cycles_now + cycles_delta;
		schedule_work(&clock->pps_info.out_work);
		write_unlock_irqrestore(&clock->lock, flags);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled event\n");
	}
}
490
/* Initialize the HW clock for this device: set up the cyclecounter and
 * timecounter, the overflow watchdog work, the userspace clock-info
 * page, PPS pin configuration, and finally register the PTP clock.
 * Bails out early (no clock support) if the device does not report a
 * frequency.
 */
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 ns;
	u64 frac = 0;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	if (!dev_freq) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}
	rwlock_init(&clock->lock);
	clock->cycles.read = read_internal_timer;
	clock->cycles.shift = MLX5_CYCLES_SHIFT;
	clock->cycles.mult = clocksource_khz2mult(dev_freq,
						  clock->cycles.shift);
	/* Remember the unadjusted mult; adjfreq scales relative to it. */
	clock->nominal_c_mult = clock->cycles.mult;
	clock->cycles.mask = CLOCKSOURCE_MASK(41);
	clock->mdev = mdev;

	/* Start the timecounter at the current wall-clock time. */
	timecounter_init(&clock->tc, &clock->cycles,
			 ktime_to_ns(ktime_get_real()));

	/* Derive the overflow-watchdog period (in jiffies) from the time
	 * it takes the 41-bit counter to wrap.
	 * NOTE(review): the NSEC_PER_SEC / 2 / HZ scaling factor looks
	 * intended to run the work at least twice per wrap - confirm the
	 * resulting period is indeed shorter than the wrap time.
	 */
	ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
				 frac, &frac);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	clock->overflow_period = ns;

	/* Best-effort shared clock-info page for userspace readers;
	 * failure here only disables that feature.
	 */
	mdev->clock_info_page = alloc_page(GFP_KERNEL);
	if (mdev->clock_info_page) {
		mdev->clock_info = kmap(mdev->clock_info_page);
		if (!mdev->clock_info) {
			__free_page(mdev->clock_info_page);
			mlx5_core_warn(mdev, "failed to map clock page\n");
		} else {
			mdev->clock_info->sign = 0;
			mdev->clock_info->nsec = clock->tc.nsec;
			mdev->clock_info->cycles = clock->tc.cycle_last;
			mdev->clock_info->mask = clock->cycles.mask;
			mdev->clock_info->mult = clock->nominal_c_mult;
			mdev->clock_info->shift = clock->cycles.shift;
			mdev->clock_info->frac = clock->tc.frac;
			mdev->clock_info->overflow_period =
						clock->overflow_period;
		}
	}

	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
	INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
	if (clock->overflow_period)
		schedule_delayed_work(&clock->overflow_work, 0);
	else
		mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");

	/* Copy the template; capabilities may override parts of it. */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Pull PPS capabilities and pin config when supported. */
	if (MLX5_PPS_CAP(mdev))
		mlx5_get_pps_caps(mdev);
	if (clock->ptp_info.n_pins)
		mlx5_init_pin_config(clock);

	/* PTP registration failure is non-fatal; clock->ptp stays NULL. */
	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}
}
566
/* Tear down what mlx5_init_clock() set up, in reverse order: unregister
 * the PTP clock first (stops new callbacks), then cancel pending work,
 * then release the shared clock-info page and the pin-config array.
 * The frequency guard mirrors the early-return in mlx5_init_clock(),
 * so nothing here was ever initialized when it trips.
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->overflow_work);

	if (mdev->clock_info) {
		kunmap(mdev->clock_info_page);
		__free_page(mdev->clock_info_page);
		mdev->clock_info = NULL;
	}

	/* kfree(NULL) is a no-op, so this is safe when no pins existed. */
	kfree(clock->ptp_info.pin_config);
}
590