#include <linux/mlx4/device.h>
#include <linux/clocksource.h>

#include "mlx4_en.h"

/* mlx4_en_read_clock - read the raw cycle counter from the HCA; used as the
 * cyclecounter read callback for the timecounter.
 */
static u64 mlx4_en_read_clock(const struct cyclecounter *tc)
{
	struct mlx4_en_dev *mdev =
		container_of(tc, struct mlx4_en_dev, cycles);
	struct mlx4_dev *dev = mdev->dev;

	return mlx4_read_clock(dev) & tc->mask;
}

/* Extract the 48-bit hardware timestamp carried in a timestamp CQE: the upper
 * 32 bits come from timestamp_hi, the lower 16 bits from timestamp_lo.
 */
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
{
	u64 hi, lo;
	struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe;

	lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo);
	hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16;

	return hi | lo;
}

/* Convert a raw hardware timestamp to nanoseconds and store it in the skb
 * hardware timestamp structure consumed by the stack.
 */
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
			    struct skb_shared_hwtstamps *hwts,
			    u64 timestamp)
{
	unsigned int seq;
	u64 nsec;

	do {
		seq = read_seqbegin(&mdev->clock_lock);
		nsec = timecounter_cyc2time(&mdev->clock, timestamp);
	} while (read_seqretry(&mdev->clock_lock, seq));

	memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
	hwts->hwtstamp = ns_to_ktime(nsec);
}
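/* Illustrative sketch only (not taken verbatim from the completion paths):
 * the RX/TX completion handlers are expected to combine the two helpers above
 * roughly as
 *
 *	u64 ts = mlx4_en_get_cqe_ts(cqe);
 *	mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb), ts);
 *
 * so that the NIC timestamp ends up in skb_hwtstamps(skb)->hwtstamp.
 */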
/**
 * mlx4_en_remove_timestamp - disable PTP device
 * @mdev: board private structure
 *
 * Unregister the PTP hardware clock, if one was registered.
 **/
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
{
	if (mdev->ptp_clock) {
		ptp_clock_unregister(mdev->ptp_clock);
		mdev->ptp_clock = NULL;
		mlx4_info(mdev, "removed PHC\n");
	}
}

#define MLX4_EN_WRAP_AROUND_SEC	10UL

/* freq_to_shift() below sizes the multiplier so that cycle deltas of up to
 * MLX4_EN_WRAP_AROUND_SEC seconds can be converted to nanoseconds without
 * overflowing 64 bits.  Reading the timecounter at half that period keeps the
 * accumulated delta safely inside that window.
 */
#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)

void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
	bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
					      MLX4_EN_OVERFLOW_PERIOD);
	unsigned long flags;

	if (timeout) {
		write_seqlock_irqsave(&mdev->clock_lock, flags);
		timecounter_read(&mdev->clock);
		write_sequnlock_irqrestore(&mdev->clock_lock, flags);
		mdev->last_overflow_check = jiffies;
	}
}
/**
 * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock
 * @ptp: ptp clock structure
 * @delta: desired frequency change in parts per billion
 *
 * Scale the cycle counter multiplier by @delta relative to the nominal
 * multiplier to speed the clock up or slow it down.
 **/
static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	u64 adj;
	u32 diff, mult;
	int neg_adj = 0;
	unsigned long flags;
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);

	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}
	mult = mdev->nominal_c_mult;
	adj = mult;
	adj *= delta;
	diff = div_u64(adj, 1000000000ULL);

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_read(&mdev->clock);
	mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}
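/* Worked example with illustrative numbers only: for a nominal multiplier of
 * 2^30 (~1.07e9) and delta = +100 ppb, diff = 2^30 * 100 / 1e9 ~= 107, so
 * cycles.mult becomes nominal_c_mult + 107 and converted time runs roughly
 * 100 ppb faster than before.
 */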
/**
 * mlx4_en_phc_adjtime - shift the time of the hardware clock
 * @ptp: ptp clock structure
 * @delta: desired change in nanoseconds
 *
 * Adjust the timer by offsetting the timecounter structure.
 **/
static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	unsigned long flags;

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_adjtime(&mdev->clock, delta);
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}

/**
 * mlx4_en_phc_gettime - read the current time from the hardware clock
 * @ptp: ptp clock structure
 * @ts: timespec to hold the current time value
 *
 * Read the timecounter and return the correct value in nanoseconds after
 * converting it into a struct timespec64.
 **/
static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp,
			       struct timespec64 *ts)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	unsigned long flags;
	u64 ns;

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	ns = timecounter_read(&mdev->clock);
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}
/**
 * mlx4_en_phc_settime - set the current time on the hardware clock
 * @ptp: ptp clock structure
 * @ts: timespec containing the new time for the cycle counter
 *
 * Reset the timecounter to use a new base value instead of the kernel wall
 * timer value.
 **/
static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
			       const struct timespec64 *ts)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	u64 ns = timespec64_to_ns(ts);
	unsigned long flags;

	/* reset the timecounter */
	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles, ns);
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}

/**
 * mlx4_en_phc_enable - enable or disable an ancillary feature
 * @ptp: ptp clock structure
 * @request: desired resource to enable or disable
 * @on: caller passes one to enable or zero to disable
 *
 * No ancillary features are supported, so always reject the request.
 **/
static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp,
			      struct ptp_clock_request __always_unused *request,
			      int __always_unused on)
{
	return -EOPNOTSUPP;
}

static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.max_adj	= 100000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= mlx4_en_phc_adjfreq,
	.adjtime	= mlx4_en_phc_adjtime,
	.gettime64	= mlx4_en_phc_gettime,
	.settime64	= mlx4_en_phc_settime,
	.enable		= mlx4_en_phc_enable,
};
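/* Once ptp_clock_register() succeeds, this clock is exposed to userspace as a
 * /dev/ptpN character device, so standard tooling (for example ptp4l or
 * phc2sys from linuxptp) can steer it through the callbacks above.
 */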
/* This function calculates the max shift that enables the user range of
 * MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
 */
static u32 freq_to_shift(u16 freq)
{
	u32 freq_khz = freq * 1000;
	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
	u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
	/* calculate max possible multiplier in order to fit in 64 bits */
	u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);

	/* this is the reverse of clocksource_khz2mult() */
	return ilog2(div_u64(max_mul * freq_khz, 1000000));
}
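/* Worked example (a 500 MHz core clock, chosen purely for illustration):
 * freq_khz = 500000, max_val_cycles = 5e9, rounded up to 2^33, so
 * max_mul = 2^64 / 2^33 = 2^31 and shift = ilog2(2^31 * 500000 / 1e6) =
 * ilog2(2^30) = 30.  clocksource_khz2mult(500000, 30) then yields mult = 2^31,
 * keeping delta * mult within 64 bits across the 10 second window.
 */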
/**
 * mlx4_en_init_timestamp - initialize PTP/timestamping support
 * @mdev: board private structure
 *
 * Set up the cycle counter, the timecounter and the seqlock protecting them,
 * then register a PTP hardware clock for the device.
 **/
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	unsigned long flags;

	/* mlx4_en_init_timestamp is called for each netdev, but
	 * mdev->ptp_clock is shared by all ports of the device; skip
	 * initialization if another port already set it up.
	 */
	if (mdev->ptp_clock)
		return;

	seqlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock,
				     mdev->cycles.shift);
	mdev->nominal_c_mult = mdev->cycles.mult;

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles,
			 ktime_to_ns(ktime_get_real()));
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	/* Configure and register the PHC */
	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
					     &mdev->pdev->dev);
	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		mlx4_err(mdev, "ptp_clock_register failed\n");
	} else if (mdev->ptp_clock) {
		mlx4_info(mdev, "registered PHC clock\n");
	}
}