1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/mlx4/device.h>
35
36#include "mlx4_en.h"
37
/* Apply a new hardware timestamping configuration to @dev.
 *
 * @dev:       net device whose timestamp config is being changed
 * @tx_type:   HWTSTAMP_TX_* mode to store in hwtstamp_config
 * @rx_filter: HWTSTAMP_FILTER_* mode to store in hwtstamp_config
 *
 * Changing the config requires rebuilding the port's RX/TX resources:
 * the port is stopped if it was running, resources are freed and
 * reallocated under the new settings, and the port is restarted.
 * Returns 0 on success or a negative errno from resource allocation /
 * port start.
 */
int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	/* state_lock serializes port stop/start against other config paths */
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	/* Resources must be recreated for the new timestamp settings */
	mlx4_en_free_resources(priv);

	en_warn(priv, "Changing Time Stamp configuration\n");

	priv->hwtstamp_config.tx_type = tx_type;
	priv->hwtstamp_config.rx_filter = rx_filter;

	/* HW VLAN stripping is disabled whenever any RX timestamp filter is
	 * active, and re-enabled when filtering is turned off.
	 * NOTE(review): presumably RX timestamping and VLAN CTAG RX offload
	 * are mutually exclusive on this HW — confirm against HW docs.
	 */
	if (rx_filter != HWTSTAMP_FILTER_NONE)
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	else
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

out:
	mutex_unlock(&mdev->state_lock);
	/* dev->features may have changed above — notify the stack */
	netdev_features_change(dev);
	return err;
}
79
80
81
82static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc)
83{
84 struct mlx4_en_dev *mdev =
85 container_of(tc, struct mlx4_en_dev, cycles);
86 struct mlx4_dev *dev = mdev->dev;
87
88 return mlx4_read_clock(dev) & tc->mask;
89}
90
91u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
92{
93 u64 hi, lo;
94 struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe;
95
96 lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo);
97 hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16;
98
99 return hi | lo;
100}
101
102void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
103 struct skb_shared_hwtstamps *hwts,
104 u64 timestamp)
105{
106 u64 nsec;
107
108 nsec = timecounter_cyc2time(&mdev->clock, timestamp);
109
110 memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
111 hwts->hwtstamp = ns_to_ktime(nsec);
112}
113
/* Initialize the driver's software timekeeping: configure the cycle
 * counter backed by the device's free-running clock, seed the timecounter
 * with the current wall-clock time, and compute the period at which the
 * overflow watchdog must run so the 48-bit counter never wraps unnoticed.
 */
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	u64 ns;

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	/* HW clock is a 48-bit free-running counter */
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	/* Fixed shift scales the cycles->ns multiplier for accuracy.
	 * NOTE(review): the value 14 is presumably tuned for this device's
	 * core clock rate (dev->caps.hca_core_clock, in MHz, converted to
	 * kHz below) — confirm before changing.
	 */
	mdev->cycles.shift = 14;
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);

	/* Seed the timecounter so cycle deltas map onto real wall time */
	timecounter_init(&mdev->clock, &mdev->cycles,
		ktime_to_ns(ktime_get_real()));

	/* Convert the counter's full range (in ns) into the watchdog period
	 * (jiffies-compatible units): the "/ 2" ensures the overflow check
	 * fires at least twice per counter wraparound.
	 */
	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	mdev->overflow_period = ns;
}
141
142void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
143{
144 bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
145 mdev->overflow_period);
146
147 if (timeout) {
148 timecounter_read(&mdev->clock);
149 mdev->last_overflow_check = jiffies;
150 }
151}
152