/*
 * Xilinx TSN PTP transmit/receive support.
 *
 * This module moves PTP frames between the network stack and the
 * dedicated PTP packet buffers of the TSN IP, and delivers the
 * hardware RX/TX timestamps for those frames.
 */
#include "xilinx_axienet.h"
#include "xilinx_tsn_ptp.h"
#include "xilinx_tsn_timer.h"
#include <linux/ptp_classify.h>

#define PTP_ONE_SECOND	1000000000

#define msg_type_string(type) \
	((type) == PTP_TYPE_SYNC) ? "SYNC" : \
	((type) == PTP_TYPE_FOLLOW_UP) ? "FOLLOW_UP" : \
	((type) == PTP_TYPE_PDELAYREQ) ? "PDELAY_REQ" : \
	((type) == PTP_TYPE_PDELAYRESP) ? "PDELAY_RESP" : \
	((type) == PTP_TYPE_PDELAYRESP_FOLLOW_UP) ? "PDELAY_RESP_FOLLOW_UP" : \
	((type) == PTP_TYPE_ANNOUNCE) ? "ANNOUNCE" : \
	"UNKNOWN"
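
/**
 * memcpy_fromio_32 - copy a PTP frame out of the IP's packet buffer
 * @lp:		axienet private data
 * @offset:	register offset of the hardware buffer
 * @data:	destination buffer
 * @len:	number of bytes to copy
 *
 * The PTP buffer is word-addressable, so the frame is read out 32 bits
 * at a time; any trailing bytes are extracted from one final word read.
 */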
static void memcpy_fromio_32(struct axienet_local *lp,
			     unsigned long offset, u8 *data, size_t len)
{
	while (len >= 4) {
		*(u32 *)data = axienet_ior(lp, offset);
		len -= 4;
		offset += 4;
		data += 4;
	}

	if (len > 0) {
		u32 leftover = axienet_ior(lp, offset);
		u8 *src = (u8 *)&leftover;

		while (len) {
			*data++ = *src++;
			len--;
		}
	}
}
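
/**
 * memcpy_toio_32 - copy a PTP frame into the IP's packet buffer
 * @lp:		axienet private data
 * @offset:	register offset of the hardware buffer
 * @data:	source buffer
 * @len:	number of bytes to copy
 *
 * The hardware buffer is word-addressable, so the frame is written
 * 32 bits at a time, with any trailing bytes packed into one final
 * zero-padded word.
 */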
static void memcpy_toio_32(struct axienet_local *lp,
			   unsigned long offset, u8 *data, size_t len)
{
	while (len >= 4) {
		axienet_iow(lp, offset, *(u32 *)data);
		len -= 4;
		offset += 4;
		data += 4;
	}

	if (len > 0) {
		u32 leftover = 0;
		u8 *dest = (u8 *)&leftover;

		while (len) {
			*dest++ = *data++;
			len--;
		}
		axienet_iow(lp, offset, leftover);
	}
}
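
/* Return true if the (untagged, L2) PTP message in skb is a SYNC frame. */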
static int is_sync(struct sk_buff *skb)
{
	u8 *msg_type;

	msg_type = (u8 *)skb->data + ETH_HLEN;

	return (*msg_type & 0xf) == PTP_TYPE_SYNC;
}
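
/**
 * axienet_ptp_xmit - transmit a PTP frame through the PTP hardware buffers
 * @skb:	sk_buff containing the PTP frame
 * @ndev:	pointer to the net_device structure
 *
 * Copies the frame into a free hardware TX buffer and triggers its
 * transmission. For two-step timestamping the skb is queued on ptp_txq
 * so the TX timestamp can later be delivered from axienet_tx_tstamp().
 *
 * Return: NETDEV_TX_OK on success, NETDEV_TX_BUSY if no buffer is free.
 */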
int axienet_ptp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u8 msg_type;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned long flags;
	u8 tx_frame_waiting;
	u8 free_index;
	u32 cmd1_field = 0;
	u32 cmd2_field = 0;

	msg_type = *(u8 *)(skb->data + ETH_HLEN);

	pr_debug(" -->XMIT: protocol: %x message: %s frame_len: %d\n",
		 skb->protocol,
		 msg_type_string(msg_type & 0xf), skb->len);

	tx_frame_waiting = (axienet_ior(lp, PTP_TX_CONTROL_OFFSET) &
			    PTP_TX_FRAME_WAITING_MASK) >>
			   PTP_TX_FRAME_WAITING_SHIFT;

	/* bit 7 set means every hardware TX buffer is occupied:
	 * stop the queue and let the stack retry later
	 */
	if (tx_frame_waiting & (1 << 7)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		pr_debug("tx_frame_waiting: %d\n", tx_frame_waiting);
		return NETDEV_TX_BUSY;
	}

	/* the first free buffer is just past the highest one in use */
	free_index = fls(tx_frame_waiting);

	/* one-step sync: hardware inserts the timestamp into the frame */
	if (lp->ptp_ts_type == HWTSTAMP_TX_ONESTEP_SYNC &&
	    is_sync(skb)) {
		cmd1_field |= PTP_TX_CMD_1STEP_SHIFT;
		cmd2_field |= PTP_TOD_FIELD_OFFSET;
	}

	cmd1_field |= skb->len;

	axienet_iow(lp, PTP_TX_BUFFER_OFFSET(free_index), cmd1_field);
	axienet_iow(lp, PTP_TX_BUFFER_OFFSET(free_index) +
		    PTP_TX_BUFFER_CMD2_FIELD, cmd2_field);
	memcpy_toio_32(lp,
		       (PTP_TX_BUFFER_OFFSET(free_index) +
			PTP_TX_CMD_FIELD_LEN),
		       skb->data, skb->len);

	/* kick off transmission of this buffer */
	axienet_iow(lp, PTP_TX_CONTROL_OFFSET, (1 << free_index));

	/* two-step timestamping: queue the skb so axienet_tx_tstamp()
	 * can deliver the hardware timestamp once the frame is sent
	 */
	if (lp->ptp_ts_type != HWTSTAMP_TX_ONESTEP_SYNC ||
	    !is_sync(skb)) {
		spin_lock_irqsave(&lp->ptp_tx_lock, flags);
		skb->cb[0] = free_index;
		skb_queue_tail(&lp->ptp_txq, skb);

		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		skb_tx_timestamp(skb);
		spin_unlock_irqrestore(&lp->ptp_tx_lock, flags);
	}
	return NETDEV_TX_OK;
}
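
/**
 * axienet_set_timestamp - read a captured hardware timestamp
 * @lp:		axienet private data
 * @hwtstamps:	timestamp structure to fill in
 * @offset:	register offset of the captured timestamp
 *
 * The seconds word lives at @offset and the nanoseconds word at
 * @offset + 4.
 */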
static void axienet_set_timestamp(struct axienet_local *lp,
				  struct skb_shared_hwtstamps *hwtstamps,
				  unsigned int offset)
{
	u32 captured_ns;
	u32 captured_sec;

	captured_ns = axienet_ior(lp, offset + 4);
	captured_sec = axienet_ior(lp, offset);

	/* seconds in the first word, nanoseconds in the second */
	hwtstamps->hwtstamp = ktime_set(captured_sec, captured_ns);
}
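
/**
 * axienet_ptp_recv - pull received PTP frames out of the hardware ring
 * @ndev:	pointer to the net_device structure
 *
 * Walks the RX buffer ring from the software pointer up to the hardware
 * pointer, copies each frame into a fresh skb, attaches the hardware
 * timestamp for PTP event messages and hands the frame to the stack.
 */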
static void axienet_ptp_recv(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned long ptp_frame_base_addr = 0;
	struct sk_buff *skb;
	u16 msg_len;
	u8 msg_type;
	u32 bytes = 0;
	u32 packets = 0;

	pr_debug("%s:\n", __func__);

	while ((lp->ptp_rx_hw_pointer & 0xf) !=
	       (lp->ptp_rx_sw_pointer & 0xf)) {
		skb = netdev_alloc_skb(ndev, PTP_RX_FRAME_SIZE);
		if (!skb)
			break;

		lp->ptp_rx_sw_pointer += 1;

		ptp_frame_base_addr = PTP_RX_BASE_OFFSET +
				      ((lp->ptp_rx_sw_pointer & 0xf) *
				       PTP_RX_HWBUF_SIZE);

		memset(skb->data, 0x0, PTP_RX_FRAME_SIZE);

		memcpy_fromio_32(lp, ptp_frame_base_addr, skb->data,
				 PTP_RX_FRAME_SIZE);

		msg_type = *(u8 *)(skb->data + ETH_HLEN) & 0xf;
		msg_len = *(u16 *)(skb->data + ETH_HLEN + 2);

		skb_put(skb, ntohs(msg_len) + ETH_HLEN);

		bytes += skb->len;
		packets++;

		skb->protocol = eth_type_trans(skb, ndev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		pr_debug(" -->RECV: protocol: %x message: %s frame_len: %d\n",
			 skb->protocol, msg_type_string(msg_type),
			 skb->len);

		/* attach the hardware RX timestamp for PTP event messages */
		if (!(msg_type & PTP_MSG_TYPE_MASK)) {
			axienet_set_timestamp(lp, skb_hwtstamps(skb),
					      (ptp_frame_base_addr +
					       PTP_HW_TSTAMP_OFFSET));
		}

		netif_rx(skb);
	}
	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += bytes;
}
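
/**
 * axienet_ptp_rx_irq - PTP RX interrupt handler
 * @irq:	interrupt number
 * @_ndev:	pointer to the net_device structure
 *
 * Latches the hardware RX pointer and drains any pending PTP frames.
 */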
irqreturn_t axienet_ptp_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	pr_debug("%s: received\n", __func__);
	/* snapshot the hardware write pointer before draining the ring */
	lp->ptp_rx_hw_pointer = (axienet_ior(lp, PTP_RX_CONTROL_OFFSET)
				 & PTP_RX_PACKET_FIELD_MASK) >> 8;

	axienet_ptp_recv(ndev);

	return IRQ_HANDLED;
}
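
/**
 * axienet_tx_tstamp - TX timestamp work handler
 * @work:	pointer to the tx_tstamp_work work_struct
 *
 * For every queued skb whose buffer has been transmitted, reads back the
 * hardware TX timestamp, delivers it via skb_tstamp_tx() and frees the skb.
 */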
void axienet_tx_tstamp(struct work_struct *work)
{
	struct axienet_local *lp = container_of(work, struct axienet_local,
						tx_tstamp_work);
	struct net_device *ndev = lp->ndev;
	struct skb_shared_hwtstamps hwtstamps;
	struct sk_buff *skb;
	unsigned long ts_reg_offset;
	unsigned long flags;
	u8 tx_packet;
	u8 index;
	u32 bytes = 0;
	u32 packets = 0;

	memset(&hwtstamps, 0, sizeof(hwtstamps));

	spin_lock_irqsave(&lp->ptp_tx_lock, flags);

	tx_packet = (axienet_ior(lp, PTP_TX_CONTROL_OFFSET) &
		     PTP_TX_PACKET_FIELD_MASK) >>
		    PTP_TX_PACKET_FIELD_SHIFT;

	while ((skb = __skb_dequeue(&lp->ptp_txq)) != NULL) {
		index = skb->cb[0];

		/* dequeued packet not yet transmitted? */
		if (index > tx_packet) {
			/* put it back and retry on the next interrupt */
			skb_queue_tail(&lp->ptp_txq, skb);
			break;
		}

		ts_reg_offset = PTP_TX_BUFFER_OFFSET(index) +
				PTP_HW_TSTAMP_OFFSET;

		if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
			axienet_set_timestamp(lp, &hwtstamps, ts_reg_offset);
			skb_tstamp_tx(skb, &hwtstamps);
		}

		bytes += skb->len;
		packets++;
		dev_kfree_skb_any(skb);
	}
	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += bytes;

	spin_unlock_irqrestore(&lp->ptp_tx_lock, flags);
}
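
/**
 * axienet_ptp_tx_irq - PTP TX interrupt handler
 * @irq:	interrupt number
 * @_ndev:	pointer to the net_device structure
 *
 * Acknowledges the interrupt, schedules the TX timestamp work and
 * restarts the transmit queue.
 */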
irqreturn_t axienet_ptp_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	pr_debug("%s: got tx interrupt\n", __func__);

	/* dummy read of the control register to clear the interrupt */
	axienet_ior(lp, PTP_TX_CONTROL_OFFSET);

	schedule_work(&lp->tx_tstamp_work);

	netif_wake_queue(ndev);

	return IRQ_HANDLED;
}