1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xilinx_axienet.h"
19#include "xilinx_tsn_ptp.h"
20#include "xilinx_tsn_timer.h"
21#include <linux/ptp_classify.h>
22
/* One second expressed in nanoseconds. */
#define PTP_ONE_SECOND 1000000000

/* Map the 4-bit PTP messageType field (low nibble of the first byte after
 * the Ethernet header) to a printable name.  Debug logging only; callers
 * pass (type & 0xf).
 */
#define msg_type_string(type) \
	((type) == PTP_TYPE_SYNC) ? "SYNC" : \
	((type) == PTP_TYPE_FOLLOW_UP) ? "FOLLOW_UP" : \
	((type) == PTP_TYPE_PDELAYREQ) ? "PDELAY_REQ" : \
	((type) == PTP_TYPE_PDELAYRESP) ? "PDELAY_RESP" : \
	((type) == PTP_TYPE_PDELAYRESP_FOLLOW_UP) ? "PDELAY_RESP_FOLLOW_UP" : \
	((type) == PTP_TYPE_ANNOUNCE) ? "ANNOUNCE" : \
	"UNKNOWN"
33
34
35
36
37
38
39
40
41
42
43static void memcpy_fromio_32(struct axienet_local *lp,
44 unsigned long offset, u8 *data, size_t len)
45{
46 while (len >= 4) {
47 *(u32 *)data = axienet_ior(lp, offset);
48 len -= 4;
49 offset += 4;
50 data += 4;
51 }
52
53 if (len > 0) {
54 u32 leftover = axienet_ior(lp, offset);
55 u8 *src = (u8 *)&leftover;
56
57 while (len) {
58 *data++ = *src++;
59 len--;
60 }
61 }
62}
63
64
65
66
67
68
69
70
71
72
73static void memcpy_toio_32(struct axienet_local *lp,
74 unsigned long offset, u8 *data, size_t len)
75{
76 while (len >= 4) {
77 axienet_iow(lp, offset, *(u32 *)data);
78 len -= 4;
79 offset += 4;
80 data += 4;
81 }
82
83 if (len > 0) {
84 u32 leftover = 0;
85 u8 *dest = (u8 *)&leftover;
86
87 while (len) {
88 *dest++ = *data++;
89 len--;
90 }
91 axienet_iow(lp, offset, leftover);
92 }
93}
94
95
96
97
98
99
100
101
102
103
104
105
/* axienet_ptp_xmit - transmit a PTP frame through the dedicated PTP TX
 * buffer bank (separate from the normal data path).
 * @skb:  PTP frame (Ethernet header followed by the PTP message)
 * @ndev: network device
 *
 * Return: NETDEV_TX_OK on success; NETDEV_TX_BUSY (after stopping the
 * queue) when all hardware TX buffers are occupied.
 */
int axienet_ptp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u8 msg_type;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned long flags;
	u8 tx_frame_waiting;
	u8 free_index;

	/* PTP messageType is the low nibble of the first byte after the
	 * Ethernet header.
	 */
	msg_type = *(u8 *)(skb->data + ETH_HLEN);

	pr_debug(" -->XMIT: protocol: %x message: %s frame_len: %d\n",
		 skb->protocol,
		 msg_type_string(msg_type & 0xf), skb->len);

	/* Per-buffer "frame waiting" bitmap read back from the TX control
	 * register: set bits are buffers still holding an unsent frame.
	 */
	tx_frame_waiting = (axienet_ior(lp, PTP_TX_CONTROL_OFFSET) &
			    PTP_TX_FRAME_WAITING_MASK) >>
			   PTP_TX_FRAME_WAITING_SHIFT;

	/* Bit 7 set means the highest-numbered buffer is in use — presumably
	 * all buffers are then full (buffers fill in fls() order below);
	 * back-pressure the stack and let the TX-done IRQ wake the queue.
	 */
	if (tx_frame_waiting & (1 << 7)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		pr_debug("tx_frame_waiting: %d\n", tx_frame_waiting);
		return NETDEV_TX_BUSY;
	}

	/* fls() of the waiting bitmap picks the first buffer above the
	 * highest occupied one (0 when the bank is empty).
	 */
	free_index = fls(tx_frame_waiting);

	/* Word 0 of the buffer holds the frame length; the frame data
	 * starts 8 bytes into the buffer.
	 */
	axienet_iow(lp, PTP_TX_BUFFER_OFFSET(free_index), skb->len);
	memcpy_toio_32(lp, (PTP_TX_BUFFER_OFFSET(free_index) + 8),
		       skb->data, skb->len);

	/* Kick the hardware: writing the buffer's bit starts transmission. */
	axienet_iow(lp, PTP_TX_CONTROL_OFFSET, (1 << free_index));

	spin_lock_irqsave(&lp->ptp_tx_lock, flags);
	/* Stash the buffer index in the skb control block so the TX
	 * timestamp worker (axienet_tx_tstamp) can locate the hardware
	 * timestamp for this frame.
	 */
	skb->cb[0] = free_index;
	skb_queue_tail(&lp->ptp_txq, skb);

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	skb_tx_timestamp(skb);
	spin_unlock_irqrestore(&lp->ptp_tx_lock, flags);

	return NETDEV_TX_OK;
}
155
156
157
158
159
160
161
162
163
164
/* axienet_set_timestamp - read a hardware-captured timestamp register pair
 * and store it into @hwtstamps.
 * @lp:        driver private data (register access context)
 * @hwtstamps: destination shared-hwtstamp structure
 * @offset:    register offset of the timestamp pair (seconds word first,
 *             nanoseconds word at @offset + 4)
 */
static void axienet_set_timestamp(struct axienet_local *lp,
				  struct skb_shared_hwtstamps *hwtstamps,
				  unsigned int offset)
{
	u32 captured_ns;
	u32 captured_sec;

	/* NOTE(review): nanoseconds are read before seconds — presumably the
	 * hardware latches the pair on this access order; confirm against
	 * the IP documentation before reordering.
	 */
	captured_ns = axienet_ior(lp, offset + 4);
	captured_sec = axienet_ior(lp, offset);

	hwtstamps->hwtstamp = ktime_set(captured_sec,
					captured_ns);
}
179
180
181
182
183
184
185
186
187
188
189static void axienet_ptp_recv(struct net_device *ndev)
190{
191 struct axienet_local *lp = netdev_priv(ndev);
192 unsigned long ptp_frame_base_addr = 0;
193 struct sk_buff *skb;
194 u16 msg_len;
195 u8 msg_type;
196 u32 bytes = 0;
197 u32 packets = 0;
198
199 pr_debug("%s:\n ", __func__);
200
201 while (((lp->ptp_rx_hw_pointer & 0xf) !=
202 (lp->ptp_rx_sw_pointer & 0xf))) {
203 skb = netdev_alloc_skb(ndev, PTP_RX_FRAME_SIZE);
204
205 lp->ptp_rx_sw_pointer += 1;
206
207 ptp_frame_base_addr = PTP_RX_BASE_OFFSET +
208 ((lp->ptp_rx_sw_pointer & 0xf) *
209 PTP_RX_HWBUF_SIZE);
210
211 memset(skb->data, 0x0, PTP_RX_FRAME_SIZE);
212
213 memcpy_fromio_32(lp, ptp_frame_base_addr, skb->data,
214 PTP_RX_FRAME_SIZE);
215
216 msg_type = *(u8 *)(skb->data + ETH_HLEN) & 0xf;
217 msg_len = *(u16 *)(skb->data + ETH_HLEN + 2);
218
219 skb_put(skb, ntohs(msg_len) + ETH_HLEN);
220
221 bytes += skb->len;
222 packets++;
223
224 skb->protocol = eth_type_trans(skb, ndev);
225 skb->ip_summed = CHECKSUM_UNNECESSARY;
226
227 pr_debug(" -->RECV: protocol: %x message: %s frame_len: %d\n",
228 skb->protocol, msg_type_string(msg_type & 0xf),
229 skb->len);
230
231 if (!(msg_type & PTP_MSG_TYPE_MASK)) {
232 axienet_set_timestamp(lp, skb_hwtstamps(skb),
233 (ptp_frame_base_addr +
234 PTP_HW_TSTAMP_OFFSET));
235 }
236
237 netif_rx(skb);
238 }
239 ndev->stats.rx_packets += packets;
240 ndev->stats.rx_bytes += bytes;
241}
242
243
244
245
246
247
248
249
250irqreturn_t axienet_ptp_rx_irq(int irq, void *_ndev)
251{
252 struct net_device *ndev = _ndev;
253 struct axienet_local *lp = netdev_priv(ndev);
254
255 pr_debug("%s: received\n ", __func__);
256 lp->ptp_rx_hw_pointer = (axienet_ior(lp, PTP_RX_CONTROL_OFFSET)
257 & PTP_RX_PACKET_FIELD_MASK) >> 8;
258
259 axienet_ptp_recv(ndev);
260
261 return IRQ_HANDLED;
262}
263
264
265
266
267
268
269
/* axienet_tx_tstamp - deferred work that attaches hardware TX timestamps
 * to queued PTP skbs and completes them.
 * @work: embedded tx_tstamp_work inside struct axienet_local
 *
 * Scheduled from the PTP TX interrupt.  For every queued skb whose buffer
 * index the hardware reports as transmitted, reads the captured timestamp
 * from that TX buffer and delivers it via skb_tstamp_tx(), then frees the
 * skb and accounts TX stats.
 */
void axienet_tx_tstamp(struct work_struct *work)
{
	struct axienet_local *lp = container_of(work, struct axienet_local,
						tx_tstamp_work);
	struct net_device *ndev = lp->ndev;
	struct skb_shared_hwtstamps hwtstamps;
	struct sk_buff *skb;
	unsigned long ts_reg_offset;
	unsigned long flags;
	u8 tx_packet;
	u8 index;
	u32 bytes = 0;
	u32 packets = 0;

	memset(&hwtstamps, 0, sizeof(struct skb_shared_hwtstamps));

	spin_lock_irqsave(&lp->ptp_tx_lock, flags);

	/* Index of the most recently transmitted buffer, as reported by the
	 * TX control register — presumably skbs with a higher index are
	 * still in flight.
	 */
	tx_packet = (axienet_ior(lp, PTP_TX_CONTROL_OFFSET) &
		     PTP_TX_PACKET_FIELD_MASK) >>
		    PTP_TX_PACKET_FIELD_SHIFT;

	while ((skb = __skb_dequeue(&lp->ptp_txq)) != NULL) {
		/* Buffer index recorded by axienet_ptp_xmit(). */
		index = skb->cb[0];

		if (index > tx_packet) {
			/* Not transmitted yet: put it back and stop.
			 * NOTE(review): requeueing at the tail reorders the
			 * queue relative to head dequeue — harmless only if
			 * at most one unfinished skb is ever outstanding;
			 * verify against the buffer-allocation scheme.
			 */
			skb_queue_tail(&lp->ptp_txq, skb);
			break;
		}

		ts_reg_offset = PTP_TX_BUFFER_OFFSET(index) +
				PTP_HW_TSTAMP_OFFSET;

		if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
			axienet_set_timestamp(lp, &hwtstamps, ts_reg_offset);
			skb_tstamp_tx(skb, &hwtstamps);
		}

		bytes += skb->len;
		packets++;
		dev_kfree_skb_any(skb);
	}
	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += bytes;

	spin_unlock_irqrestore(&lp->ptp_tx_lock, flags);
}
319
320
321
322
323
324
325
326
327
/* axienet_ptp_tx_irq - PTP TX-done interrupt handler.
 * @irq:   interrupt number (unused)
 * @_ndev: network device pointer passed at request_irq time
 *
 * Defers timestamp retrieval to the tx_tstamp_work worker (it takes a
 * spinlock and does MMIO reads) and wakes the queue that
 * axienet_ptp_xmit() may have stopped.  Return: IRQ_HANDLED always.
 */
irqreturn_t axienet_ptp_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	pr_debug("%s: got tx interrupt\n", __func__);

	/* Read of the TX control register with the result discarded —
	 * presumably a read-to-acknowledge of the interrupt; confirm
	 * against the IP documentation.
	 */
	axienet_ior(lp, PTP_TX_CONTROL_OFFSET);

	schedule_work(&lp->tx_tstamp_work);

	netif_wake_queue(ndev);

	return IRQ_HANDLED;
}
344