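/*
 * Transmit support for the Octeon Ethernet driver. Packets are either
 * handed directly to the PKO hardware (cvm_oct_xmit) or wrapped in a
 * work queue entry and submitted to the POW (cvm_oct_xmit_pow).
 */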
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif

#include <linux/atomic.h>
#include <net/sch_generic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

#define CVM_OCT_SKB_CB(skb)	((u64 *)((skb)->cb))

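/*
 * GET_SKBUFF_QOS() may be defined by the platform to control which
 * output queue a packet uses (for example, based on skb->priority).
 * The default always selects queue 0 for the port.
 */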
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif

static void cvm_oct_tx_do_cleanup(unsigned long arg);
static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);

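/* Maximum number of SKBs to try to free per transmitted packet. */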
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)

/*
 * Undo the effect of the preceding fetch-and-add on the FAU counter
 * and clamp the number of SKBs we will actually free to
 * MAX_SKB_TO_FREE.
 */
static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
{
	int undo;

	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
						   MAX_SKB_TO_FREE;
	if (undo > 0)
		cvmx_fau_atomic_add32(fau, -undo);
	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
						       -skb_to_free;
	return skb_to_free;
}

/* Arm the one-shot CIU timer that drives the TX cleanup watchdog. */
static void cvm_oct_kick_tx_poll_watchdog(void)
{
	union cvmx_ciu_timx ciu_timx;

	ciu_timx.u64 = 0;
	ciu_timx.s.one_shot = 1;
	ciu_timx.s.len = cvm_oct_tx_poll_interval;
	cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}

static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
	int skb_to_free;
	int qos, queues_per_port;
	int total_freed = 0;
	int total_remaining = 0;
	unsigned long flags;
	struct octeon_ethernet *priv = netdev_priv(dev);

	queues_per_port = cvmx_pko_get_num_queues(priv->port);

	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
			continue;
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
							 priv->fau + qos * 4);
		total_freed += skb_to_free;
		if (skb_to_free > 0) {
			struct sk_buff *to_free_list = NULL;

			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
			while (skb_to_free > 0) {
				struct sk_buff *t;

				t = __skb_dequeue(&priv->tx_free_list[qos]);
				t->next = to_free_list;
				to_free_list = t;
				skb_to_free--;
			}
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			/* Do the actual freeing outside of the lock */
			while (to_free_list) {
				struct sk_buff *t = to_free_list;

				to_free_list = to_free_list->next;
				dev_kfree_skb_any(t);
			}
		}
		total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
	}
	if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev))
		netif_wake_queue(dev);
	if (total_remaining)
		cvm_oct_kick_tx_poll_watchdog();
}

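/**
 * cvm_oct_xmit - transmit a packet out of the device
 * @skb:    Packet to send
 * @dev:    Device to transmit through
 *
 * Returns Always returns NETDEV_TX_OK
 */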
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr hw_buffer;
	u64 old_scratch;
	u64 old_scratch2;
	int qos;
	int i;
	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sk_buff *to_free_list;
	int skb_to_free;
	int buffers_to_free;
	u32 total_to_clean;
	unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif

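	/*
	 * Prefetch the private data structure; it is larger than one
	 * cache line.
	 */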
	prefetch(priv);

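	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */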
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else {
		qos = 0;
	}

	if (USE_ASYNC_IOBDMA) {
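		/* Save scratch in case userspace is using it */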
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

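		/*
		 * Fetch and increment the number of packets to be
		 * freed.
		 */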
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4,
					       MAX_SKB_TO_FREE);
	}

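	/*
	 * We have space for 6 segment pointers. If there will be more
	 * than that, we must linearize.
	 */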
	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
		if (unlikely(__skb_linearize(skb))) {
			queue_type = QUEUE_DROP;
			if (USE_ASYNC_IOBDMA) {
				/*
				 * Get the number of skbuffs in use
				 * by the hardware.
				 */
				CVMX_SYNCIOBDMA;
				skb_to_free =
					cvmx_scratch_read64(CVMX_SCR_SCRATCH);
			} else {
				/*
				 * Get the number of skbuffs in use
				 * by the hardware.
				 */
				skb_to_free =
				     cvmx_fau_fetch_and_add32(priv->fau +
							      qos * 4,
							      MAX_SKB_TO_FREE);
			}
			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
								 priv->fau +
								 qos * 4);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
			goto skip_xmit;
		}
	}

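	/*
	 * On CN3XXX parts, undersized frames cause problems on half
	 * duplex links, so pad packets shorter than 64 bytes when the
	 * buffer has room for it.
	 */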
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packets in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;

				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					__skb_put_zero(skb, add_bytes);
			}
		}
	}

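	/* Build the PKO command */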
	pko_command.u64 = 0;
#ifdef __LITTLE_ENDIAN
	pko_command.s.le = 1;
#endif
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	pko_command.s.dontfree = 1;

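	/* Build the PKO buffer pointer */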
	hw_buffer.u64 = 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb->len;
	} else {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb_headlen(skb);
		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;

			hw_buffer.s.addr =
				XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) +
					       fs->page_offset));
			hw_buffer.s.size = fs->size;
			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
		}
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.gather = 1;
		goto dont_put_skbuff_in_hw;
	}

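	/*
	 * See if we can put this skb in the FPA pool. Any strange
	 * behavior from the Linux networking stack will most likely
	 * be caused by a bug in the following code. If some field is
	 * in use by the network stack and gets carried over when a
	 * buffer is reused, bad things may happen. If in doubt and
	 * you don't need the absolute best performance, disable the
	 * define REUSE_SKBUFFS_WITHOUT_FREE. Reusing buffers has
	 * shown a significant performance gain under some loads.
	 */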
#if REUSE_SKBUFFS_WITHOUT_FREE
	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/* TX buffer beginning can't meet FPA alignment constraints */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
		/* TX buffer isn't large enough for the FPA */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/* TX buffer sharing data with someone else */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/* TX buffer has been cloned */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/* TX buffer header has been cloned */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/* TX buffer has a destructor */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/* TX buffer has fragments */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    (skb->truesize != sizeof(*skb) + skb_end_offset(skb))) {
		/* TX buffer truesize has been changed */
		goto dont_put_skbuff_in_hw;
	}

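	/*
	 * We can use this buffer in the FPA. We don't need the FAU
	 * update anymore.
	 */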
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
			   ((unsigned long)fpa_head >> 7);

	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;

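	/*
	 * The skbuff will be reused without ever being freed. We must
	 * clean up a bunch of core things.
	 */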
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
	secpath_reset(skb);
#endif
	nf_reset(skb);

#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
	skb_reset_tc(skb);
#endif
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */

dont_put_skbuff_in_hw:

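	/* Check if we can use the hardware checksumming */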
	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->version == 4) &&
	    (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) ||
	     (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
	    ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
	     (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbuffs in use by the hardware */
		CVMX_SYNCIOBDMA;
		skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbuffs in use by the hardware */
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		buffers_to_free =
		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	}

	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
						 priv->fau + qos * 4);

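	/*
	 * If we're sending faster than the receive can free them then
	 * don't do the HW free.
	 */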
	if ((buffers_to_free < -100) && !pko_command.s.dontfree)
		pko_command.s.dontfree = 1;

	if (pko_command.s.dontfree) {
		queue_type = QUEUE_CORE;
		pko_command.s.reg0 = priv->fau + qos * 4;
	} else {
		queue_type = QUEUE_HW;
	}
	if (USE_ASYNC_IOBDMA)
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       FAU_TOTAL_TX_TO_CLEAN, 1);

	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

	/* Drop this packet if we have too many already queued to the HW */
	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
		     MAX_OUT_QUEUE_DEPTH)) {
		if (dev->tx_queue_len != 0) {
			/* Drop the lock when notifying the core. */
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			netif_stop_queue(dev);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
		} else {
			/* If not using normal queueing, just drop. */
			queue_type = QUEUE_DROP;
			goto skip_xmit;
		}
	}

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_NONE);

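	/* Send the packet to the output queue */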
	if (unlikely(cvmx_pko_send_packet_finish(priv->port,
						 priv->queue + qos,
						 pko_command, hw_buffer,
						 CVMX_PKO_LOCK_NONE))) {
		printk_ratelimited("%s: Failed to send the packet\n",
				   dev->name);
		queue_type = QUEUE_DROP;
	}
skip_xmit:
	to_free_list = NULL;

	switch (queue_type) {
	case QUEUE_DROP:
		skb->next = to_free_list;
		to_free_list = skb;
		dev->stats.tx_dropped++;
		break;
	case QUEUE_HW:
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
		break;
	case QUEUE_CORE:
		__skb_queue_tail(&priv->tx_free_list[qos], skb);
		break;
	default:
		BUG();
	}

	while (skb_to_free > 0) {
		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);

		t->next = to_free_list;
		to_free_list = t;
		skb_to_free--;
	}

	spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);

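	/* Do the actual freeing outside of the lock */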
	while (to_free_list) {
		struct sk_buff *t = to_free_list;

		to_free_list = to_free_list->next;
		dev_kfree_skb_any(t);
	}

	if (USE_ASYNC_IOBDMA) {
		CVMX_SYNCIOBDMA;
		total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	} else {
		total_to_clean =
			cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
	}

	if (total_to_clean & 0x3ff) {
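		/*
		 * Kick the cleanup tasklet so skbs whose transmit has
		 * completed get freed promptly.
		 */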
		tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	}

	cvm_oct_kick_tx_poll_watchdog();

	return NETDEV_TX_OK;
}

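/**
 * cvm_oct_xmit_pow - transmit a packet to the POW
 * @skb:    Packet to send
 * @dev:    Device to send to
 *
 * Returns Always returns zero
 */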
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;

	/* Get a work queue entry */
	cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);

	if (unlikely(!work)) {
		printk_ratelimited("%s: Failed to allocate a work queue entry\n",
				   dev->name);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(!packet_buffer)) {
		printk_ratelimited("%s: Failed to allocate a packet buffer\n",
				   dev->name);
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}

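	/*
	 * Calculate where we need to copy the data to. We need to
	 * leave 8 bytes for a next pointer (unused). We also need to
	 * include any configured skip. Then we need to align the IP
	 * packet src and dest into the same 64bit word. The below
	 * calculation may add a little extra, but that doesn't
	 * matter.
	 */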
	copy_location = packet_buffer + sizeof(u64);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;

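	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool. We can't use the
	 * trick of counting outstanding packets like in cvm_oct_xmit.
	 */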
	memcpy(copy_location, skb->data, skb->len);

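	/*
	 * Fill in some of the work queue fields. We may need to add
	 * more if the software at the other end needs them.
	 */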
	if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
		work->word0.pip.cn38xx.hw_chksum = skb->csum;
	work->word1.len = skb->len;
	cvmx_wqe_set_port(work, priv->port);
	cvmx_wqe_set_qos(work, priv->port & 0x7);
	cvmx_wqe_set_grp(work, pow_send_group);
	work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->word1.tag = pow_send_group;

	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;

	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
#if 0
		work->word2.s.vlan_valid = 0;
		work->word2.s.vlan_cfi = 0;
		work->word2.s.vlan_id = 0;
		work->word2.s.dec_ipcomp = 0;
#endif
		work->word2.s.tcp_or_udp =
		    (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
		    (ip_hdr(skb)->protocol == IPPROTO_UDP);
#if 0
		work->word2.s.dec_ipsec = 0;
		/* We only support IPv4 right now */
		work->word2.s.is_v6 = 0;
		/* Hardware would set to zero */
		work->word2.s.software = 0;
		/* No error, packet is internal */
		work->word2.s.L4_error = 0;
#endif
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
					  (ip_hdr(skb)->frag_off ==
					   htons(1 << 14)));
#if 0
		/* Assume Linux is sending a good packet */
		work->word2.s.IP_exc = 0;
#endif
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
		/* This is an IP packet */
		work->word2.s.not_IP = 0;
		/* No error, packet is internal */
		work->word2.s.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.s.err_code = 0;
#endif

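		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way receive
		 * packets are.
		 */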
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
	} else {
#if 0
		work->word2.snoip.vlan_valid = 0;
		work->word2.snoip.vlan_cfi = 0;
		work->word2.snoip.vlan_id = 0;
		/* Hardware would set to zero */
		work->word2.snoip.software = 0;
#endif
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
		    (skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
		    (skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
#if 0
		/* No error, packet is internal */
		work->word2.snoip.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.snoip.err_code = 0;
#endif
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
	}

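	/* Submit the packet to the POW */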
	cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
			     cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_consume_skb_any(skb);
	return 0;
}

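/**
 * cvm_oct_tx_shutdown_dev - free all skb that are currently queued for TX.
 * @dev:    Device being shutdown
 */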
void cvm_oct_tx_shutdown_dev(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		while (skb_queue_len(&priv->tx_free_list[qos]))
			dev_kfree_skb_any(__skb_dequeue
					  (&priv->tx_free_list[qos]));
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	}
}

static void cvm_oct_tx_do_cleanup(unsigned long arg)
{
	int port;

	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];

			cvm_oct_free_tx_skbs(dev);
		}
	}
}

static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
{
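	/* Disable the interrupt. */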
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Do the work in the tasklet. */
	tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	return IRQ_HANDLED;
}

void cvm_oct_tx_initialize(void)
{
	int i;

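	/* Disable the interrupt. */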
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);

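	/* Register an IRQ handler to receive CIU_TIMX(1) interrupts */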
	i = request_irq(OCTEON_IRQ_TIMER1,
			cvm_oct_tx_cleanup_watchdog, 0,
			"Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
}

void cvm_oct_tx_shutdown(void)
{
	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
}