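/*
 * Transmit support for the Cavium Octeon Ethernet driver.  Packets are
 * normally queued to the PKO hardware (cvm_oct_xmit); cvm_oct_xmit_pow
 * instead copies a packet into a fresh work queue entry and submits it
 * to the POW.
 */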
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <linux/atomic.h>
#include <net/sch_generic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

#define CVM_OCT_SKB_CB(skb)	((u64 *)((skb)->cb))

/*
 * GET_SKBUFF_QOS() selects the PKO output queue for a packet.  Boards that
 * want per-packet QoS can supply their own definition; the default sends
 * everything to queue 0.
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif

static void cvm_oct_tx_do_cleanup(unsigned long arg);
static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);

/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
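
/*
 * The PKO hardware decrements the per-queue FAU counter once for every
 * packet it finishes sending.  Callers read that counter with a
 * fetch-and-add of MAX_SKB_TO_FREE; this helper undoes whatever part of
 * that add was not needed and returns how many skbs (clamped to
 * MAX_SKB_TO_FREE) can now be freed from the tx_free_list.
 */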
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
	int32_t undo;

	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE :
				 skb_to_free + MAX_SKB_TO_FREE;
	if (undo > 0)
		cvmx_fau_atomic_add32(fau, -undo);
	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
						       -skb_to_free;
	return skb_to_free;
}

static void cvm_oct_kick_tx_poll_watchdog(void)
{
	union cvmx_ciu_timx ciu_timx;

	/*
	 * Arm CIU timer 1 as a one-shot watchdog so queued TX skbs still
	 * get freed even if no further packets are transmitted.
	 */
	ciu_timx.u64 = 0;
	ciu_timx.s.one_shot = 1;
	ciu_timx.s.len = cvm_oct_tx_poll_interval;
	cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}
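
/**
 * cvm_oct_free_tx_skbs - free skbs that the hardware has finished sending
 * @dev:    Device whose per-QoS tx_free_list should be drained
 *
 * Wakes the netif queue if it was stopped and re-arms the poll watchdog
 * when packets are still outstanding.
 */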
void cvm_oct_free_tx_skbs(struct net_device *dev)
{
	int32_t skb_to_free;
	int qos, queues_per_port;
	int total_freed = 0;
	int total_remaining = 0;
	unsigned long flags;
	struct octeon_ethernet *priv = netdev_priv(dev);

	queues_per_port = cvmx_pko_get_num_queues(priv->port);

	/* Drain any pending packets in the free list */
	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
			continue;
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
							 priv->fau + qos * 4);

		total_freed += skb_to_free;
		if (skb_to_free > 0) {
			struct sk_buff *to_free_list = NULL;

			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
			while (skb_to_free > 0) {
				struct sk_buff *t =
					__skb_dequeue(&priv->tx_free_list[qos]);

				t->next = to_free_list;
				to_free_list = t;
				skb_to_free--;
			}
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);

			/* Do the actual freeing outside of the lock */
			while (to_free_list) {
				struct sk_buff *t = to_free_list;

				to_free_list = to_free_list->next;
				dev_kfree_skb_any(t);
			}
		}
		total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
	}
	if (total_freed >= 0 && netif_queue_stopped(dev))
		netif_wake_queue(dev);
	if (total_remaining)
		cvm_oct_kick_tx_poll_watchdog();
}
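
/**
 * cvm_oct_xmit - transmit a packet through the PKO hardware
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns Always returns NETDEV_TX_OK
 */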
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr hw_buffer;
	uint64_t old_scratch;
	uint64_t old_scratch2;
	int qos;
	int i;
	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sk_buff *to_free_list;
	int32_t skb_to_free;
	int32_t buffers_to_free;
	u32 total_to_clean;
	unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif

	/* Prefetch the private data; it is larger than one cache line. */
	prefetch(priv);

	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else {
		qos = 0;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

		/*
		 * Asynchronously read the outstanding packet-buffer count
		 * and reserve MAX_SKB_TO_FREE from this queue's skb counter.
		 */
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4,
					       MAX_SKB_TO_FREE);
	}

	/*
	 * We have space for 6 segment pointers.  If there will be more
	 * than that, we must linearize.
	 */
	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
		if (unlikely(__skb_linearize(skb))) {
			queue_type = QUEUE_DROP;
			if (USE_ASYNC_IOBDMA) {
				/* Get the number of skbs in use by the HW */
				CVMX_SYNCIOBDMA;
				skb_to_free =
					cvmx_scratch_read64(CVMX_SCR_SCRATCH);
			} else {
				/* Get the number of skbs in use by the HW */
				skb_to_free = cvmx_fau_fetch_and_add32(
						priv->fau + qos * 4,
						MAX_SKB_TO_FREE);
			}
			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
							priv->fau + qos * 4);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
			goto skip_xmit;
		}
	}

	/*
	 * The CN3XXX series of parts has an errata which can hang the
	 * GMX block if a collision occurs towards the end of a short
	 * packet in half-duplex mode.  Work around it by zero-padding
	 * short packets to 64 bytes when the skb has room for the
	 * padding; half duplex is only possible on the first two
	 * interfaces.
	 */
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packets in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;

				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					memset(__skb_put(skb, add_bytes), 0,
					       add_bytes);
			}
		}
	}

	/* Build the PKO command */
	pko_command.u64 = 0;
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	pko_command.s.dontfree = 1;

	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb->len;
	} else {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb_headlen(skb);
		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;

			hw_buffer.s.addr = XKPHYS_TO_PHYS(
				(u64)(page_address(fs->page.p) +
				      fs->page_offset));
			hw_buffer.s.size = fs->size;
			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
		}
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.gather = 1;
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * See if we can put this skb in the FPA pool so the hardware can
	 * free it when transmit is done.  Any state the network stack
	 * still needs must not be carried over when the buffer is reused,
	 * so bail out to dont_put_skbuff_in_hw whenever handing the skb
	 * to hardware would be unsafe.  Disable REUSE_SKBUFFS_WITHOUT_FREE
	 * if in doubt.
	 */
#if REUSE_SKBUFFS_WITHOUT_FREE
	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/* The packet starts before the aligned FPA buffer start */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
		/* The buffer isn't large enough for the FPA pool */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/* The buffer is shared with someone else */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/* The buffer is a clone */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/* The header is cloned */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/* A destructor must be called when the skb is freed */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/* The skb has fragments */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    (skb->truesize !=
	     sizeof(*skb) + skb_end_offset(skb))) {
		/* truesize doesn't match a simple linear skb */
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * We can use this buffer in the FPA.  We don't need the FAU
	 * update anymore.
	 */
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
			   ((unsigned long)fpa_head >> 7);
	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;

	/*
	 * The skbuff will be reused without ever being freed.  We must
	 * clean up a bunch of core things first.
	 */
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
	skb->sp = NULL;
#endif
	nf_reset(skb);

#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
	skb_reset_tc(skb);
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */

dont_put_skbuff_in_hw:

	/* Check if we can use the hardware checksumming */
	if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) ||
	     (ip_hdr(skb)->frag_off == 1 << 14)) &&
	    ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
	     (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbs and packet buffers in use by HW */
		CVMX_SYNCIOBDMA;
		skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbs and packet buffers in use by HW */
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		buffers_to_free =
		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	}

	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
						 priv->fau + qos * 4);

	/*
	 * If we're sending faster than the receive side can free them,
	 * don't do the HW free.
	 */
	if ((buffers_to_free < -100) && !pko_command.s.dontfree)
		pko_command.s.dontfree = 1;

	if (pko_command.s.dontfree) {
		queue_type = QUEUE_CORE;
		pko_command.s.reg0 = priv->fau + qos * 4;
	} else {
		queue_type = QUEUE_HW;
	}
	if (USE_ASYNC_IOBDMA)
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       FAU_TOTAL_TX_TO_CLEAN, 1);

	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

	/* Drop this packet if we have too many already queued to the HW */
	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
		     MAX_OUT_QUEUE_DEPTH)) {
		if (dev->tx_queue_len != 0) {
			/* Drop the lock when notifying the core.  */
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			netif_stop_queue(dev);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
		} else {
			/* If not using normal queueing, drop immediately.  */
			queue_type = QUEUE_DROP;
			goto skip_xmit;
		}
	}

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_NONE);

	/* Send the packet to the output queue */
	if (unlikely(cvmx_pko_send_packet_finish(priv->port,
						 priv->queue + qos,
						 pko_command, hw_buffer,
						 CVMX_PKO_LOCK_NONE))) {
		printk_ratelimited("%s: Failed to send the packet\n",
				   dev->name);
		queue_type = QUEUE_DROP;
	}
skip_xmit:
	to_free_list = NULL;

	switch (queue_type) {
	case QUEUE_DROP:
		skb->next = to_free_list;
		to_free_list = skb;
		priv->stats.tx_dropped++;
		break;
	case QUEUE_HW:
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
		break;
	case QUEUE_CORE:
		__skb_queue_tail(&priv->tx_free_list[qos], skb);
		break;
	default:
		BUG();
	}

	/* Collect any skbs the hardware has finished with */
	while (skb_to_free > 0) {
		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);

		t->next = to_free_list;
		to_free_list = t;
		skb_to_free--;
	}

	spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);

	/* Do the actual freeing outside of the lock */
	while (to_free_list) {
		struct sk_buff *t = to_free_list;

		to_free_list = to_free_list->next;
		dev_kfree_skb_any(t);
	}

	if (USE_ASYNC_IOBDMA) {
		CVMX_SYNCIOBDMA;
		total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	} else {
		total_to_clean =
			cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
	}

	if (total_to_clean & 0x3ff) {
		/*
		 * Schedule the cleanup tasklet so skbs queued behind
		 * completed transmits get freed, even in the pathological
		 * case of heavy traffic on one port delaying cleanup of
		 * packets on a different port.
		 */
		tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	}

	cvm_oct_kick_tx_poll_watchdog();

	return NETDEV_TX_OK;
}
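
/**
 * cvm_oct_xmit_pow - transmit a packet to the POW instead of PKO
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns Always returns zero
 */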
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;

	/* Get a work queue entry */
	cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);

	if (unlikely(work == NULL)) {
		printk_ratelimited("%s: Failed to allocate a work queue entry\n",
				   dev->name);
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(packet_buffer == NULL)) {
		printk_ratelimited("%s: Failed to allocate a packet buffer\n",
				   dev->name);
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/*
	 * Calculate where we need to copy the data to.  Leave 8 bytes
	 * for a next pointer (unused), include any configured first
	 * mbuff skip rounded up to 8 bytes, and offset by 6 so the IP
	 * header lands on a 64-bit boundary.  The calculation may add
	 * a little extra, but that doesn't hurt.
	 */
	copy_location = packet_buffer + sizeof(uint64_t);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;

	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool.  We can't use the
	 * trick of counting outstanding packets like in cvm_oct_xmit.
	 */
	memcpy(copy_location, skb->data, skb->len);

	/*
	 * Fill in some of the work queue fields.  We may need to add
	 * more if the software at the other end needs them.
	 */
	work->hw_chksum = skb->csum;
	work->len = skb->len;
	work->ipprt = priv->port;
	work->qos = priv->port & 0x7;
	work->grp = pow_send_group;
	work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->tag = pow_send_group;
	/*
	 * word2 defaults to zero; the #if 0 blocks below list fields that
	 * are intentionally left at zero.
	 */
	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;

	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
#if 0
		work->word2.s.vlan_valid = 0;
		work->word2.s.vlan_cfi = 0;
		work->word2.s.vlan_id = 0;
		work->word2.s.dec_ipcomp = 0;
#endif
		work->word2.s.tcp_or_udp =
			(ip_hdr(skb)->protocol == IPPROTO_TCP) ||
			(ip_hdr(skb)->protocol == IPPROTO_UDP);
#if 0
		/* No decryption was done */
		work->word2.s.dec_ipsec = 0;
		/* We only support IPv4 right now */
		work->word2.s.is_v6 = 0;
		/* Hardware would set to zero */
		work->word2.s.software = 0;
		/* No error, packet is internal */
		work->word2.s.L4_error = 0;
#endif
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
					  (ip_hdr(skb)->frag_off == 1 << 14));
#if 0
		/* Assume Linux is sending a good packet */
		work->word2.s.IP_exc = 0;
#endif
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
		/* This is an IP packet */
		work->word2.s.not_IP = 0;
		/* No error, packet is internal */
		work->word2.s.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.s.err_code = 0;
#endif

		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way hardware
		 * does.
		 */
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
	} else {
#if 0
		work->word2.snoip.vlan_valid = 0;
		work->word2.snoip.vlan_cfi = 0;
		work->word2.snoip.vlan_id = 0;
		work->word2.snoip.software = 0;
#endif
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
			(skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
			(skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
#if 0
		/* No error, packet is internal */
		work->word2.snoip.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.snoip.err_code = 0;
#endif
		memcpy(work->packet_data, skb->data,
		       sizeof(work->packet_data));
	}

	/* Submit the packet to the POW */
	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
			     work->grp);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return 0;
}
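
/**
 * cvm_oct_tx_shutdown_dev - free all skbs that are currently queued for TX
 * @dev:    Device being shut down
 */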
void cvm_oct_tx_shutdown_dev(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		while (skb_queue_len(&priv->tx_free_list[qos]))
			dev_kfree_skb_any(__skb_dequeue
					  (&priv->tx_free_list[qos]));
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	}
}

/* Runs from the cleanup tasklet: free completed TX skbs on every port. */
static void cvm_oct_tx_do_cleanup(unsigned long arg)
{
	int port;

	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];

			cvm_oct_free_tx_skbs(dev);
		}
	}
}
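
/*
 * Watchdog interrupt from CIU timer 1.  The actual cleanup work is
 * deferred to the tasklet.
 */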
static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
{
	/* Disable the interrupt.  */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Do the work in the tasklet.  */
	tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	return IRQ_HANDLED;
}
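
/*
 * Register CIU timer 1 as the TX cleanup watchdog.  The timer itself is
 * armed later by cvm_oct_kick_tx_poll_watchdog().
 */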
void cvm_oct_tx_initialize(void)
{
	int i;

	/* Disable the interrupt.  */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
	i = request_irq(OCTEON_IRQ_TIMER1,
			cvm_oct_tx_cleanup_watchdog, 0,
			"Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
}

void cvm_oct_tx_shutdown(void)
{
	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
}