/* QLogic qede NIC Driver - fast-path (Rx/Tx) handling */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include "qede_ptp.h"

#include <linux/qed/qed_if.h>
#include "qede.h"

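/* Post a new Rx buffer (a full page) on the Rx BD ring. When @allow_lazy is
 * set and enough buffers are already filled, the allocation is deferred.
 */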
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
{
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	struct page *data;

	/* In case lazy-allocation is allowed, postpone allocation until the
	 * end of the NAPI run. We'd still need to make sure the Rx ring has
	 * sufficient buffers to guarantee receiving frames.
	 */
	if (allow_lazy && likely(rxq->filled_buffers > 12)) {
		rxq->filled_buffers--;
		return 0;
	}

	data = alloc_pages(GFP_ATOMIC, 0);
	if (unlikely(!data))
		return -ENOMEM;

	/* Map the entire page as it would be used
	 * for multiple RX buffer segment size mapping.
	 */
	mapping = dma_map_page(rxq->dev, data, 0,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
		__free_page(data);
		return -ENOMEM;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	sw_rx_data->page_offset = 0;
	sw_rx_data->data = data;
	sw_rx_data->mapping = mapping;

	/* Advance PROD and get a BD pointer */
	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
	WARN_ON(!rx_bd);
	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
				     rxq->rx_headroom);

	rxq->sw_rx_prod++;
	rxq->filled_buffers++;

	return 0;
}

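/* Unmap and free the SKB of the packet at the Tx consumer index; the packet
 * length is returned through @len so the caller can account completed bytes.
 */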
int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
{
	u16 idx = txq->sw_tx_cons;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	int bds_consumed = 0;
	int nbds;
	bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

	if (unlikely(!skb)) {
		DP_ERR(edev,
		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
		return -1;
	}

	*len = skb->len;

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	bds_consumed++;

	nbds = first_bd->data.nbds;

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;

	return 0;
}

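/* Unwind the BDs already produced for a packet whose mapping failed in
 * qede_start_xmit(): unmap whatever was mapped, restore the chain producer
 * and free the SKB.
 */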
static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd, bool data_split)
{
	u16 idx = txq->sw_tx_prod;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

	/* Return prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		nbd--;
	}

	dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(txq->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	/* Return again prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;
}

static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
{
	u32 rc = XMIT_L4_CSUM;
	__be16 l3_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*ipv6_ext = 1;

	if (skb->encapsulation) {
		rc |= XMIT_ENC;
		if (skb_is_gso(skb)) {
			unsigned short gso_type = skb_shinfo(skb)->gso_type;

			if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
			    (gso_type & SKB_GSO_GRE_CSUM))
				rc |= XMIT_ENC_GSO_L4_CSUM;

			rc |= XMIT_LSO;
			return rc;
		}
	}

	if (skb_is_gso(skb))
		rc |= XMIT_LSO;

	return rc;
}

static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{
	u8 l4_proto;
	u16 bd2_bits1 = 0, bd2_bits2 = 0;

	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		      ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		     << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
	else
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	if (third_bd)
		third_bd->data.bitfields |=
			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
				     ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
				    ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);

	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}

static int map_frag_to_bd(struct qede_tx_queue *txq,
			  skb_frag_t *frag, struct eth_tx_bd *bd)
{
	dma_addr_t mapping;

	/* Map skb non-linear frag data for DMA */
	mapping = skb_frag_dma_map(txq->dev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping)))
		return -ENOMEM;

	/* Setup the data pointer of the frag data */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

	return 0;
}

static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
	if (is_encap_pkt)
		return (skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data);
	else
		return (skb_transport_header(skb) +
			tcp_hdrlen(skb) - skb->data);
}

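/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */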
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
{
	int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;

	if (xmit_type & XMIT_LSO) {
		int hlen;

		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);

		/* linear payload would require its own BD */
		if (skb_headlen(skb) > hlen)
			allowed_frags--;
	}

	return (skb_shinfo(skb)->nr_frags > allowed_frags);
}
#endif

static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
{
	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* Fence required to flush the write combined buffer, since another
	 * CPU may write to the same doorbell address and data may be lost
	 * due to relaxed order nature of write combined bar.
	 */
	mmiowb();
}

int qede_txq_has_work(struct qede_tx_queue *txq)
{
	u16 hw_bd_cons;

	/* Tell compiler that consumer and producer can change */
	barrier();
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
		return 0;

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}

static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;
	u16 hw_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	int rc;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  hw_bd_cons,
				  qed_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
		txq->xmit_pkts++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		      >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
	}

	return 0;
}

bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
	u16 hw_comp_cons, sw_comp_cons;

	/* Tell compiler that status block fields can change */
	barrier();

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;
}

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}

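/* This function reuses the page of an Rx buffer that was not handed to the
 * network stack, re-posting it on the Rx BD ring at the producer index.
 */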
static inline void qede_reuse_page(struct qede_rx_queue *rxq,
				   struct sw_rx_data *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	*curr_prod = *curr_cons;

	new_mapping = curr_prod->mapping + curr_prod->page_offset;

	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
					  rxq->rx_headroom);

	rxq->sw_rx_prod++;
	curr_cons->data = NULL;
}

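/* In case of allocation failures, reuse buffers starting from the consumer
 * index instead of leaking them; @count buffers are recycled.
 */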
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
{
	struct sw_rx_data *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
		qede_reuse_page(rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}

static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
{
	/* Move to the next segment in the page */
	curr_cons->page_offset += rxq->rx_buf_seg_size;

	if (curr_cons->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			/* Since we failed to allocate a new buffer,
			 * the current buffer can be used again.
			 */
			curr_cons->page_offset -= rxq->rx_buf_seg_size;

			return -ENOMEM;
		}

		dma_unmap_page(rxq->dev, curr_cons->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	} else {
		/* Increment refcount of the page as we don't want the
		 * network stack to take ownership of the page, which can
		 * be recycled multiple times by the driver.
		 */
		page_ref_inc(curr_cons->data);
		qede_reuse_page(rxq, curr_cons);
	}

	return 0;
}

void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* Make sure that the BD data is updated before updating the
	 * producers, since FW might read the BD right after the producer
	 * is updated.
	 */
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device
	 * before the napi lock is released and another qede_poll is called
	 * (possibly on another CPU). Without this barrier, the next doorbell
	 * can bypass this one.
	 */
	mmiowb();
}

static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	enum rss_hash_type htype;
	u32 hash = 0;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
	if (htype) {
		hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
			     (htype == RSS_HASH_TYPE_IPV6)) ?
			    PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		hash = le32_to_cpu(rss_hash);
	}
	skb_set_hash(skb, hash, hash_type);
}

static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
		skb->csum_level = 1;
		skb->encapsulation = 1;
	}
}

static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct qede_rx_queue *rxq,
				    struct sk_buff *skb, u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&fp->napi, skb);
}

static void qede_set_gro_params(struct qede_dev *edev,
				struct sk_buff *skb,
				struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);

	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
	     PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
				    cqe->header_len;
}

static int qede_fill_frag_skb(struct qede_dev *edev,
			      struct qede_rx_queue *rxq,
			      u8 tpa_agg_index, u16 len_on_bd)
{
	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
							 NUM_RX_BDS_MAX];
	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
	struct sk_buff *skb = tpa_info->skb;

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
		goto out;

	/* Add one frag and update the appropriate fields in the skb */
	skb_fill_page_desc(skb, tpa_info->frag_id++,
			   current_bd->data, current_bd->page_offset,
			   len_on_bd);

	if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
		/* Incr page ref count to reuse on allocation failure
		 * so that it doesn't get freed while freeing SKB.
		 */
		page_ref_inc(current_bd->data);
		goto out;
	}

	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;

	skb->data_len += len_on_bd;
	skb->truesize += rxq->rx_buf_seg_size;
	skb->len += len_on_bd;

	return 0;

out:
	tpa_info->state = QEDE_AGG_STATE_ERROR;
	qede_recycle_rx_bd_ring(rxq, 1);

	return -ENOMEM;
}

static bool qede_tunn_exist(u16 flag)
{
	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
}

static u8 qede_check_tunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 tcsum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;
}

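/* Handle a TPA_START CQE: swap the consumed buffer with the per-aggregation
 * replacement buffer, allocate the SKB that will hold the aggregated packet
 * and record the metadata (vlan, hash, GRO parameters) from the CQE.
 */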
static void qede_tpa_start(struct qede_dev *edev,
			   struct qede_rx_queue *rxq,
			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *replace_buf = &tpa_info->buffer;
	dma_addr_t mapping = tpa_info->buffer_mapping;
	struct sw_rx_data *sw_rx_data_cons;
	struct sw_rx_data *sw_rx_data_prod;

	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
	sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];

	/* Re-post the pre-allocated replacement buffer on the BD ring in
	 * place of the buffer being consumed by this aggregation.
	 */
	sw_rx_data_prod->mapping = replace_buf->mapping;

	sw_rx_data_prod->data = replace_buf->data;
	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
	sw_rx_data_prod->page_offset = replace_buf->page_offset;

	rxq->sw_rx_prod++;

	/* Stash the consumed buffer (holding the packet start) in the
	 * aggregation info; it is copied into the SKB at TPA end.
	 */
	tpa_info->buffer = *sw_rx_data_cons;
	mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
			   le32_to_cpu(rx_bd_cons->addr.lo));

	tpa_info->buffer_mapping = mapping;
	rxq->sw_rx_cons++;

	/* Allocate the SKB that will eventually hold the entire
	 * aggregated packet.
	 */
	tpa_info->skb = netdev_alloc_skb(edev->ndev,
					 le16_to_cpu(cqe->len_on_first_bd));
	if (unlikely(!tpa_info->skb)) {
		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
		tpa_info->state = QEDE_AGG_STATE_ERROR;
		goto cons_buf;
	}

	/* Start filling in the aggregation info */
	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
	tpa_info->frag_id = 0;
	tpa_info->state = QEDE_AGG_STATE_START;

	/* Store some information from the first CQE */
	tpa_info->start_cqe_placement_offset = cqe->placement_offset;
	tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
	if ((le16_to_cpu(cqe->pars_flags.flags) >>
	     PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
	    PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
		tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	else
		tpa_info->vlan_tag = 0;

	qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);

	/* This is needed in order to enable forwarding support */
	qede_set_gro_params(edev, tpa_info->skb, cqe);

cons_buf: /* We still need to handle bd_len_list to consume buffers */
	if (likely(cqe->ext_bd_len_list[0]))
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->ext_bd_len_list[0]));

	if (unlikely(cqe->ext_bd_len_list[1])) {
		DP_ERR(edev,
		       "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
		tpa_info->state = QEDE_AGG_STATE_ERROR;
	}
}

#ifdef CONFIG_INET
static void qede_gro_ip_csum(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);

	tcp_gro_complete(skb);
}

static void qede_gro_ipv6_csum(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	tcp_gro_complete(skb);
}
#endif

static void qede_gro_receive(struct qede_dev *edev,
			     struct qede_fastpath *fp,
			     struct sk_buff *skb,
			     u16 vlan_tag)
{
	/* FW can send a single MTU sized packet from gro flow
	 * due to aggregation timeout/last segment etc. which
	 * is not expected to be a gro packet. If a skb has zero
	 * frags then simply clear the GSO fields and pass it on.
	 */
	if (unlikely(!skb->data_len)) {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
		goto send_skb;
	}

#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		skb_reset_network_header(skb);

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			qede_gro_ip_csum(skb);
			break;
		case htons(ETH_P_IPV6):
			qede_gro_ipv6_csum(skb);
			break;
		default:
			DP_ERR(edev,
			       "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
			       ntohs(skb->protocol));
		}
	}
#endif

send_skb:
	skb_record_rx_queue(skb, fp->rxq->rxq_id);
	qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
}

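/* Handle a TPA_CONT CQE - add each reported length as another frag of the
 * aggregated SKB.
 */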
static inline void qede_tpa_cont(struct qede_dev *edev,
				 struct qede_rx_queue *rxq,
				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	int i;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));

	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA cont with more than a single len_list entry\n");
}

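/* Handle a TPA_END CQE - complete the aggregation: copy the headers from the
 * stashed start buffer, sanity-check the BD/length accounting and hand the
 * SKB to the GRO receive path.
 */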
static int qede_tpa_end(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_agg_info *tpa_info;
	struct sk_buff *skb;
	int i;

	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	skb = tpa_info->skb;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));
	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA end with more than a single len_list entry\n");

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
		goto err;

	/* Sanity */
	if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
		DP_ERR(edev,
		       "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
		       cqe->num_of_bds, tpa_info->frag_id);
	if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
		DP_ERR(edev,
		       "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
		       le16_to_cpu(cqe->total_packet_len), skb->len);

	memcpy(skb->data,
	       page_address(tpa_info->buffer.data) +
	       tpa_info->start_cqe_placement_offset +
	       tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);

	/* Finalize the SKB */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);

	qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);

	tpa_info->state = QEDE_AGG_STATE_NONE;

	return 1;
err:
	tpa_info->state = QEDE_AGG_STATE_NONE;
	dev_kfree_skb_any(tpa_info->skb);
	tpa_info->skb = NULL;
	return 0;
}

static u8 qede_check_notunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 csum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return csum;
}

static u8 qede_check_csum(u16 flag)
{
	if (!qede_tunn_exist(flag))
		return qede_check_notunn_csum(flag);
	else
		return qede_check_tunn_csum(flag);
}

static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
				      u16 flag)
{
	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;

	if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
			     ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
		return true;

	return false;
}

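/* Build an SKB around a received buffer: small frames are copied into the
 * SKB's linear data and the page is reused; larger frames attach the page as
 * a frag and pull only the headers into the linear area.
 */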
static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
					    struct qede_rx_queue *rxq,
					    struct sw_rx_data *bd, u16 len,
					    u16 pad)
{
	unsigned int offset = bd->page_offset + pad;
	struct skb_frag_struct *frag;
	struct page *page = bd->data;
	unsigned int pull_len;
	struct sk_buff *skb;
	unsigned char *va;

	/* Allocate a new SKB with a sufficiently large header length */
	skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* Copy data into SKB - if it's small, we can simply copy it and
	 * re-use the already allocated & mapped memory.
	 */
	if (len + pad <= edev->rx_copybreak) {
		memcpy(skb_put(skb, len),
		       page_address(page) + offset, len);
		qede_reuse_page(rxq, bd);
		goto out;
	}

	frag = &skb_shinfo(skb)->frags[0];

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			page, offset, len, rxq->rx_buf_seg_size);

	va = skb_frag_address(frag);
	pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);

	/* Align the pull_len to optimize memcpy */
	memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));

	/* Correct the skb & frag sizes offset after the pull */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;

	if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
		/* Incr page ref count to reuse on allocation failure so
		 * that it doesn't get freed while freeing SKB.
		 */
		page_ref_inc(page);
		dev_kfree_skb_any(skb);
		return NULL;
	}

out:
	/* We've consumed the first BD and prepared an SKB */
	qede_rx_bd_ring_consume(rxq);
	return skb;
}

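/* Map the remaining BDs of a jumbo packet into SKB frags. Returns the number
 * of BDs that could not be handled (0 on success).
 */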
static int qede_rx_build_jumbo(struct qede_dev *edev,
			       struct qede_rx_queue *rxq,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_reg_cqe *cqe,
			       u16 first_bd_len)
{
	u16 pkt_len = le16_to_cpu(cqe->pkt_len);
	struct sw_rx_data *bd;
	u16 bd_cons_idx;
	u8 num_frags;

	pkt_len -= first_bd_len;

	/* We've already used one BD for the SKB. Now take care of the rest */
	for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
		u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
			       pkt_len;

		if (unlikely(!cur_size)) {
			DP_ERR(edev,
			       "Still got %d BDs for mapping jumbo, but length became 0\n",
			       num_frags);
			goto out;
		}

		/* We need a replacement buffer for each BD */
		if (unlikely(qede_alloc_rx_buffer(rxq, true)))
			goto out;

		/* Now that we've allocated the replacement buffer,
		 * we can safely consume the next BD and map it to the SKB.
		 */
		bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		bd = &rxq->sw_rx_ring[bd_cons_idx];
		qede_rx_bd_ring_consume(rxq);

		dma_unmap_page(rxq->dev, bd->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
				   bd->data, 0, cur_size);

		skb->truesize += PAGE_SIZE;
		skb->data_len += cur_size;
		skb->len += cur_size;
		pkt_len -= cur_size;
	}

	if (unlikely(pkt_len))
		DP_ERR(edev,
		       "Mapped all BDs of jumbo, but still have %d bytes\n",
		       pkt_len);

out:
	return num_frags;
}

static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
				   struct qede_fastpath *fp,
				   struct qede_rx_queue *rxq,
				   union eth_rx_cqe *cqe,
				   enum eth_rx_cqe_type type)
{
	switch (type) {
	case ETH_RX_CQE_TYPE_TPA_START:
		qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
		return 0;
	case ETH_RX_CQE_TYPE_TPA_CONT:
		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
		return 0;
	case ETH_RX_CQE_TYPE_TPA_END:
		return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
	default:
		return 0;
	}
}

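/* Process a single Rx completion: dispatch slowpath and TPA CQEs, validate
 * checksums, build the SKB and pass it to the stack. Returns the number of
 * packets handed to the stack (0 or 1).
 */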
static int qede_rx_process_cqe(struct qede_dev *edev,
			       struct qede_fastpath *fp,
			       struct qede_rx_queue *rxq)
{
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	u16 len, pad, bd_cons_idx, parse_flag;
	enum eth_rx_cqe_type cqe_type;
	union eth_rx_cqe *cqe;
	struct sw_rx_data *bd;
	struct sk_buff *skb;
	__le16 flags;
	u8 csum_flag;

	/* Get the CQE from the completion ring */
	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
	cqe_type = cqe->fast_path_regular.type;

	/* Process an unlikely slowpath event */
	if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
		struct eth_slow_path_rx_cqe *sp_cqe;

		sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
		edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
		return 0;
	}

	/* Handle TPA cqes */
	if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
		return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);

	/* Get the data from the SW ring; the BD is only consumed once it is
	 * clear it won't be recycled.
	 */
	bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
	bd = &rxq->sw_rx_ring[bd_cons_idx];

	fp_cqe = &cqe->fast_path_regular;
	len = le16_to_cpu(fp_cqe->len_on_first_bd);
	pad = fp_cqe->placement_offset + rxq->rx_headroom;

	/* If this is an error packet then drop it */
	flags = cqe->fast_path_regular.pars_flags.flags;
	parse_flag = le16_to_cpu(flags);

	csum_flag = qede_check_csum(parse_flag);
	if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
		if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
			rxq->rx_ip_frags++;
		} else {
			DP_NOTICE(edev,
				  "CQE has error, flags = %x, dropping incoming packet\n",
				  parse_flag);
			rxq->rx_hw_errors++;
			qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
			return 0;
		}
	}

	/* Basic validation passed; Need to prepare an SKB. This would also
	 * guarantee to finally consume the first BD upon success.
	 */
	skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
	if (!skb) {
		rxq->rx_alloc_errors++;
		qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
		return 0;
	}

	/* In case of Jumbo packet, several PAGE_SIZE-sized buffers will be
	 * pointed to by a single CQE.
	 */
	if (fp_cqe->bd_num > 1) {
		u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
							 fp_cqe, len);

		if (unlikely(unmapped_frags > 0)) {
			qede_recycle_rx_bd_ring(rxq, unmapped_frags);
			dev_kfree_skb_any(skb);
			return 0;
		}
	}

	/* The SKB contains all the data. Now prepare the meta-data. */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
	qede_set_skb_csum(skb, csum_flag);
	skb_record_rx_queue(skb, rxq->rxq_id);
	qede_ptp_record_rx_ts(edev, cqe, skb);

	/* SKB is prepared - pass it to stack */
	qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));

	return 1;
}

static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_dev *edev = fp->edev;
	int work_done = 0, rcv_pkts = 0;
	u16 hw_comp_cons, sw_comp_cons;

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of
	 * CQE/BD before reading hw_comp_cons. If the CQE is read before it is
	 * written by FW, the CPU may use a stale CQE.
	 */
	rmb();

	/* Loop to complete all indicated BDs */
	while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
		rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
		work_done++;
	}

	rxq->rcv_pkts += rcv_pkts;

	/* Allocate replacement buffers */
	while (rxq->num_rx_buffers - rxq->filled_buffers)
		if (qede_alloc_rx_buffer(rxq, false))
			break;

	/* Update producers */
	qede_update_rx_prod(edev, rxq);

	return work_done;
}

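/* Check whether there is pending Rx/Tx work after a NAPI poll iteration, so
 * the poll routine knows whether to re-arm interrupts or keep polling.
 */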
static bool qede_poll_is_more_work(struct qede_fastpath *fp)
{
	qed_sb_update_sb_idx(fp->sb_info);

	/* *_has_*_work() reads the status block, thus we need to ensure that
	 * status block indices have been actually read (qed_sb_update_sb_idx)
	 * prior to this check (*_has_*_work) so that we won't write the
	 * "newer" value of the status block to HW (if there was a DMA right
	 * after qede_has_rx_work and if there is no rmb, the memory reading
	 * (qed_sb_update_sb_idx) may be postponed to right before *_ack).
	 * In that case there will never be another interrupt until there is
	 * another update of the status block, while there is still unhandled
	 * work.
	 */
	rmb();

	if (likely(fp->type & QEDE_FASTPATH_RX))
		if (qede_has_rx_work(fp->rxq))
			return true;

	if (likely(fp->type & QEDE_FASTPATH_TX))
		if (qede_txq_has_work(fp->txq))
			return true;

	return false;
}

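/* NAPI poll routine: complete Tx work, process up to @budget Rx packets and
 * re-enable interrupts only when no further work is pending.
 */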
int qede_poll(struct napi_struct *napi, int budget)
{
	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
						napi);
	struct qede_dev *edev = fp->edev;
	int rx_work_done = 0;

	if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
		qede_tx_int(edev, fp->txq);

	rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
			qede_has_rx_work(fp->rxq)) ?
			qede_rx_int(fp, budget) : 0;
	if (rx_work_done < budget) {
		if (!qede_poll_is_more_work(fp)) {
			napi_complete(napi);

			/* Update and re-enable interrupts */
			qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
		} else {
			rx_work_done = budget;
		}
	}

	return rx_work_done;
}

irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
{
	struct qede_fastpath *fp = fp_cookie;

	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /* do not update */);

	napi_schedule_irqoff(&fp->napi);
	return IRQ_HANDLED;
}

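/* Main transmit function */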
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct netdev_queue *netdev_txq;
	struct qede_tx_queue *txq;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_2nd_bd *second_bd = NULL;
	struct eth_tx_3rd_bd *third_bd = NULL;
	struct eth_tx_bd *tx_data_bd = NULL;
	u16 txq_index, val = 0;
	u8 nbd = 0;
	dma_addr_t mapping;
	int rc, frag_idx = 0, ipv6_ext = 0;
	u8 xmit_type;
	u16 idx;
	u16 hlen;
	bool data_split = false;

	/* Get tx-queue context and netdev index */
	txq_index = skb_get_queue_mapping(skb);
	WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
	txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);

	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));

	xmit_type = qede_xmit_type(skb, &ipv6_ext);

#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
	if (qede_pkt_req_lin(skb, xmit_type)) {
		if (skb_linearize(skb)) {
			DP_NOTICE(edev,
				  "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod;
	txq->sw_tx_ring[idx].skb = skb;
	first_bd = (struct eth_tx_1st_bd *)
		   qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		qede_ptp_tx_ts(edev, skb);

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(txq->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		qede_free_failed_tx_pkt(txq, first_bd, 0, false);
		qede_update_tx_producer(txq);
		return NETDEV_TX_OK;
	}
	nbd++;
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* In case there is IPv6 with extension headers or LSO we need 2nd and
	 * 3rd BDs.
	 */
	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
		second_bd = (struct eth_tx_2nd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(second_bd, 0, sizeof(*second_bd));

		nbd++;
		third_bd = (struct eth_tx_3rd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(third_bd, 0, sizeof(*third_bd));

		nbd++;

		tx_data_bd = (struct eth_tx_bd *)second_bd;
	}

	if (skb_vlan_tag_present(skb)) {
		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
	}

	if (xmit_type & XMIT_L4_CSUM) {
		/* We don't re-calculate IP checksum as it is already done by
		 * the upper stack
		 */
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		if (xmit_type & XMIT_ENC) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;

			val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
		}

		/* Legacy FW had flipped behavior in regard to this bit -
		 * I.e., needed to set to prevent FW from touching
		 * encapsulated packets when it didn't need to.
		 */
		if (unlikely(txq->is_legacy))
			val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);

		/* If the packet is IPv6 with extension header, indicate that
		 * to FW and pass the relevant header offsets, since the
		 * device can't parse IPv6 extension headers on its own.
		 */
		if (unlikely(ipv6_ext))
			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
	}

	if (xmit_type & XMIT_LSO) {
		first_bd->data.bd_flags.bitfields |=
			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
		third_bd->data.lso_mss =
			cpu_to_le16(skb_shinfo(skb)->gso_size);

		if (unlikely(xmit_type & XMIT_ENC)) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;

			if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
				u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;

				first_bd->data.bd_flags.bitfields |= 1 << tmp;
			}
			hlen = qede_get_skb_hlen(skb, true);
		} else {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			hlen = qede_get_skb_hlen(skb, false);
		}

		third_bd->data.bitfields |=
			cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);

		/* Make life easier to FW by keeping only the headers in the
		 * first BD: if the linear data is longer, split it.
		 */
		if (unlikely(skb_headlen(skb) > hlen)) {
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "TSO split header size is %d (%x:%x)\n",
				   first_bd->nbytes, first_bd->addr.hi,
				   first_bd->addr.lo);

			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
					   le32_to_cpu(first_bd->addr.lo)) +
					   hlen;

			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
					      le16_to_cpu(first_bd->nbytes) -
					      hlen);

			/* this marks the BD as one that has no
			 * individual mapping
			 */
			txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;

			first_bd->nbytes = cpu_to_le16(hlen);

			tx_data_bd = (struct eth_tx_bd *)third_bd;
			data_split = true;
		}
	} else {
		val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
			ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
	}

	first_bd->data.bitfields = cpu_to_le16(val);

	/* Handle fragmented skb */
	/* special handle for frags inside 2nd and 3rd bds.. */
	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}

		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
			tx_data_bd = (struct eth_tx_bd *)third_bd;
		else
			tx_data_bd = NULL;

		frag_idx++;
	}

	/* map last frags into 4th, 5th .... */
	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
		tx_data_bd = (struct eth_tx_bd *)
			     qed_chain_produce(&txq->tx_pbl);

		memset(tx_data_bd, 0, sizeof(*tx_data_bd));

		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}
	}

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = nbd;

	netdev_tx_sent_queue(netdev_txq, skb->len);

	skb_tx_timestamp(skb);

	/* Advance packet producer only before sending the packet since
	 * mapping of pages may fail.
	 */
	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;

	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

	if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
		qede_update_tx_producer(txq);

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
		      < (MAX_SKB_FRAGS + 1))) {
		if (skb->xmit_more)
			qede_update_tx_producer(txq);

		netif_tx_stop_queue(netdev_txq);
		txq->stopped_cnt++;
		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
			   "Stop queue was called\n");

		/* paired memory barrier is in qede_tx_int(); we have to keep
		 * the ordering of set_bit() in netif_tx_stop_queue() and the
		 * read of the consumer below.
		 */
		smp_mb();

		if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
		     (MAX_SKB_FRAGS + 1)) &&
		    (edev->state == QEDE_STATE_OPEN)) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");
		}
	}

	return NETDEV_TX_OK;
}

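/* 8B udp header + 8B base tunnel header + 32B option length */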
#define QEDE_MAX_TUN_HDR_LEN 48

netdev_features_t qede_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	if (skb->encapsulation) {
		u8 l4_proto = 0;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			l4_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			l4_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return features;
		}

		/* Offloads are kept only for UDP tunnels whose destination
		 * port matches the configured VXLAN/GENEVE port and whose
		 * tunnel header fits within QEDE_MAX_TUN_HDR_LEN; otherwise
		 * checksum and GSO offloads are disabled for this packet.
		 */
		if (l4_proto == IPPROTO_UDP) {
			struct qede_dev *edev = netdev_priv(dev);
			u16 hdrlen, vxln_port, gnv_port;

			hdrlen = QEDE_MAX_TUN_HDR_LEN;
			vxln_port = edev->vxlan_dst_port;
			gnv_port = edev->geneve_dst_port;

			if ((skb_inner_mac_header(skb) -
			     skb_transport_header(skb)) > hdrlen ||
			    (ntohs(udp_hdr(skb)->dest) != vxln_port &&
			     ntohs(udp_hdr(skb)->dest) != gnv_port))
				return features & ~(NETIF_F_CSUM_MASK |
						    NETIF_F_GSO_MASK);
		}
	}

	return features;
}