// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"

#define TX_IRQ_NO_PENDING	0
#define TX_IRQ_NO_COALESC	0
#define TX_IRQ_NO_LLI_TIMER	0
#define TX_IRQ_NO_CREDIT	0
#define TX_IRQ_NO_RESEND_TIMER	0

#define CI_UPDATE_NO_PENDING	0
#define CI_UPDATE_NO_COALESC	0

#define HW_CONS_IDX(sq)		be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN		17

#define MAX_PAYLOAD_OFFSET	221
#define TRANSPORT_OFFSET(l4_hdr, skb)	((u32)((l4_hdr) - (skb)->data))

union hinic_l3 {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

union hinic_l4 {
	struct tcphdr *tcp;
	struct udphdr *udp;
	unsigned char *hdr;
};

enum hinic_offload_type {
	TX_OFFLOAD_TSO = BIT(0),
	TX_OFFLOAD_CSUM = BIT(1),
	TX_OFFLOAD_VLAN = BIT(2),
	TX_OFFLOAD_INVALID = BIT(3),
};
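
/**
 * hinic_txq_clean_stats - Clean the statistics of the specific queue
 * @txq: Logical Tx Queue
 **/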
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_update_begin(&txq_stats->syncp);
	txq_stats->pkts = 0;
	txq_stats->bytes = 0;
	txq_stats->tx_busy = 0;
	txq_stats->tx_wake = 0;
	txq_stats->tx_dropped = 0;
	u64_stats_update_end(&txq_stats->syncp);
}
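
/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/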
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&txq_stats->syncp);
		stats->pkts = txq_stats->pkts;
		stats->bytes = txq_stats->bytes;
		stats->tx_busy = txq_stats->tx_busy;
		stats->tx_wake = txq_stats->tx_wake;
		stats->tx_dropped = txq_stats->tx_dropped;
	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}
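
/**
 * txq_stats_init - Initialize the statistics of the specific queue
 * @txq: Logical Tx Queue
 **/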
static void txq_stats_init(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_init(&txq_stats->syncp);
	hinic_txq_clean_stats(txq);
}
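
/**
 * tx_map_skb - dma mapping for skb and return sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/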
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
		      struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct skb_frag_struct *frag;
	dma_addr_t dma_addr;
	int i, j;

	dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_addr)) {
		dev_err(&pdev->dev, "Failed to map Tx skb data\n");
		return -EFAULT;
	}

	hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, dma_addr)) {
			dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
			goto err_tx_map;
		}

		hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
	}

	return 0;

err_tx_map:
	for (j = 0; j < i; j++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
			       sges[j + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
	return -EFAULT;
}
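
/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/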
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			 struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
			       sges[i + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
}

static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
				 union hinic_l4 *l4,
				 enum hinic_offload_type offload_type,
				 enum hinic_l3_offload_type *l3_type,
				 u8 *l4_proto)
{
	u8 *exthdr;

	if (ip->v4->version == 4) {
		*l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
			   IPV4_PKT_NO_CHKSUM_OFFLOAD :
			   IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		*l4_proto = ip->v4->protocol;
	} else if (ip->v4->version == 6) {
		*l3_type = IPV6_PKT;
		exthdr = ip->hdr + sizeof(*ip->v6);
		*l4_proto = ip->v6->nexthdr;
		if (exthdr != l4->hdr) {
			int start = exthdr - skb->data;
			__be16 frag_off;

			ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
		}
	} else {
		*l3_type = L3TYPE_UNKNOWN;
		*l4_proto = 0;
	}
}

static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
			      enum hinic_offload_type offload_type, u8 l4_proto,
			      enum hinic_l4_offload_type *l4_offload,
			      u32 *l4_len, u32 *offset)
{
	*l4_offload = OFFLOAD_DISABLE;
	*offset = 0;
	*l4_len = 0;

	switch (l4_proto) {
	case IPPROTO_TCP:
		*l4_offload = TCP_OFFLOAD_ENABLE;
		/* doff is in units of 4 bytes */
		*l4_len = l4->tcp->doff * 4;
		*offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	case IPPROTO_UDP:
		*l4_offload = UDP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct udphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	case IPPROTO_SCTP:
		/* only the csum offload path supports SCTP */
		if (offload_type != TX_OFFLOAD_CSUM)
			break;

		*l4_offload = SCTP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct sctphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	default:
		break;
	}
}

static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
{
	return (ip->v4->version == 4) ?
	       csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
	       csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}

static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
		       struct sk_buff *skb)
{
	u32 offset, l4_len, ip_identify, network_hdr_len;
	enum hinic_l3_offload_type l3_offload;
	enum hinic_l4_offload_type l4_offload;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_cow_head(skb, 0) < 0)
		return -EPROTONOSUPPORT;

	if (skb->encapsulation) {
		u32 gso_type = skb_shinfo(skb)->gso_type;
		u32 tunnel_type = 0;
		u32 l4_tunnel_len;

		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_inner_network_header_len(skb);

		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		} else if (ip.v4->version == 6) {
			l3_offload = IPV6_PKT;
		} else {
			l3_offload = 0;
		}

		hinic_task_set_outter_l3(task, l3_offload,
					 skb_network_header_len(skb));

		if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
			l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
			tunnel_type = TUNNEL_UDP_CSUM;
		} else if (gso_type & SKB_GSO_UDP_TUNNEL) {
			tunnel_type = TUNNEL_UDP_NO_CSUM;
		}

		l4_tunnel_len = skb_inner_network_offset(skb) -
				skb_transport_offset(skb);
		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);

		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	/* initialize inner IP header fields */
	if (ip.v4->version == 4)
		ip.v4->tot_len = 0;
	else
		ip.v6->payload_len = 0;

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);

	ip_identify = 0;
	if (l4_proto == IPPROTO_TCP)
		l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
			       ip_identify, skb_shinfo(skb)->gso_size);

	return 1;
}

static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
			struct sk_buff *skb)
{
	enum hinic_l4_offload_type l4_offload;
	u32 offset, l4_len, network_hdr_len;
	enum hinic_l3_offload_type l3_type;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->encapsulation) {
		u32 l4_tunnel_len;

		ip.hdr = skb_network_header(skb);

		if (ip.v4->version == 4)
			l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
		else if (ip.v4->version == 6)
			l3_type = IPV6_PKT;
		else
			l3_type = L3TYPE_UNKNOWN;

		hinic_task_set_outter_l3(task, l3_type,
					 skb_network_header_len(skb));

		l4_tunnel_len = skb_inner_network_offset(skb) -
				skb_transport_offset(skb);

		hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM,
					 l4_tunnel_len);

		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
		network_hdr_len = skb_inner_network_header_len(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_type, network_hdr_len);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);

	return 1;
}

static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
			    u32 *queue_info)
{
	enum hinic_offload_type offload = 0;
	int enabled;

	enabled = offload_tso(task, queue_info, skb);
	if (enabled > 0) {
		offload |= TX_OFFLOAD_TSO;
	} else if (enabled == 0) {
		enabled = offload_csum(task, queue_info, skb);
		if (enabled)
			offload |= TX_OFFLOAD_CSUM;
	} else {
		return -EPROTONOSUPPORT;
	}

	if (offload)
		hinic_task_set_l2hdr(task, skb_network_offset(skb));

	/* payload offset must not exceed MAX_PAYLOAD_OFFSET */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
	    MAX_PAYLOAD_OFFSET) {
		return -EPROTONOSUPPORT;
	}

	/* mss must not be less than HINIC_MSS_MIN */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
		*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
		*queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
	}

	return 0;
}

netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 prod_idx, q_id = skb->queue_mapping;
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;

	txq = &nic_dev->txqs[q_id];
	qp = container_of(txq->sq, struct hinic_qp, sq);

	if (skb->len < MIN_SKB_LEN) {
		if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
			netdev_err(netdev, "Failed to pad skb\n");
			goto update_error_stats;
		}

		skb->len = MIN_SKB_LEN;
	}

	nr_sges = skb_shinfo(skb)->nr_frags + 1;
	if (nr_sges > txq->max_sges) {
		netdev_err(netdev, "Too many Tx sges\n");
		goto skb_error;
	}

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		/* Check for the case free_tx_poll is called in another cpu
		 * and we stopped the subqueue after free_tx_poll check.
		 */
		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		wqe_size = 0;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

	err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
	if (err)
		goto offload_error;

	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, q_id);
	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

offload_error:
	hinic_sq_return_wqe(txq->sq, wqe_size);
	tx_unmap_skb(nic_dev, skb, txq->sges);

skb_error:
	dev_kfree_skb_any(skb);

update_error_stats:
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);

	return NETDEV_TX_OK;
}
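
/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/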
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			struct hinic_sge *sges)
{
	tx_unmap_skb(nic_dev, skb, sges);

	dev_kfree_skb_any(skb);
}
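
/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/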
static void free_all_tx_skbs(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_sq *sq = txq->sq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct sk_buff *skb;
	int nr_sges;
	u16 ci;

	while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
		sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
		if (!sq_wqe)
			break;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	}
}
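
/**
 * free_tx_poll - free finished tx skbs in tx queue that connected to napi
 * @napi: napi
 * @budget: maximum number of Tx completions to process
 *
 * Return number of processed packets, or budget if more work remains
 **/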
static int free_tx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
	struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct netdev_queue *netdev_txq;
	struct hinic_sq *sq = txq->sq;
	struct hinic_wq *wq = sq->wq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	int nr_sges, pkts = 0;
	struct sk_buff *skb;
	u64 tx_bytes = 0;
	u16 hw_ci, sw_ci;

	do {
		hw_ci = HW_CONS_IDX(sq) & wq->mask;

		/* Read a WQEBB to get the real WQE size and consumer index */
		sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
		if ((!sq_wqe) ||
		    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
			break;

		/* If this WQE has multiple WQEBBs, read again to get the
		 * full-size WQE.
		 */
		if (wqe_size > wq->wqebb_size) {
			sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
			if (unlikely(!sq_wqe))
				break;
		}

		tx_bytes += skb->len;
		pkts++;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	} while (pkts < budget);

	if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
	    hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
		netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

		__netif_tx_lock(netdev_txq, smp_processor_id());

		netif_wake_subqueue(nic_dev->netdev, qp->q_id);

		__netif_tx_unlock(netdev_txq);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_wake++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.bytes += tx_bytes;
	txq->txq_stats.pkts += pkts;
	u64_stats_update_end(&txq->txq_stats.syncp);

	if (pkts < budget) {
		napi_complete(napi);
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   sq->msix_entry,
					   HINIC_MSIX_ENABLE);
		return pkts;
	}

	return budget;
}

static void tx_napi_add(struct hinic_txq *txq, int weight)
{
	netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
	napi_enable(&txq->napi);
}

static void tx_napi_del(struct hinic_txq *txq)
{
	napi_disable(&txq->napi);
	netif_napi_del(&txq->napi);
}

static irqreturn_t tx_irq(int irq, void *data)
{
	struct hinic_txq *txq = data;
	struct hinic_dev *nic_dev;

	nic_dev = netdev_priv(txq->netdev);

	/* Disable the interrupt until napi will be completed */
	hinic_hwdev_set_msix_state(nic_dev->hwdev,
				   txq->sq->msix_entry,
				   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

	napi_schedule(&txq->napi);
	return IRQ_HANDLED;
}

static int tx_request_irq(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_sq *sq = txq->sq;
	int err;

	tx_napi_add(txq, nic_dev->tx_weight);

	hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
			     TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
			     TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
			     TX_IRQ_NO_RESEND_TIMER);

	err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
	if (err) {
		dev_err(&pdev->dev, "Failed to request Tx irq\n");
		tx_napi_del(txq);
		return err;
	}

	return 0;
}

static void tx_free_irq(struct hinic_txq *txq)
{
	struct hinic_sq *sq = txq->sq;

	free_irq(sq->irq, txq);
	tx_napi_del(txq);
}
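
/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/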
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	int err, irqname_len;
	size_t sges_size;

	txq->netdev = netdev;
	txq->sq = sq;

	txq_stats_init(txq);

	txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

	sges_size = txq->max_sges * sizeof(*txq->sges);
	txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->sges)
		return -ENOMEM;

	sges_size = txq->max_sges * sizeof(*txq->free_sges);
	txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->free_sges) {
		err = -ENOMEM;
		goto err_alloc_free_sges;
	}

	irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
	txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
	if (!txq->irq_name) {
		err = -ENOMEM;
		goto err_alloc_irqname;
	}

	sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);

	err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
					 CI_UPDATE_NO_COALESC);
	if (err)
		goto err_hw_ci;

	err = tx_request_irq(txq);
	if (err) {
		netdev_err(netdev, "Failed to request Tx irq\n");
		goto err_req_tx_irq;
	}

	return 0;

err_req_tx_irq:
err_hw_ci:
	devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
	devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
	devm_kfree(&netdev->dev, txq->sges);
	return err;
}
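
/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/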
void hinic_clean_txq(struct hinic_txq *txq)
{
	struct net_device *netdev = txq->netdev;

	tx_free_irq(txq);

	free_all_tx_skbs(txq);

	devm_kfree(&netdev->dev, txq->irq_name);
	devm_kfree(&netdev->dev, txq->free_sges);
	devm_kfree(&netdev->dev, txq->sges);
}