/*
 * Chelsio crypto driver (chcr): inline IPsec (ESP) transmit offload.
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

/*
 * Max Tx packet length (in bytes) we allow to be sent entirely as
 * immediate data within a single work request.
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE 8

static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static void chcr_advance_esn_state(struct xfrm_state *x);

static const struct xfrmdev_ops chcr_xfrmdev_ops = {
        .xdo_dev_state_add         = chcr_xfrm_add_state,
        .xdo_dev_state_delete      = chcr_xfrm_del_state,
        .xdo_dev_state_free        = chcr_xfrm_free_state,
        .xdo_dev_offload_ok        = chcr_ipsec_offload_ok,
        .xdo_dev_state_advance_esn = chcr_advance_esn_state,
};
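
/* These callbacks are driven by the xfrm stack: state_add when userspace
 * requests hardware offload for an SA (e.g. "ip xfrm state add ... offload
 * dev <dev> dir out"), state_delete/state_free on teardown, offload_ok per
 * packet to confirm the skb is still offloadable, and advance_esn whenever
 * the ESN window moves.
 */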

/* Register the xfrm offload callbacks and advertise ESP hardware offload
 * on each of the adapter's ports.
 */
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
{
        struct net_device *netdev = NULL;
        int i;

        for (i = 0; i < lld->nports; i++) {
                netdev = lld->ports[i];
                if (!netdev)
                        continue;
                netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
                netdev->hw_enc_features |= NETIF_F_HW_ESP;
                netdev->features |= NETIF_F_HW_ESP;
                rtnl_lock();
                netdev_change_features(netdev);
                rtnl_unlock();
        }
}

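/* The ICV length negotiated for the SA selects one of the hardware's HMAC
 * truncation modes: an 8-byte tag maps to DIV2, a 12-byte tag to the IPsec
 * 96-bit mode, and a 16-byte tag to no truncation; everything else is
 * rejected (chcr_xfrm_add_state() only admits 96- and 128-bit ICVs anyway).
 */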
static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
                                         struct ipsec_sa_entry *sa_entry)
{
        int hmac_ctrl;
        int authsize = x->aead->alg_icv_len / 8;

        sa_entry->authsize = authsize;

        switch (authsize) {
        case ICV_8:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
                break;
        case ICV_12:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
                break;
        case ICV_16:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
                break;
        default:
                return -EINVAL;
        }
        return hmac_ctrl;
}

static inline int chcr_ipsec_setkey(struct xfrm_state *x,
                                    struct ipsec_sa_entry *sa_entry)
{
        struct crypto_cipher *cipher;
        int keylen = (x->aead->alg_key_len + 7) / 8;
        unsigned char *key = x->aead->alg_key;
        int ck_size, key_ctx_size = 0;
        unsigned char ghash_h[AEAD_H_SIZE];
        int ret = 0;

        /* The last 4 bytes of the AEAD key are the GCM nonce salt. */
        if (keylen > 3) {
                keylen -= 4;
                memcpy(sa_entry->salt, key + keylen, 4);
        }

        if (keylen == AES_KEYSIZE_128) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        } else if (keylen == AES_KEYSIZE_192) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        } else if (keylen == AES_KEYSIZE_256) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        } else {
                pr_err("GCM: Invalid key length %d\n", keylen);
                ret = -EINVAL;
                goto out;
        }

        memcpy(sa_entry->key, key, keylen);
        sa_entry->enckey_len = keylen;
        key_ctx_size = sizeof(struct _key_ctx) +
                       ((DIV_ROUND_UP(keylen, 16)) << 4) +
                       AEAD_H_SIZE;

        sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
                                                 0, 0,
                                                 key_ctx_size >> 4);

        /* Compute the GHASH subkey H = CIPH_K(0^128); it is stored in the
         * key context right after the (16-byte aligned) cipher key.
         */
        cipher = crypto_alloc_cipher("aes-generic", 0, 0);
        if (IS_ERR(cipher)) {
                sa_entry->enckey_len = 0;
                ret = -ENOMEM;
                goto out;
        }

        ret = crypto_cipher_setkey(cipher, key, keylen);
        if (ret) {
                sa_entry->enckey_len = 0;
                goto out1;
        }
        memset(ghash_h, 0, AEAD_H_SIZE);
        crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
        memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) * 16),
               ghash_h, AEAD_H_SIZE);
        sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
                             AEAD_H_SIZE;
out1:
        crypto_free_cipher(cipher);
out:
        return ret;
}
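
/* Sketch of the resulting key-context layout for AES-128-GCM (larger keys
 * just grow the middle section to the next 16-byte multiple):
 *
 *      struct _key_ctx { ctx_hdr, salt }   header + 4-byte nonce salt
 *      16 bytes                            raw AES cipher key
 *      16 bytes (AEAD_H_SIZE)              GHASH subkey H = AES_K(0^128)
 *
 * giving kctx_len = 32, the amount of key material copy_key_cpltx_pktxt()
 * later streams into each work request.
 */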

/* Validate that the xfrm state is one the hardware can offload (AES-GCM
 * ESP only) and, if so, build the SA entry used on the Tx path.
 */
static int chcr_xfrm_add_state(struct xfrm_state *x)
{
        struct ipsec_sa_entry *sa_entry;
        int res = 0;

        if (x->props.aalgo != SADB_AALG_NONE) {
                pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
                return -EINVAL;
        }
        if (x->props.calgo != SADB_X_CALG_NONE) {
                pr_debug("CHCR: Cannot offload compressed xfrm states\n");
                return -EINVAL;
        }
        if (x->props.family != AF_INET &&
            x->props.family != AF_INET6) {
                pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
                return -EINVAL;
        }
        if (x->props.mode != XFRM_MODE_TRANSPORT &&
            x->props.mode != XFRM_MODE_TUNNEL) {
                pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
                return -EINVAL;
        }
        if (x->id.proto != IPPROTO_ESP) {
                pr_debug("CHCR: Only ESP xfrm state offloaded\n");
                return -EINVAL;
        }
        if (x->encap) {
                pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
                return -EINVAL;
        }
        if (!x->aead) {
                pr_debug("CHCR: Cannot offload xfrm states without aead\n");
                return -EINVAL;
        }
        if (x->aead->alg_icv_len != 128 &&
            x->aead->alg_icv_len != 96) {
                pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
                return -EINVAL;
        }
        if ((x->aead->alg_key_len != 128 + 32) &&
            (x->aead->alg_key_len != 256 + 32)) {
                pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
                return -EINVAL;
        }
        if (x->tfcpad) {
                pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
                return -EINVAL;
        }
        if (!x->geniv) {
                pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
                return -EINVAL;
        }
        if (strcmp(x->geniv, "seqiv")) {
                pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
                return -EINVAL;
        }

        sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
        if (!sa_entry) {
                res = -ENOMEM;
                goto out;
        }

        sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
        if (x->props.flags & XFRM_STATE_ESN)
                sa_entry->esn = 1;
        res = chcr_ipsec_setkey(x, sa_entry);
        if (res) {
                kfree(sa_entry);
                goto out;
        }
        x->xso.offload_handle = (unsigned long)sa_entry;
        try_module_get(THIS_MODULE);
out:
        return res;
}
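
/* For reference, an SA that passes the checks above could be configured
 * with iproute2 roughly as follows (addresses, SPI and key are made up):
 *
 *      ip xfrm state add src 10.0.0.1 dst 10.0.0.2 proto esp spi 0x1000 \
 *              mode transport aead "rfc4106(gcm(aes))" \
 *              0x0102030405060708090a0b0c0d0e0f1011121314 128 \
 *              offload dev eth0 dir out
 *
 * The 20-byte key is 16 bytes of AES-128 key plus the 4-byte salt, i.e.
 * alg_key_len == 128 + 32 as required above; rfc4106 uses "seqiv" for IV
 * generation, satisfying the geniv check.
 */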

static void chcr_xfrm_del_state(struct xfrm_state *x)
{
        /* do nothing; resources are released in chcr_xfrm_free_state() */
        if (!x->xso.offload_handle)
                return;
}

static void chcr_xfrm_free_state(struct xfrm_state *x)
{
        struct ipsec_sa_entry *sa_entry;

        if (!x->xso.offload_handle)
                return;

        sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
        kfree(sa_entry);
        module_put(THIS_MODULE);
}

static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        if (x->props.family == AF_INET) {
                /* Offload with IP options is not supported yet */
                if (ip_hdr(skb)->ihl > 5)
                        return false;
        } else {
                /* Offload with IPv6 extension headers is not supported yet */
                if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
                        return false;
        }
        /* Inline IPsec Tx does not handle GSO skbs */
        if (skb_shinfo(skb)->gso_size)
                return false;
        return true;
}

static void chcr_advance_esn_state(struct xfrm_state *x)
{
        /* do nothing */
        if (!x->xso.offload_handle)
                return;
}

static inline int is_eth_imm(const struct sk_buff *skb,
                             struct ipsec_sa_entry *sa_entry)
{
        unsigned int kctx_len;
        int hdrlen;

        kctx_len = sa_entry->kctx_len;
        hdrlen = sizeof(struct fw_ulptx_wr) +
                 sizeof(struct chcr_ipsec_req) + kctx_len;

        hdrlen += sizeof(struct cpl_tx_pkt);
        if (sa_entry->esn)
                hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
                           << 4);
        if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
                return hdrlen;
        return 0;
}
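
/* In other words, is_eth_imm() returns the total header overhead when the
 * whole packet fits as immediate data, and 0 otherwise. As a rough worked
 * example (structure sizes are illustrative, not exact): with a 32-byte
 * key context and no ESN, hdrlen is sizeof(fw_ulptx_wr) +
 * sizeof(chcr_ipsec_req) + 32 + sizeof(cpl_tx_pkt), so a packet is inlined
 * only if skb->len fits in the remainder of the 256-byte
 * MAX_IMM_TX_PKT_LEN budget.
 */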

static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
                                             struct ipsec_sa_entry *sa_entry,
                                             bool *immediate)
{
        unsigned int kctx_len;
        unsigned int flits;
        int aadivlen;
        int hdrlen;

        kctx_len = sa_entry->kctx_len;
        hdrlen = is_eth_imm(skb, sa_entry);
        aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
                                                16) : 0;
        aadivlen <<= 4;

        /* If the skb is small enough, we can pump it out as a work request
         * with only immediate data. In that case we just have to have the
         * TX Packet header plus the skb data in the Work Request.
         */
        if (hdrlen) {
                *immediate = true;
                return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
        }

        flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

        /* Otherwise, we're going to have to construct a Scatter Gather List
         * of the skb body and fragments. We also account for the flits
         * needed for the work request header, the key context, the
         * CPL_TX_PKT_CORE and, when ESN is in use, the AAD/IV block.
         */
        flits += (sizeof(struct fw_ulptx_wr) +
                  sizeof(struct chcr_ipsec_req) +
                  kctx_len +
                  sizeof(struct cpl_tx_pkt_core) +
                  aadivlen) / sizeof(__be64);
        return flits;
}
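
/* Units, for reference: one flit is 8 bytes of descriptor space. A Tx
 * descriptor is 64 bytes (8 flits, hence flits_to_desc() below divides by
 * 8), while firmware work-request lengths are counted in 16-byte units
 * (2 flits, hence the DIV_ROUND_UP(flits, 2) in chcr_crypto_wreq()).
 */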

inline void *copy_esn_pktxt(struct sk_buff *skb,
                            struct net_device *dev,
                            void *pos,
                            struct ipsec_sa_entry *sa_entry)
{
        struct chcr_ipsec_aadiv *aadiv;
        struct ulptx_idata *sc_imm;
        struct ip_esp_hdr *esphdr;
        struct xfrm_offload *xo;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        __be64 seqno;
        u32 qidx;
        u32 seqlo;
        u8 *iv;
        int eoq;
        int len;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        /* end of queue, reset pos to start of queue */
        eoq = (void *)q->q.stat - pos;
        if (!eoq)
                pos = q->q.desc;

        len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
        memset(pos, 0, len);
        aadiv = (struct chcr_ipsec_aadiv *)pos;
        esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
        xo = xfrm_offload(skb);

        aadiv->spi = esphdr->spi;
        /* Build the full 64-bit ESN from the low 32 bits on the wire and
         * the high 32 bits tracked by the xfrm offload state.
         */
        seqlo = ntohl(esphdr->seq_no);
        seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
        memcpy(aadiv->seq_no, &seqno, 8);
        iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
        memcpy(aadiv->iv, iv, 8);

        if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
                sc_imm = (struct ulptx_idata *)(pos +
                         (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
                                       sizeof(__be64)) << 3));
                sc_imm->cmd_more = FILL_CMD_MORE(0);
                sc_imm->len = cpu_to_be32(skb->len);
        }
        pos += len;
        return pos;
}
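
/* The chcr_ipsec_aadiv block built above carries, in order, the SPI, the
 * 64-bit extended sequence number and the 8-byte explicit IV; when ESN is
 * enabled the hardware takes its AAD/IV input from this block instead of
 * the 32-bit sequence number present in the packet itself.
 */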

inline void *copy_cpltx_pktxt(struct sk_buff *skb,
                              struct net_device *dev,
                              void *pos,
                              struct ipsec_sa_entry *sa_entry)
{
        struct cpl_tx_pkt_core *cpl;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        u32 ctrl0, qidx;
        u64 cntrl = 0;
        int left;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        left = (void *)q->q.stat - pos;
        if (!left)
                pos = q->q.desc;

        cpl = (struct cpl_tx_pkt_core *)pos;

        cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
        ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
                TXPKT_PF_V(adap->pf);
        if (skb_vlan_tag_present(skb)) {
                q->vlan_ins++;
                cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
        }

        cpl->ctrl0 = htonl(ctrl0);
        cpl->pack = htons(0);
        cpl->len = htons(skb->len);
        cpl->ctrl1 = cpu_to_be64(cntrl);

        pos += sizeof(struct cpl_tx_pkt_core);

        if (sa_entry->esn)
                pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
        return pos;
}

inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
                                  struct net_device *dev,
                                  void *pos,
                                  struct ipsec_sa_entry *sa_entry)
{
        struct _key_ctx *key_ctx;
        int left, eoq, key_len;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        unsigned int qidx;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];
        key_len = sa_entry->kctx_len;

        /* end of queue, reset pos to start of queue */
        eoq = (void *)q->q.stat - pos;
        left = eoq;
        if (!eoq) {
                pos = q->q.desc;
                left = 64 * q->q.size;
        }

        /* Copy the Key context header */
        key_ctx = (struct _key_ctx *)pos;
        key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
        memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
        pos += sizeof(struct _key_ctx);
        left -= sizeof(struct _key_ctx);

        if (likely(key_len <= left)) {
                memcpy(key_ctx->key, sa_entry->key, key_len);
                pos += key_len;
        } else {
                /* Key material wraps past the end of the descriptor ring. */
                memcpy(pos, sa_entry->key, left);
                memcpy(q->q.desc, sa_entry->key + left,
                       key_len - left);
                pos = (u8 *)q->q.desc + (key_len - left);
        }

        pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);

        return pos;
}
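
/* Because a work request may straddle the end of the descriptor ring,
 * every copy_* helper above compares pos against q->q.stat (the ring's
 * end) and, on wrap, continues writing from q->q.desc (the ring's start);
 * 64 is the size in bytes of one Tx descriptor, so 64 * q->q.size is the
 * full ring.
 */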

inline void *chcr_crypto_wreq(struct sk_buff *skb,
                              struct net_device *dev,
                              void *pos,
                              int credits,
                              struct ipsec_sa_entry *sa_entry)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        unsigned int ivsize = GCM_ESP_IV_SIZE;
        struct chcr_ipsec_wr *wr;
        bool immediate = false;
        u16 immdatalen = 0;
        unsigned int flits;
        u32 ivinoffset;
        u32 aadstart;
        u32 aadstop;
        u32 ciphstart;
        u16 sc_more = 0;
        u32 ivdrop = 0;
        u32 esnlen = 0;
        u32 wr_mid;
        u16 ndesc;
        int qidx = skb_get_queue_mapping(skb);
        struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
        unsigned int kctx_len = sa_entry->kctx_len;
        int qid = q->q.cntxt_id;

        atomic_inc(&adap->chcr_stats.ipsec_cnt);

        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
        ndesc = DIV_ROUND_UP(flits, 2);
        if (sa_entry->esn)
                ivdrop = 1;

        if (immediate)
                immdatalen = skb->len;

        if (sa_entry->esn) {
                esnlen = sizeof(struct chcr_ipsec_aadiv);
                if (!skb_is_nonlinear(skb))
                        sc_more = 1;
        }

        /* WR Header */
        wr = (struct chcr_ipsec_wr *)pos;
        wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
        wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);

        if (unlikely(credits < ETHTXQ_STOP_THRES)) {
                netif_tx_stop_queue(q->txq);
                q->q.stops++;
                if (!q->dbqt)
                        wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }
        wr_mid |= FW_ULPTX_WR_DATA_F;
        wr->wreq.flowid_len16 = htonl(wr_mid);

        /* ULPTX */
        wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
        wr->req.ulptx.len = htonl(ndesc - 1);

        /* Sub-command */
        wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
        wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                         sizeof(wr->req.key_ctx) +
                                         kctx_len +
                                         sizeof(struct cpl_tx_pkt_core) +
                                         esnlen +
                                         (esnlen ? 0 : immdatalen));

        /* CPL_TX_SEC_PDU */
        ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
                                     (skb_transport_offset(skb) +
                                      sizeof(struct ip_esp_hdr) + 1);
        wr->req.sec_cpl.op_ivinsrtofst = htonl(
                CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
                CPL_TX_SEC_PDU_CPLLEN_V(2) |
                CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
                CPL_TX_SEC_PDU_IVINSRTOFST_V(ivinoffset));

        wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
        aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
        aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
                                  (skb_transport_offset(skb) +
                                   sizeof(struct ip_esp_hdr));
        ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
                    GCM_ESP_IV_SIZE + 1;
        ciphstart += sa_entry->esn ? esnlen : 0;

        wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
                                                        aadstart,
                                                        aadstop,
                                                        ciphstart, 0);

        wr->req.sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
                                        sa_entry->authsize,
                                        sa_entry->authsize);
        wr->req.sec_cpl.seqno_numivs =
                FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
                                         CHCR_SCMD_CIPHER_MODE_AES_GCM,
                                         CHCR_SCMD_AUTH_MODE_GHASH,
                                         sa_entry->hmac_ctrl,
                                         ivsize >> 1);
        wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
                                                                 0, ivdrop, 0);

        pos += sizeof(struct fw_ulptx_wr) +
               sizeof(struct ulp_txpkt) +
               sizeof(struct ulptx_idata) +
               sizeof(struct cpl_tx_sec_pdu);

        pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

        return pos;
}
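
/* Overall layout of the work request assembled above, in order:
 *
 *      fw_ulptx_wr | ulp_txpkt | ulptx_idata | cpl_tx_sec_pdu |
 *      key context | cpl_tx_pkt_core | [aadiv, ESN only] | packet data
 *
 * with the packet data either inlined as immediate data or referenced via
 * a scatter-gather list written afterwards by chcr_ipsec_xmit().
 */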

/*
 *      flits_to_desc - returns the number of Tx descriptors needed for
 *      the given number of flits
 *      @n: the number of flits
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
        WARN_ON(n > SGE_MAX_WR_LEN / 8);
        return DIV_ROUND_UP(n, 8);
}

static inline unsigned int txq_avail(const struct sge_txq *q)
{
        return q->size - 1 - q->in_use;
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
        netif_tx_stop_queue(q->txq);
        q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
        q->in_use += n;
        q->pidx += n;
        if (q->pidx >= q->size)
                q->pidx -= q->size;
}
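
/* Ring bookkeeping: txq_avail() reserves one descriptor of slack, and
 * txq_advance() bumps both the in-use count and the producer index,
 * wrapping the latter modulo the ring size.
 */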

/*
 *      chcr_ipsec_xmit - transmit an ESP packet through the inline
 *      crypto engine
 */
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct ipsec_sa_entry *sa_entry;
        u64 *pos, *end, *before, *sgl;
        int qidx, left, credits;
        unsigned int flits = 0, ndesc;
        struct adapter *adap;
        struct sge_eth_txq *q;
        struct port_info *pi;
        dma_addr_t addr[MAX_SKB_FRAGS + 1];
        struct sec_path *sp;
        bool immediate = false;

        if (!x->xso.offload_handle)
                return NETDEV_TX_BUSY;

        sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;

        sp = skb_sec_path(skb);
        if (sp->len != 1) {
out_free:
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        cxgb4_reclaim_completed_tx(adap, &q->q, true);

        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
        ndesc = flits_to_desc(flits);
        credits = txq_avail(&q->q) - ndesc;

        if (unlikely(credits < 0)) {
                eth_txq_stop(q);
                dev_err(adap->pdev_dev,
                        "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
                        dev->name, qidx, credits, ndesc, txq_avail(&q->q),
                        flits);
                return NETDEV_TX_BUSY;
        }

        if (!immediate &&
            unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
                q->mapping_err++;
                goto out_free;
        }

        pos = (u64 *)&q->q.desc[q->q.pidx];
        before = (u64 *)pos;
        end = (u64 *)pos + flits;

        pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
                                       credits, sa_entry);
        /* If the work request wrapped around the ring, rebase 'end'. */
        if (before > (u64 *)pos) {
                left = (u8 *)end - (u8 *)q->q.stat;
                end = (void *)q->q.desc + left;
        }
        if (pos == (u64 *)q->q.stat) {
                left = (u8 *)end - (u8 *)q->q.stat;
                end = (void *)q->q.desc + left;
                pos = (void *)q->q.desc;
        }

        sgl = (void *)pos;
        if (immediate) {
                cxgb4_inline_tx_skb(skb, &q->q, sgl);
                dev_consume_skb_any(skb);
        } else {
                int last_desc;

                cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
                                0, addr);
                skb_orphan(skb);

                last_desc = q->q.pidx + ndesc - 1;
                if (last_desc >= q->q.size)
                        last_desc -= q->q.size;
                q->q.sdesc[last_desc].skb = skb;
                q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
        }
        txq_advance(&q->q, ndesc);

        cxgb4_ring_tx_db(adap, &q->q, ndesc);
        return NETDEV_TX_OK;
}