#define pr_fmt(fmt) "ch_ipsec: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_ipsec.h"

/* Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a Work Request, and the per-packet ESP IV size for GCM (a 64-bit
 * IV carried at the start of the ESP payload).
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE      8

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
static int ch_ipsec_xfrm_add_state(struct xfrm_state *x);

static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
        .xdo_dev_state_add = ch_ipsec_xfrm_add_state,
        .xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
        .xdo_dev_state_free = ch_ipsec_xfrm_free_state,
        .xdo_dev_offload_ok = ch_ipsec_offload_ok,
        .xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};

static struct cxgb4_uld_info ch_ipsec_uld_info = {
        .name = CHIPSEC_DRV_MODULE_NAME,
        .add = ch_ipsec_uld_add,
        .state_change = ch_ipsec_uld_state_change,
        .tx_handler = ch_ipsec_xmit,
        .xfrmdev_ops = &ch_ipsec_xfrmdev_ops,
};

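/* Allocate a per-adapter ULD context and cache the lower-layer driver
 * info so the Tx path can locate the SGE Ethernet queues later.
 */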
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop)
{
        struct ipsec_uld_ctx *u_ctx;

        pr_info_once("%s - version %s\n", CHIPSEC_DRV_DESC,
                     CHIPSEC_DRV_VERSION);
        u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
        if (!u_ctx) {
                u_ctx = ERR_PTR(-ENOMEM);
                goto out;
        }
        u_ctx->lldi = *infop;
out:
        return u_ctx;
}

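/* Adapter state callback: contexts join the global uld_ctx_list when
 * the adapter comes up and leave it on recovery/down/detach events.
 */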
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state)
{
        struct ipsec_uld_ctx *u_ctx = handle;

        pr_debug("new_state %u\n", new_state);
        switch (new_state) {
        case CXGB4_STATE_UP:
                pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
                mutex_lock(&dev_mutex);
                list_add_tail(&u_ctx->entry, &uld_ctx_list);
                mutex_unlock(&dev_mutex);
                break;
        case CXGB4_STATE_START_RECOVERY:
        case CXGB4_STATE_DOWN:
        case CXGB4_STATE_DETACH:
                pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
                /* Take dev_mutex here as well: uld_ctx_list is also
                 * modified under it in the UP case and in module exit.
                 */
                mutex_lock(&dev_mutex);
                list_del(&u_ctx->entry);
                mutex_unlock(&dev_mutex);
                break;
        default:
                break;
        }

        return 0;
}

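/* Map the SA's negotiated ICV length onto the hardware HMAC-control
 * encoding and record the authentication tag size for the Tx path.
 */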
static int ch_ipsec_setauthsize(struct xfrm_state *x,
                                struct ipsec_sa_entry *sa_entry)
{
        int hmac_ctrl;
        int authsize = x->aead->alg_icv_len / 8;

        sa_entry->authsize = authsize;

        switch (authsize) {
        case ICV_8:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
                break;
        case ICV_12:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
                break;
        case ICV_16:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
                break;
        default:
                return -EINVAL;
        }
        return hmac_ctrl;
}

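/* Build the hardware key context for an AES-GCM SA: the trailing four
 * bytes of the AEAD key material are the GCM salt, the remainder is the
 * AES key, and the GHASH subkey H = E_K(0^128) is appended after the
 * (16-byte padded) key.
 */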
static int ch_ipsec_setkey(struct xfrm_state *x,
                           struct ipsec_sa_entry *sa_entry)
{
        int keylen = (x->aead->alg_key_len + 7) / 8;
        unsigned char *key = x->aead->alg_key;
        int ck_size, key_ctx_size = 0;
        unsigned char ghash_h[AEAD_H_SIZE];
        struct crypto_aes_ctx aes;
        int ret = 0;

        /* The last four bytes of the key material are the salt */
        if (keylen > 3) {
                keylen -= 4;
                memcpy(sa_entry->salt, key + keylen, 4);
        }

        if (keylen == AES_KEYSIZE_128) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        } else if (keylen == AES_KEYSIZE_192) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        } else if (keylen == AES_KEYSIZE_256) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        } else {
                pr_err("GCM: Invalid key length %d\n", keylen);
                ret = -EINVAL;
                goto out;
        }

        memcpy(sa_entry->key, key, keylen);
        sa_entry->enckey_len = keylen;
        key_ctx_size = sizeof(struct _key_ctx) +
                       ((DIV_ROUND_UP(keylen, 16)) << 4) +
                       AEAD_H_SIZE;

        sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
                                                 0, 0,
                                                 key_ctx_size >> 4);

        /* Calculate H = CIPH(K, 0 repeated 16 times); it goes into the
         * key context right after the cipher key.
         */
        ret = aes_expandkey(&aes, key, keylen);
        if (ret) {
                sa_entry->enckey_len = 0;
                goto out;
        }
        memset(ghash_h, 0, AEAD_H_SIZE);
        aes_encrypt(&aes, ghash_h, ghash_h);
        memzero_explicit(&aes, sizeof(aes));

        memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
               16), ghash_h, AEAD_H_SIZE);
        sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
                             AEAD_H_SIZE;
out:
        return ret;
}

/*
 * ch_ipsec_xfrm_add_state
 * Returns 0 on success, a negative errno otherwise.
 * Validates the SA parameters and builds the host-side context
 * (key material and control fields) used by the Tx path.
 */
static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
{
        struct ipsec_sa_entry *sa_entry;
        int res = 0;

        if (x->props.aalgo != SADB_AALG_NONE) {
                pr_debug("Cannot offload authenticated xfrm states\n");
                return -EINVAL;
        }
        if (x->props.calgo != SADB_X_CALG_NONE) {
                pr_debug("Cannot offload compressed xfrm states\n");
                return -EINVAL;
        }
        if (x->props.family != AF_INET &&
            x->props.family != AF_INET6) {
                pr_debug("Only IPv4/6 xfrm states can be offloaded\n");
                return -EINVAL;
        }
        if (x->props.mode != XFRM_MODE_TRANSPORT &&
            x->props.mode != XFRM_MODE_TUNNEL) {
                pr_debug("Only transport and tunnel xfrm offload supported\n");
                return -EINVAL;
        }
        if (x->id.proto != IPPROTO_ESP) {
                pr_debug("Only ESP xfrm states can be offloaded\n");
                return -EINVAL;
        }
        if (x->encap) {
                pr_debug("Encapsulated xfrm states not offloaded\n");
                return -EINVAL;
        }
        if (!x->aead) {
                pr_debug("Cannot offload xfrm states without aead\n");
                return -EINVAL;
        }
        if (x->aead->alg_icv_len != 128 &&
            x->aead->alg_icv_len != 96) {
                pr_debug("Cannot offload xfrm states with AEAD ICV length other than 96/128 bits\n");
                return -EINVAL;
        }
        if ((x->aead->alg_key_len != 128 + 32) &&
            (x->aead->alg_key_len != 256 + 32)) {
                pr_debug("Cannot offload xfrm states with AEAD key length other than 128/256 bits\n");
                return -EINVAL;
        }
        if (x->tfcpad) {
                pr_debug("Cannot offload xfrm states with tfc padding\n");
                return -EINVAL;
        }
        if (!x->geniv) {
                pr_debug("Cannot offload xfrm states without geniv\n");
                return -EINVAL;
        }
        if (strcmp(x->geniv, "seqiv")) {
                pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
                return -EINVAL;
        }

        sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
        if (!sa_entry) {
                res = -ENOMEM;
                goto out;
        }

        /* The ICV length was validated above, so this cannot fail */
        sa_entry->hmac_ctrl = ch_ipsec_setauthsize(x, sa_entry);
        if (x->props.flags & XFRM_STATE_ESN)
                sa_entry->esn = 1;
        res = ch_ipsec_setkey(x, sa_entry);
        if (res) {
                kfree(sa_entry);
                goto out;
        }
        x->xso.offload_handle = (unsigned long)sa_entry;
        try_module_get(THIS_MODULE);
out:
        return res;
}

static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
{
        /* Nothing to tear down here; the entry is freed in free_state */
        if (!x->xso.offload_handle)
                return;
}

static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
{
        struct ipsec_sa_entry *sa_entry;

        if (!x->xso.offload_handle)
                return;

        sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
        kfree(sa_entry);
        module_put(THIS_MODULE);
}

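/* The engine cannot parse IPv4 options or IPv6 extension headers in
 * front of the ESP header, so such packets must take the software path.
 */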
static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        if (x->props.family == AF_INET) {
                /* Check for IPv4 options */
                if (ip_hdr(skb)->ihl > 5)
                        return false;
        } else {
                /* Check for IPv6 extension headers */
                if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
                        return false;
        }
        return true;
}

static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
        /* do nothing */
        if (!x->xso.offload_handle)
                return;
}

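/* is_eth_imm - can this packet be sent inline as immediate data?
 * Returns the header overhead (WR, request, key context, CPL and, for
 * ESN states, the AAD/IV block) if the whole packet fits within
 * MAX_IMM_TX_PKT_LEN, else 0.
 */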
static int is_eth_imm(const struct sk_buff *skb,
                      struct ipsec_sa_entry *sa_entry)
{
        unsigned int kctx_len;
        int hdrlen;

        kctx_len = sa_entry->kctx_len;
        hdrlen = sizeof(struct fw_ulptx_wr) +
                 sizeof(struct chcr_ipsec_req) + kctx_len;

        hdrlen += sizeof(struct cpl_tx_pkt);
        if (sa_entry->esn)
                hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
                           << 4);
        if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
                return hdrlen;
        return 0;
}

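/* calc_tx_sec_flits - number of 8-byte flits needed for the crypto work
 * request, also reporting whether the packet can be sent inline.
 */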
static unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
                                      struct ipsec_sa_entry *sa_entry,
                                      bool *immediate)
{
        unsigned int kctx_len;
        unsigned int flits;
        int aadivlen;
        int hdrlen;

        kctx_len = sa_entry->kctx_len;
        hdrlen = is_eth_imm(skb, sa_entry);
        aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
                                                16) : 0;
        aadivlen <<= 4;

        /* If the skb is small enough, we can pump it out as a work request
         * with only immediate data.  In that case we just have to have the
         * TX Packet header plus the skb data in the Work Request.
         */
        if (hdrlen) {
                *immediate = true;
                return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
        }

        flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

        /* Otherwise we have to construct a scatter-gather list of the skb
         * body and fragments, plus the flits needed for the Work Request
         * header, the request itself, the key context, the TX Packet CPL
         * and, for ESN states, the AAD/IV block.
         */
        flits += (sizeof(struct fw_ulptx_wr) +
                  sizeof(struct chcr_ipsec_req) +
                  kctx_len +
                  sizeof(struct cpl_tx_pkt_core) +
                  aadivlen) / sizeof(__be64);
        return flits;
}

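/* copy_esn_pktxt - for ESN states, write the AAD/IV block (SPI, 64-bit
 * extended sequence number and 8-byte GCM IV) at the current ring
 * position, padded to a 16-byte multiple.
 */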
static void *copy_esn_pktxt(struct sk_buff *skb,
                            struct net_device *dev,
                            void *pos,
                            struct ipsec_sa_entry *sa_entry)
{
        struct chcr_ipsec_aadiv *aadiv;
        struct ulptx_idata *sc_imm;
        struct ip_esp_hdr *esphdr;
        struct xfrm_offload *xo;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        __be64 seqno;
        u32 qidx;
        u32 seqlo;
        u8 *iv;
        int eoq;
        int len;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        /* end of queue, reset pos to start of queue */
        eoq = (void *)q->q.stat - pos;
        if (!eoq)
                pos = q->q.desc;

        len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
        memset(pos, 0, len);
        aadiv = (struct chcr_ipsec_aadiv *)pos;
        esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
        iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
        xo = xfrm_offload(skb);

        aadiv->spi = esphdr->spi;
        /* Rebuild the full 64-bit ESN from the low 32 bits on the wire
         * plus the high bits tracked in the xfrm offload state.
         */
        seqlo = ntohl(esphdr->seq_no);
        seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
        memcpy(aadiv->seq_no, &seqno, 8);
        memcpy(aadiv->iv, iv, 8);

        if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
                sc_imm = (struct ulptx_idata *)(pos +
                          (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
                                        sizeof(__be64)) << 3));
                sc_imm->cmd_more = FILL_CMD_MORE(0);
                sc_imm->len = cpu_to_be32(skb->len);
        }
        pos += len;
        return pos;
}

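/* copy_cpltx_pktxt - emit the CPL_TX_PKT_XT command (interface, VLAN
 * insertion, checksum-disable flags), then the ESN AAD/IV block if the
 * SA uses extended sequence numbers.
 */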
static void *copy_cpltx_pktxt(struct sk_buff *skb,
                              struct net_device *dev,
                              void *pos,
                              struct ipsec_sa_entry *sa_entry)
{
        struct cpl_tx_pkt_core *cpl;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        u32 ctrl0, qidx;
        u64 cntrl = 0;
        int left;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        /* end of queue, reset pos to start of queue */
        left = (void *)q->q.stat - pos;
        if (!left)
                pos = q->q.desc;

        cpl = (struct cpl_tx_pkt_core *)pos;

        cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
        ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
                TXPKT_PF_V(adap->pf);
        if (skb_vlan_tag_present(skb)) {
                q->vlan_ins++;
                cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
        }

        cpl->ctrl0 = htonl(ctrl0);
        cpl->pack = htons(0);
        cpl->len = htons(skb->len);
        cpl->ctrl1 = cpu_to_be64(cntrl);

        pos += sizeof(struct cpl_tx_pkt_core);

        if (sa_entry->esn)
                pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
        return pos;
}

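/* copy_key_cpltx_pktxt - copy the key context (header, salt, key and
 * GHASH subkey) into the descriptor ring, splitting the copy if it
 * wraps past the end of the queue, then append the CPL_TX_PKT_XT
 * command.
 */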
static void *copy_key_cpltx_pktxt(struct sk_buff *skb,
                                  struct net_device *dev,
                                  void *pos,
                                  struct ipsec_sa_entry *sa_entry)
{
        struct _key_ctx *key_ctx;
        int left, eoq, key_len;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        unsigned int qidx;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];
        key_len = sa_entry->kctx_len;

        /* end of queue, reset pos to start of queue */
        eoq = (void *)q->q.stat - pos;
        left = eoq;
        if (!eoq) {
                pos = q->q.desc;
                left = 64 * q->q.size;        /* whole ring, in bytes */
        }

        /* Copy the Key context header */
        key_ctx = (struct _key_ctx *)pos;
        key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
        memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
        pos += sizeof(struct _key_ctx);
        left -= sizeof(struct _key_ctx);

        if (likely(key_len <= left)) {
                memcpy(key_ctx->key, sa_entry->key, key_len);
                pos += key_len;
        } else {
                /* Key wraps past the end of the ring: split the copy */
                memcpy(pos, sa_entry->key, left);
                memcpy(q->q.desc, sa_entry->key + left,
                       key_len - left);
                pos = (u8 *)q->q.desc + (key_len - left);
        }

        pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);

        return pos;
}

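/* ch_ipsec_crypto_wreq - assemble the full FW_ULPTX_WR crypto request:
 * WR header, ULP_TXPKT, immediate-data sub-command, CPL_TX_SEC_PDU with
 * the AAD/IV/cipher offsets, followed by the key context and CPL.
 */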
static void *ch_ipsec_crypto_wreq(struct sk_buff *skb,
                                  struct net_device *dev,
                                  void *pos,
                                  int credits,
                                  struct ipsec_sa_entry *sa_entry)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        unsigned int ivsize = GCM_ESP_IV_SIZE;
        struct chcr_ipsec_wr *wr;
        bool immediate = false;
        u16 immdatalen = 0;
        unsigned int flits;
        u32 ivinoffset;
        u32 aadstart;
        u32 aadstop;
        u32 ciphstart;
        u16 sc_more = 0;
        u32 ivdrop = 0;
        u32 esnlen = 0;
        u32 wr_mid;
        u16 ndesc;
        int qidx = skb_get_queue_mapping(skb);
        struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
        unsigned int kctx_len = sa_entry->kctx_len;
        int qid = q->q.cntxt_id;

        atomic_inc(&adap->ch_ipsec_stats.ipsec_cnt);

        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
        ndesc = DIV_ROUND_UP(flits, 2);
        if (sa_entry->esn)
                ivdrop = 1;

        if (immediate)
                immdatalen = skb->len;

        if (sa_entry->esn) {
                esnlen = sizeof(struct chcr_ipsec_aadiv);
                if (!skb_is_nonlinear(skb))
                        sc_more = 1;
        }

        /* WR Header */
        wr = (struct chcr_ipsec_wr *)pos;
        wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
        wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);

        if (unlikely(credits < ETHTXQ_STOP_THRES)) {
                netif_tx_stop_queue(q->txq);
                q->q.stops++;
                if (!q->dbqt)
                        wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }
        wr_mid |= FW_ULPTX_WR_DATA_F;
        wr->wreq.flowid_len16 = htonl(wr_mid);

        /* ULPTX */
        wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
        wr->req.ulptx.len = htonl(ndesc - 1);

        /* Sub-command */
        wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
        wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                         sizeof(wr->req.key_ctx) +
                                         kctx_len +
                                         sizeof(struct cpl_tx_pkt_core) +
                                         esnlen +
                                         (esnlen ? 0 : immdatalen));

        /* CPL_SEC_PDU */
        ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
                                     (skb_transport_offset(skb) +
                                      sizeof(struct ip_esp_hdr) + 1);
        wr->req.sec_cpl.op_ivinsrtofst = htonl(
                                CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
                                CPL_TX_SEC_PDU_CPLLEN_V(2) |
                                CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
                                CPL_TX_SEC_PDU_IVINSRTOFST_V(ivinoffset));

        wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
        aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
        aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
                                  (skb_transport_offset(skb) +
                                   sizeof(struct ip_esp_hdr));
        ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
                    GCM_ESP_IV_SIZE + 1;
        ciphstart += sa_entry->esn ? esnlen : 0;

        wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
                                                        aadstart,
                                                        aadstop,
                                                        ciphstart, 0);

        wr->req.sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
                                        sa_entry->authsize,
                                        sa_entry->authsize);
        wr->req.sec_cpl.seqno_numivs =
                FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
                                         CHCR_SCMD_CIPHER_MODE_AES_GCM,
                                         CHCR_SCMD_AUTH_MODE_GHASH,
                                         sa_entry->hmac_ctrl,
                                         ivsize >> 1);
        wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
                                                                 0, ivdrop, 0);

        pos += sizeof(struct fw_ulptx_wr) +
               sizeof(struct ulp_txpkt) +
               sizeof(struct ulptx_idata) +
               sizeof(struct cpl_tx_sec_pdu);

        pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

        return pos;
}

/**
 *      flits_to_desc - returns the num of Tx descriptors for the given flits
 *      @n: the number of flits
 *
 *      Returns the number of Tx descriptors needed for the supplied number
 *      of flits.
 */
static unsigned int flits_to_desc(unsigned int n)
{
        WARN_ON(n > SGE_MAX_WR_LEN / 8);
        return DIV_ROUND_UP(n, 8);
}

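/* txq_avail - return the number of descriptor slots available to hold
 * more packets (one slot is kept unused to tell full from empty).
 */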
static unsigned int txq_avail(const struct sge_txq *q)
{
        return q->size - 1 - q->in_use;
}

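/* Stop the queue and count the stop event; stopped queues are restarted
 * by the cxgb4 Tx reclaim path once descriptors are freed.
 */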
static void eth_txq_stop(struct sge_eth_txq *q)
{
        netif_tx_stop_queue(q->txq);
        q->q.stops++;
}

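/* txq_advance - advance the queue's in-use count and producer index by
 * n descriptors, wrapping the index at the end of the ring.
 */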
static void txq_advance(struct sge_txq *q, unsigned int n)
{
        q->in_use += n;
        q->pidx += n;
        if (q->pidx >= q->size)
                q->pidx -= q->size;
}

/*
 * ch_ipsec_xmit is called from the cxgb4 ULD Tx handler for packets
 * whose xfrm state carries an offload handle.
 */
int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        unsigned int last_desc, ndesc, flits = 0;
        struct ipsec_sa_entry *sa_entry;
        u64 *pos, *end, *before, *sgl;
        struct tx_sw_desc *sgl_sdesc;
        int qidx, left, credits;
        bool immediate = false;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        struct sec_path *sp;

        if (!x->xso.offload_handle)
                return NETDEV_TX_BUSY;

        sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;

        sp = skb_sec_path(skb);
        if (sp->len != 1) {
out_free:       dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        cxgb4_reclaim_completed_tx(adap, &q->q, true);

        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
        ndesc = flits_to_desc(flits);
        credits = txq_avail(&q->q) - ndesc;

        if (unlikely(credits < 0)) {
                eth_txq_stop(q);
                dev_err(adap->pdev_dev,
                        "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
                        dev->name, qidx, credits, ndesc, txq_avail(&q->q),
                        flits);
                return NETDEV_TX_BUSY;
        }

        last_desc = q->q.pidx + ndesc - 1;
        if (last_desc >= q->q.size)
                last_desc -= q->q.size;
        sgl_sdesc = &q->q.sdesc[last_desc];

        if (!immediate &&
            unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
                memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
                q->mapping_err++;
                goto out_free;
        }

        pos = (u64 *)&q->q.desc[q->q.pidx];
        before = (u64 *)pos;
        end = (u64 *)pos + flits;

        pos = (void *)ch_ipsec_crypto_wreq(skb, dev, (void *)pos,
                                           credits, sa_entry);
        /* If the request wrapped around the end of the ring, recompute
         * 'end' relative to the start of the descriptor area.
         */
        if (before > (u64 *)pos) {
                left = (u8 *)end - (u8 *)q->q.stat;
                end = (void *)q->q.desc + left;
        }
        if (pos == (u64 *)q->q.stat) {
                left = (u8 *)end - (u8 *)q->q.stat;
                end = (void *)q->q.desc + left;
                pos = (void *)q->q.desc;
        }

        sgl = (void *)pos;
        if (immediate) {
                cxgb4_inline_tx_skb(skb, &q->q, sgl);
                dev_consume_skb_any(skb);
        } else {
                cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
                                0, sgl_sdesc->addr);
                skb_orphan(skb);
                sgl_sdesc->skb = skb;
        }
        txq_advance(&q->q, ndesc);

        cxgb4_ring_tx_db(adap, &q->q, ndesc);
        return NETDEV_TX_OK;
}

static int __init ch_ipsec_init(void)
{
        cxgb4_register_uld(CXGB4_ULD_IPSEC, &ch_ipsec_uld_info);

        return 0;
}

static void __exit ch_ipsec_exit(void)
{
        struct ipsec_uld_ctx *u_ctx, *tmp;
        struct adapter *adap;

        mutex_lock(&dev_mutex);
        list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
                adap = pci_get_drvdata(u_ctx->lldi.pdev);
                atomic_set(&adap->ch_ipsec_stats.ipsec_cnt, 0);
                list_del(&u_ctx->entry);
                kfree(u_ctx);
        }
        mutex_unlock(&dev_mutex);
        cxgb4_unregister_uld(CXGB4_ULD_IPSEC);
}

module_init(ch_ipsec_init);
module_exit(ch_ipsec_exit);

MODULE_DESCRIPTION("Crypto IPsec offload for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(CHIPSEC_DRV_VERSION);