#define pr_fmt(fmt) "ch_ipsec: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_ipsec.h"

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE 8

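/* One ipsec_uld_ctx is allocated per adapter and linked on uld_ctx_list;
 * dev_mutex serializes list updates between the ULD state_change callback
 * and module exit.
 */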
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
static int ch_ipsec_xfrm_add_state(struct xfrm_state *x);

static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add      = ch_ipsec_xfrm_add_state,
	.xdo_dev_state_delete   = ch_ipsec_xfrm_del_state,
	.xdo_dev_state_free     = ch_ipsec_xfrm_free_state,
	.xdo_dev_offload_ok     = ch_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};

static struct cxgb4_uld_info ch_ipsec_uld_info = {
	.name = CHIPSEC_DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	/* Max ntxq will be derived from fw config file */
	.rxq_size = 1024,
	.add = ch_ipsec_uld_add,
	.state_change = ch_ipsec_uld_state_change,
	.tx_handler = ch_ipsec_xmit,
	.xfrmdev_ops = &ch_ipsec_xfrmdev_ops,
};

static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop)
{
	struct ipsec_uld_ctx *u_ctx;

	pr_info_once("%s - version %s\n", CHIPSEC_DRV_DESC,
		     CHIPSEC_DRV_VERSION);
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *infop;
out:
	return u_ctx;
}

static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct ipsec_uld_ctx *u_ctx = handle;

	pr_debug("new_state %u\n", new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
		mutex_lock(&dev_mutex);
		list_add_tail(&u_ctx->entry, &uld_ctx_list);
		mutex_unlock(&dev_mutex);
		break;
	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	case CXGB4_STATE_DETACH:
		pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
		/* take dev_mutex here as well: the list can be walked
		 * concurrently from module exit
		 */
		mutex_lock(&dev_mutex);
		list_del(&u_ctx->entry);
		mutex_unlock(&dev_mutex);
		break;
	default:
		break;
	}

	return 0;
}

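/* Map the SA's ICV (authentication tag) length onto the hardware HMAC
 * control encoding.  Returns the encoding, or -EINVAL for unsupported
 * tag sizes.
 */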
static int ch_ipsec_setauthsize(struct xfrm_state *x,
				struct ipsec_sa_entry *sa_entry)
{
	int hmac_ctrl;
	int authsize = x->aead->alg_icv_len / 8;

	sa_entry->authsize = authsize;

	switch (authsize) {
	case ICV_8:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		break;
	case ICV_12:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		break;
	case ICV_16:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		break;
	default:
		return -EINVAL;
	}
	return hmac_ctrl;
}

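/* Split the AEAD key into the AES cipher key plus the trailing 4-byte
 * salt, build the key-context header, and precompute the GHASH hash key
 * H = AES-K(0^128) that the hardware expects appended to the cipher key.
 */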
static int ch_ipsec_setkey(struct xfrm_state *x,
			   struct ipsec_sa_entry *sa_entry)
{
	int keylen = (x->aead->alg_key_len + 7) / 8;
	unsigned char *key = x->aead->alg_key;
	int ck_size, key_ctx_size = 0;
	unsigned char ghash_h[AEAD_H_SIZE];
	struct crypto_aes_ctx aes;
	int ret = 0;

	if (keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(sa_entry->salt, key + keylen, 4);
	}

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(sa_entry->key, key, keylen);
	sa_entry->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
		       ((DIV_ROUND_UP(keylen, 16)) << 4) +
		       AEAD_H_SIZE;

	sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
						 0, 0,
						 key_ctx_size >> 4);

	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		sa_entry->enckey_len = 0;
		goto out;
	}
	memset(ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, ghash_h, ghash_h);
	memzero_explicit(&aes, sizeof(aes));

	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) * 16),
	       ghash_h, AEAD_H_SIZE);
	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
			     AEAD_H_SIZE;
out:
	return ret;
}

/* ch_ipsec_xfrm_add_state
 * returns 0 on success, negative error if failed to send message to FPGA
 * create the state
 * @x - NEW state
 */
static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;
	int res = 0;

	if (x->props.aalgo != SADB_AALG_NONE) {
		pr_debug("Cannot offload authenticated xfrm states\n");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		pr_debug("Cannot offload compressed xfrm states\n");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		pr_debug("Only IPv4/6 xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		pr_debug("Only transport and tunnel xfrm offload\n");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		pr_debug("Only ESP xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->encap) {
		pr_debug("Encapsulated xfrm state not offloaded\n");
		return -EINVAL;
	}
	if (!x->aead) {
		pr_debug("Cannot offload xfrm states without aead\n");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128 &&
	    x->aead->alg_icv_len != 96) {
		pr_debug("Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		pr_debug("Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EINVAL;
	}
	if (x->tfcpad) {
		pr_debug("Cannot offload xfrm states with tfc padding\n");
		return -EINVAL;
	}
	if (!x->geniv) {
		pr_debug("Cannot offload xfrm states without geniv\n");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
		return -EINVAL;
	}

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		res = -ENOMEM;
		goto out;
	}

	/* The ICV length was validated above, so setauthsize cannot fail */
	sa_entry->hmac_ctrl = ch_ipsec_setauthsize(x, sa_entry);
	if (x->props.flags & XFRM_STATE_ESN)
		sa_entry->esn = 1;
	res = ch_ipsec_setkey(x, sa_entry);
	if (res) {
		kfree(sa_entry);
		goto out;
	}
	x->xso.offload_handle = (unsigned long)sa_entry;
	try_module_get(THIS_MODULE);
out:
	return res;
}

static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}

static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
	kfree(sa_entry);
	module_put(THIS_MODULE);
}

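/* xdo_dev_offload_ok: variable-length IP headers (IPv4 options, IPv6
 * extension headers) are not handled by the offload path, so decline
 * and let the stack fall back to software crypto for such packets.
 */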
static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Variable length fields in header need to be handled */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Variable length fields in header need to be handled */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}
	return true;
}

static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}

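/* Returns the length of the WR headers if the whole packet can travel as
 * immediate data inside a MAX_IMM_TX_PKT_LEN work request, 0 otherwise.
 */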
static int is_eth_imm(const struct sk_buff *skb,
		      struct ipsec_sa_entry *sa_entry)
{
	unsigned int kctx_len;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = sizeof(struct fw_ulptx_wr) +
		 sizeof(struct chcr_ipsec_req) + kctx_len;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (sa_entry->esn)
		hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
			   << 4);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}

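/* Compute the number of flits (8-byte units) the work request occupies:
 * either the inlined packet plus headers, or the headers, key context
 * and an SGL covering the skb fragments.
 */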
static unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
				      struct ipsec_sa_entry *sa_entry,
				      bool *immediate)
{
	unsigned int kctx_len;
	unsigned int flits;
	int aadivlen;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = is_eth_imm(skb, sa_entry);
	aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
						16) : 0;
	aadivlen <<= 4;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */

	if (hdrlen) {
		*immediate = true;
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
	}

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt_core structures), followed by either a TX Packet Write
	 * CPL message (i.e., a struct cpl_tx_pkt) and an optional SGL or a TX
	 * Packet Work Request.
	 */

	flits += (sizeof(struct fw_ulptx_wr) +
		  sizeof(struct chcr_ipsec_req) +
		  kctx_len +
		  sizeof(struct cpl_tx_pkt_core) +
		  aadivlen) / sizeof(__be64);
	return flits;
}

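/* For ESN states, emit the chcr_ipsec_aadiv block (SPI, 64-bit extended
 * sequence number, IV) that the hardware consumes as AAD, taking care of
 * a wrap at the end of the descriptor ring.
 */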
static void *copy_esn_pktxt(struct sk_buff *skb,
			    struct net_device *dev,
			    void *pos,
			    struct ipsec_sa_entry *sa_entry)
{
	struct chcr_ipsec_aadiv *aadiv;
	struct ulptx_idata *sc_imm;
	struct ip_esp_hdr *esphdr;
	struct xfrm_offload *xo;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	__be64 seqno;
	u32 qidx;
	u32 seqlo;
	u8 *iv;
	int eoq;
	int len;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	if (!eoq)
		pos = q->q.desc;

	len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
	memset(pos, 0, len);
	aadiv = (struct chcr_ipsec_aadiv *)pos;
	esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
	xo = xfrm_offload(skb);

	aadiv->spi = esphdr->spi;
	/* rebuild the full 64-bit ESN from the low 32 bits on the wire
	 * plus the high bits kept in the offload state
	 */
	seqlo = ntohl(esphdr->seq_no);
	seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
	memcpy(aadiv->seq_no, &seqno, 8);
	memcpy(aadiv->iv, iv, 8);

	if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
		sc_imm = (struct ulptx_idata *)(pos +
			 (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
				       sizeof(__be64)) << 3));
		sc_imm->cmd_more = FILL_CMD_MORE(0);
		sc_imm->len = cpu_to_be32(skb->len);
	}
	pos += len;
	return pos;
}

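/* Emit the CPL_TX_PKT_XT command for the packet.  Checksum offload is
 * disabled (the payload is encrypted); VLAN insertion is requested when
 * a tag is present.
 */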
static void *copy_cpltx_pktxt(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos,
			      struct ipsec_sa_entry *sa_entry)
{
	struct cpl_tx_pkt_core *cpl;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	u32 ctrl0, qidx;
	u64 cntrl = 0;
	int left;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	/* end of queue, reset pos to start of queue */
	left = (void *)q->q.stat - pos;
	if (!left)
		pos = q->q.desc;

	cpl = (struct cpl_tx_pkt_core *)pos;

	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	pos += sizeof(struct cpl_tx_pkt_core);

	/* the ESN AAD/IV block, when present, immediately follows the CPL */
	if (sa_entry->esn)
		pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
	return pos;
}

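/* Copy the key context (header, salt, key material, GHASH H) into the
 * ring, splitting the copy in two when it crosses the end of the queue.
 */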
static void *copy_key_cpltx_pktxt(struct sk_buff *skb,
				  struct net_device *dev,
				  void *pos,
				  struct ipsec_sa_entry *sa_entry)
{
	struct _key_ctx *key_ctx;
	int left, eoq, key_len;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	unsigned int qidx;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];
	key_len = sa_entry->kctx_len;

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	left = eoq;
	if (!eoq) {
		pos = q->q.desc;
		left = 64 * q->q.size;
	}

	/* Copy the Key context header */
	key_ctx = (struct _key_ctx *)pos;
	key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
	memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
	pos += sizeof(struct _key_ctx);
	left -= sizeof(struct _key_ctx);

	if (likely(key_len <= left)) {
		memcpy(key_ctx->key, sa_entry->key, key_len);
		pos += key_len;
	} else {
		/* key wraps around the end of the ring */
		memcpy(pos, sa_entry->key, left);
		memcpy(q->q.desc, sa_entry->key + left,
		       key_len - left);
		pos = (u8 *)q->q.desc + (key_len - left);
	}

	pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}

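/* Build the complete crypto work request in the Tx ring.  The layout,
 * in order, is:
 *
 *   FW_ULPTX_WR | ULP_TXPKT | ULPTX_IDATA | CPL_TX_SEC_PDU |
 *   key context | CPL_TX_PKT_CORE | [AAD/IV block for ESN] |
 *   packet (immediate data) or SGL
 *
 * The AAD/cipher/IV insert offsets given to CPL_TX_SEC_PDU count from 1,
 * hence the "+ 1" terms below.
 */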
static void *ch_ipsec_crypto_wreq(struct sk_buff *skb,
				  struct net_device *dev,
				  void *pos,
				  int credits,
				  struct ipsec_sa_entry *sa_entry)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int ivsize = GCM_ESP_IV_SIZE;
	struct chcr_ipsec_wr *wr;
	bool immediate = false;
	u16 immdatalen = 0;
	unsigned int flits;
	u32 ivinoffset;
	u32 aadstart;
	u32 aadstop;
	u32 ciphstart;
	u16 sc_more = 0;
	u32 ivdrop = 0;
	u32 esnlen = 0;
	u32 wr_mid;
	u16 ndesc;
	int qidx = skb_get_queue_mapping(skb);
	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
	unsigned int kctx_len = sa_entry->kctx_len;
	int qid = q->q.cntxt_id;

	atomic_inc(&adap->ch_ipsec_stats.ipsec_cnt);

	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
	ndesc = DIV_ROUND_UP(flits, 2);
	if (sa_entry->esn)
		ivdrop = 1;

	if (immediate)
		immdatalen = skb->len;

	if (sa_entry->esn) {
		esnlen = sizeof(struct chcr_ipsec_aadiv);
		if (!skb_is_nonlinear(skb))
			sc_more = 1;
	}

	/* WR Header */
	wr = (struct chcr_ipsec_wr *)pos;
	wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);

	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		netif_tx_stop_queue(q->txq);
		q->q.stops++;
		if (!q->dbqt)
			wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}
	wr_mid |= FW_ULPTX_WR_DATA_F;
	wr->wreq.flowid_len16 = htonl(wr_mid);

	/* ULPTX wr */
	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
	wr->req.ulptx.len = htonl(ndesc - 1);

	/* XFRM CPL SGE */
	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					 sizeof(wr->req.key_ctx) +
					 kctx_len +
					 sizeof(struct cpl_tx_pkt_core) +
					 esnlen +
					 (esnlen ? 0 : immdatalen));

	/* CPL_SEC_PDU */
	ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
				     (skb_transport_offset(skb) +
				      sizeof(struct ip_esp_hdr) + 1);
	wr->req.sec_cpl.op_ivinsrtofst = htonl(
		CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
		CPL_TX_SEC_PDU_CPLLEN_V(2) |
		CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
		CPL_TX_SEC_PDU_IVINSRTOFST_V(ivinoffset));

	wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
	aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
	aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
				  (skb_transport_offset(skb) +
				   sizeof(struct ip_esp_hdr));
	ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
		    GCM_ESP_IV_SIZE + 1;
	ciphstart += sa_entry->esn ? esnlen : 0;

	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
							aadstart,
							aadstop,
							ciphstart, 0);

	wr->req.sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
					sa_entry->authsize,
					sa_entry->authsize);
	wr->req.sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
					 CHCR_SCMD_AUTH_MODE_GHASH,
					 sa_entry->hmac_ctrl,
					 ivsize >> 1);
	wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
								 0, ivdrop, 0);

	pos += sizeof(struct fw_ulptx_wr) +
	       sizeof(struct ulp_txpkt) +
	       sizeof(struct ulptx_idata) +
	       sizeof(struct cpl_tx_sec_pdu);

	pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static unsigned int flits_to_desc(unsigned int n)
{
	WARN_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

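/* Free descriptor slots in the Tx queue.  One slot is kept in reserve so
 * that a completely full ring is distinguishable from an empty one.
 */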
static unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

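/* Advance the producer index after @n descriptors have been written,
 * wrapping at the end of the ring.
 */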
static void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

/*
 *	ch_ipsec_xmit called from ULD Tx handler
 */
int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	unsigned int last_desc, ndesc, flits = 0;
	struct ipsec_sa_entry *sa_entry;
	u64 *pos, *end, *before, *sgl;
	struct tx_sw_desc *sgl_sdesc;
	int qidx, left, credits;
	bool immediate = false;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	struct sec_path *sp;

	if (!x->xso.offload_handle)
		return NETDEV_TX_BUSY;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;

	sp = skb_sec_path(skb);
	if (sp->len != 1) {
out_free:	dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	cxgb4_reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
			dev->name, qidx, credits, ndesc, txq_avail(&q->q),
			flits);
		return NETDEV_TX_BUSY;
	}

	last_desc = q->q.pidx + ndesc - 1;
	if (last_desc >= q->q.size)
		last_desc -= q->q.size;
	sgl_sdesc = &q->q.sdesc[last_desc];

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		q->mapping_err++;
		goto out_free;
	}

	pos = (u64 *)&q->q.desc[q->q.pidx];
	before = (u64 *)pos;
	end = (u64 *)pos + flits;

	/* Setup IPSec CPL */
	pos = (void *)ch_ipsec_crypto_wreq(skb, dev, (void *)pos,
					   credits, sa_entry);
	if (before > (u64 *)pos) {
		/* the WR wrapped around the ring; fix up @end accordingly */
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
	}
	if (pos == (u64 *)q->q.stat) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		pos = (void *)q->q.desc;
	}

	sgl = (void *)pos;
	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
				0, sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}
	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}

static int __init ch_ipsec_init(void)
{
	cxgb4_register_uld(CXGB4_ULD_IPSEC, &ch_ipsec_uld_info);

	return 0;
}

static void __exit ch_ipsec_exit(void)
{
	struct ipsec_uld_ctx *u_ctx, *tmp;
	struct adapter *adap;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
		adap = pci_get_drvdata(u_ctx->lldi.pdev);
		atomic_set(&adap->ch_ipsec_stats.ipsec_cnt, 0);
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_IPSEC);
}

module_init(ch_ipsec_init);
module_exit(ch_ipsec_exit);

MODULE_DESCRIPTION("Crypto IPSEC for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(CHIPSEC_DRV_VERSION);