// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbevf.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

#define IXGBE_IPSEC_KEY_BITS	160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
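
/**
 * ixgbevf_ipsec_set_pf_sa - ask the PF to set up an SA
 * @adapter: board private structure
 * @xs: xfrm info to be sent to the PF
 *
 * Returns: positive offload handle from the PF, or negative error code
 **/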
static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
				   struct xfrm_state *xs)
{
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
	struct ixgbe_hw *hw = &adapter->hw;
	struct sa_mbx_msg *sam;
	int ret;
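
	/* send the important bits to the PF */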
	sam = (struct sa_mbx_msg *)(&msgbuf[1]);
	sam->flags = xs->xso.flags;
	sam->spi = xs->id.spi;
	sam->proto = xs->id.proto;
	sam->family = xs->props.family;

	if (xs->props.family == AF_INET6)
		memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6));
	else
		memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4));
	memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));

	msgbuf[0] = IXGBE_VF_IPSEC_ADD;

	spin_lock_bh(&adapter->mbx_lock);

	ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
	if (ret)
		goto out;

	ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);
	if (ret)
		goto out;

	ret = (int)msgbuf[1];
	if ((msgbuf[0] & IXGBE_VT_MSGTYPE_NACK) && ret >= 0)
		ret = -1;

out:
	spin_unlock_bh(&adapter->mbx_lock);

	return ret;
}
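
/**
 * ixgbevf_ipsec_del_pf_sa - ask the PF to delete an SA
 * @adapter: board private structure
 * @pfsa: SA index returned from the PF when the SA was created
 *
 * Returns: 0 on success, or negative error code
 **/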
static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msgbuf[2];
	int err;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_IPSEC_DEL;
	msgbuf[1] = (u32)pfsa;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
	if (err)
		goto out;

	err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
	if (err)
		goto out;

out:
	spin_unlock_bh(&adapter->mbx_lock);
	return err;
}
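
/**
 * ixgbevf_ipsec_restore - restore the IPsec HW settings after a reset
 * @adapter: board private structure
 *
 * Reload the HW tables from the SW tables after they've been bashed
 * by a chip reset.
 **/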
void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct net_device *netdev = adapter->netdev;
	int i;

	if (!(netdev->features & NETIF_F_HW_ESP))
		return;
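
	/* reload the Rx and Tx keys */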
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *r = &ipsec->rx_tbl[i];
		struct tx_sa *t = &ipsec->tx_tbl[i];
		int ret;

		if (r->used) {
			ret = ixgbevf_ipsec_set_pf_sa(adapter, r->xs);
			if (ret < 0)
				netdev_err(netdev, "reload rx_tbl[%d] failed = %d\n",
					   i, ret);
		}

		if (t->used) {
			ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs);
			if (ret < 0)
				netdev_err(netdev, "reload tx_tbl[%d] failed = %d\n",
					   i, ret);
		}
	}
}
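
/**
 * ixgbevf_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to IPsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/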
static
int ixgbevf_ipsec_find_empty_idx(struct ixgbevf_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;
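
		/* search rx sa table */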
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;
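
		/* search tx sa table */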
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}
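
/**
 * ixgbevf_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to IPsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an IPv4 address
 *
 * Returns a pointer to the matching SA state information
 **/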
static
struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
					       __be32 *daddr, u8 proto,
					       __be32 spi, bool ip4)
{
	struct xfrm_state *ret = NULL;
	struct rx_sa *rsa;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
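
/**
 * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables.  The
 * hardware only supports the one algorithm.
 **/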
static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
					  u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}
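
	/* The key bytes come down in a big endian array of bytes, so
	 * we don't need to do any byte swapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */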
	if (key_len > IXGBE_IPSEC_KEY_BITS) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len == IXGBE_IPSEC_KEY_BITS) {
		*mysalt = 0;
	} else {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}
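
/**
 * ixgbevf_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/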
static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbevf_adapter *adapter = netdev_priv(dev);
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	u16 sa_idx;
	int ret;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}
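
		/* find the first unused index */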
		ret = ixgbevf_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto == IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;
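
		/* get the key and salt */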
		ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}
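
		/* get ip for rx sa table */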
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto == IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
		if (ret < 0)
			return ret;
		rsa.pfsa = ret;
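
		/* the preparations worked, so save the info */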
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;
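
		/* hash the new entry for faster search in Rx path */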
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u32)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;
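
		/* find the first unused index */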
		ret = ixgbevf_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto == IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
		if (ret < 0)
			return ret;
		tsa.pfsa = ret;
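
		/* the preparations worked, so save the info */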
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	return 0;
}
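
/**
 * ixgbevf_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/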
static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbevf_adapter *adapter = netdev_priv(dev);
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;

		if (!ipsec->rx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbevf_ipsec_del_pf_sa(adapter, ipsec->rx_tbl[sa_idx].pfsa);
		hash_del_rcu(&ipsec->rx_tbl[sa_idx].hlist);
		memset(&ipsec->rx_tbl[sa_idx], 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbevf_ipsec_del_pf_sa(adapter, ipsec->tx_tbl[sa_idx].pfsa);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}
}
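
/**
 * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/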
static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
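		/* Offload with IPv4 options is not supported yet */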
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
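		/* Offload with IPv6 extension headers is not supported yet */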
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbevf_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
};
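
/**
 * ixgbevf_ipsec_tx - setup Tx flags for IPsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/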
int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
		     struct ixgbevf_tx_buffer *first,
		     struct ixgbevf_ipsec_tx_data *itd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct sec_path *sp;
	struct tx_sa *tsa;
	u16 sa_idx;

	sp = skb_sec_path(first->skb);
	if (unlikely(!sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, sa_idx);
		return 0;
	}

	itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX;

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;
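
		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * The padlen value is carried in the last byte before
		 * the ICV in the packet itself, so for the non-GSO case
		 * we go dig it out below.
		 */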
		if (!skb_is_gso(first->skb)) {
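			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */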
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}
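
/**
 * ixgbevf_ipsec_rx - decode IPsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an IPsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/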
void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
		      union ixgbe_adv_rx_desc *rx_desc,
		      struct sk_buff *skb)
{
	struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	struct sec_path *sp;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;
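
	/* Find the IP and crypto headers in the data.
	 * We can assume no VLAN header in the way, b/c the
	 * hw didn't parse the VLAN headers and thus didn't
	 * set the IPsec bits.
	 */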
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	sp = secpath_set(skb);
	if (unlikely(!sp))
		return;

	sp->xvec[sp->len++] = xs;
	sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}
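
/**
 * ixgbevf_init_ipsec_offload - initialize the IPsec offload support
 * @adapter: board private structure
 **/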
void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec;
	size_t size;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_14:
		break;
	default:
		return;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;

	adapter->netdev->xfrmdev_ops = &ixgbevf_xfrmdev_ops;

#define IXGBEVF_ESP_FEATURES	(NETIF_F_HW_ESP | \
				 NETIF_F_HW_ESP_TX_CSUM | \
				 NETIF_F_GSO_ESP)

	adapter->netdev->features |= IXGBEVF_ESP_FEATURES;
	adapter->netdev->hw_enc_features |= IXGBEVF_ESP_FEATURES;

	return;

err2:
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}
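
/**
 * ixgbevf_stop_ipsec_offload - tear down the IPsec offload
 * @adapter: board private structure
 **/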
void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}