#ifndef __CN9K_IPSEC_LA_OPS_H__
#define __CN9K_IPSEC_LA_OPS_H__

#include <rte_crypto_sym.h>
#include <rte_esp.h>
#include <rte_security.h>

#include "cn9k_ipsec.h"
#include "cnxk_security_ar.h"

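/*
 * Compute the expected result length of an outbound packet after
 * encryption: custom header + the SA's fixed overhead (partial_len)
 * + payload rounded up per the SA's roundup parameters.
 */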
static __rte_always_inline int32_t
ipsec_po_out_rlen_get(struct cn9k_ipsec_sa *sa, uint32_t plen)
{
	uint32_t enc_payload_len;

	enc_payload_len = RTE_ALIGN_CEIL(plen + sa->rlens.roundup_len,
					 sa->rlens.roundup_byte);

	return sa->custom_hdr_len + sa->rlens.partial_len + enc_payload_len;
}

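/*
 * Prepare an outbound (encrypt) CPT instruction for lookaside IPsec.
 * The custom header is built in the mbuf headroom and the packet is
 * extended at the tail to make room for encryption growth. Returns 0
 * on success, -ENOMEM when the mbuf lacks the required room.
 */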
static __rte_always_inline int
process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
		struct cpt_inst_s *inst)
{
	const unsigned int hdr_len = sa->custom_hdr_len;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct rte_mbuf *m_src = sym_op->m_src;
	uint32_t dlen, rlen, pkt_len, seq_lo;
	uint16_t data_off = m_src->data_off;
	struct roc_ie_on_outb_hdr *hdr;
	int32_t extend_tail;
	uint64_t esn;

	pkt_len = rte_pktmbuf_pkt_len(m_src);
	dlen = pkt_len + hdr_len;
	rlen = ipsec_po_out_rlen_get(sa, pkt_len);

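	/* Packet grows at the tail after encryption; verify tailroom */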
	extend_tail = rlen - dlen;
	if (unlikely(extend_tail > rte_pktmbuf_tailroom(m_src))) {
		plt_dp_err("Not enough tail room (required: %d, available: %d)",
			   extend_tail, rte_pktmbuf_tailroom(m_src));
		return -ENOMEM;
	}

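	/* The custom header is prepended in the headroom; verify space */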
	if (unlikely(hdr_len > data_off)) {
		plt_dp_err("Not enough head room (required: %d, available: %d)",
			   hdr_len, rte_pktmbuf_headroom(m_src));
		return -ENOMEM;
	}

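	/* Update mbuf lengths to the post-encryption packet size */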
	pkt_len += extend_tail;

	m_src->data_len = pkt_len;
	m_src->pkt_len = pkt_len;

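	/* Build the custom header just before the packet data */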
	hdr = PLT_PTR_ADD(m_src->buf_addr, data_off - hdr_len);

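/* Debug only: use the per-packet IV supplied in the crypto op */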
#ifdef LA_IPSEC_DEBUG
	if (sa->inst.w4 & ROC_IE_ON_PER_PKT_IV) {
		memcpy(&hdr->iv[0],
		       rte_crypto_op_ctod_offset(cop, uint8_t *,
						 sa->cipher_iv_off),
		       sa->cipher_iv_len);
	}
#endif

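	/* Increment the SA's extended sequence number */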
	esn = ++sa->esn;

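	/* Set ESN seq hi */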
	hdr->esn = rte_cpu_to_be_32(esn >> 32);

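	/* Set ESN seq lo */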
	seq_lo = rte_cpu_to_be_32(esn & (BIT_ULL(32) - 1));
	hdr->seq = seq_lo;

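	/* Set IP ID same as seq lo */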
	hdr->ip_id = seq_lo;

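	/* Prepare CPT instruction */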
	inst->w4.u64 = sa->inst.w4 | dlen;
	inst->dptr = PLT_U64_CAST(hdr);
	inst->rptr = PLT_U64_CAST(hdr);
	inst->w7.u64 = sa->inst.w7;

	return 0;
}

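/*
 * Prepare an inbound (decrypt) CPT instruction for lookaside IPsec.
 * The packet is handed to CPT in place: dptr and rptr both point at
 * the start of the mbuf data.
 */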
static __rte_always_inline void
process_inb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
	       struct cpt_inst_s *inst)
{
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct rte_mbuf *m_src = sym_op->m_src;

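	/* Prepare CPT instruction */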
	inst->w4.u64 = sa->inst.w4 | rte_pktmbuf_pkt_len(m_src);
	inst->dptr = inst->rptr = rte_pktmbuf_iova(m_src);
	inst->w7.u64 = sa->inst.w7;
}

#endif /* __CN9K_IPSEC_LA_OPS_H__ */