#ifndef __CHTLS_H__
#define __CHTLS_H__

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include <linux/tls.h>
#include <net/tls.h>
#include <net/tls_toe.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "chcr_algo.h"
#include "chcr_core.h"
#include "chcr_crypto.h"

#define MAX_IVS_PAGE 256
#define TLS_KEY_CONTEXT_SZ 64
#define CIPHER_BLOCK_SIZE 16
#define GCM_TAG_SIZE 16
#define KEY_ON_MEM_SZ 16
#define AEAD_EXPLICIT_DATA_SIZE 8
#define TLS_HEADER_LENGTH 5
#define SCMD_CIPH_MODE_AES_GCM 2

#define TLS_MFS 16384

#define RSS_HDR sizeof(struct rss_header)
#define TLS_WR_CPL_LEN \
	(sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo))

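/* where the adapter fetches the TLS key context from */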
enum {
	CHTLS_KEY_CONTEXT_DSGL,
	CHTLS_KEY_CONTEXT_IMM,
	CHTLS_KEY_CONTEXT_DDR,
};

enum {
	CHTLS_LISTEN_START,
	CHTLS_LISTEN_STOP,
};

enum {
	CPL_RET_BUF_DONE = 1,	/* buffer processing done */
	CPL_RET_BAD_MSG = 2,	/* bad CPL message */
	CPL_RET_UNKNOWN_TID = 4	/* unexpected unknown TID */
};

#define LISTEN_INFO_HASH_SIZE 32
#define RSPQ_HASH_BITS 5
struct listen_info {
	struct listen_info *next;  /* Link to next entry */
	struct sock *sk;           /* The listening socket */
	unsigned int stid;         /* The server TID */
};

enum {
	T4_LISTEN_START_PENDING,
	T4_LISTEN_STARTED
};

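/* per-connection state bits, kept in chtls_sock->flags */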
enum csk_flags {
	CSK_CALLBACKS_CHKD,	/* socket callbacks have been sanitized */
	CSK_ABORT_REQ_RCVD,	/* received one ABORT_REQ_RSS message */
	CSK_TX_MORE_DATA,	/* sending ULP data; don't set SHOVE bit */
	CSK_TX_WAIT_IDLE,	/* suspend Tx until in-flight data is ACKed */
	CSK_ABORT_SHUTDOWN,	/* shouldn't send more abort requests */
	CSK_ABORT_RPL_PENDING,	/* expecting an abort reply */
	CSK_CLOSE_CON_REQUESTED,/* we've sent a close_conn_req */
	CSK_TX_DATA_SENT,	/* sent a TX_DATA WR on this connection */
	CSK_TX_FAILOVER,	/* Tx traffic failing over */
	CSK_UPDATE_RCV_WND,	/* need to update rcv window */
	CSK_RST_ABORTED,	/* outgoing RST was aborted */
	CSK_TLS_HANDSHK,	/* TLS handshake */
	CSK_CONN_INLINE,	/* connection on HW */
};

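/* state for an offloaded listening socket, including its pending-SYN queue */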
struct listen_ctx {
	struct sock *lsk;
	struct chtls_dev *cdev;
	struct sk_buff_head synq;
	u32 state;
};

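/* allocator state for TLS key storage in adapter memory */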
struct key_map {
	unsigned long *addr;
	unsigned int start;
	unsigned int available;
	unsigned int size;
	spinlock_t lock; /* key map lock */
} __packed;

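/* SCMD words describing the cipher, IV generation and header length */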
struct tls_scmd {
	u32 seqno_numivs;
	u32 ivgen_hdrlen;
};

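/* per-adapter chtls state */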
struct chtls_dev {
	struct tls_toe_device tlsdev;
	struct list_head list;
	struct cxgb4_lld_info *lldi;
	struct pci_dev *pdev;
	struct listen_info *listen_hash_tab[LISTEN_INFO_HASH_SIZE];
	spinlock_t listen_lock; /* lock for listen list */
	struct net_device **ports;
	struct tid_info *tids;
	unsigned int pfvf;
	const unsigned short *mtus;

	struct idr hwtid_idr;
	struct idr stid_idr;

	spinlock_t idr_lock ____cacheline_aligned_in_smp;

	struct net_device *egr_dev[NCHAN * 2];
	struct sk_buff *rspq_skb_cache[1 << RSPQ_HASH_BITS];
	struct sk_buff *askb;

	struct sk_buff_head deferq;
	struct work_struct deferq_task;

	struct list_head list_node;
	struct list_head rcu_node;
	struct list_head na_node;
	unsigned int send_page_order;
	int max_host_sndbuf;
	struct key_map kmap;
};

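/* pairs a listening socket with the device offloading it */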
struct chtls_listen {
	struct chtls_dev *cdev;
	struct sock *sk;
};

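/* TLS hardware state for one offloaded connection */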
struct chtls_hws {
	struct sk_buff_head sk_recv_queue;
	u8 txqid;
	u8 ofld;
	u16 type;
	u16 rstate;
	u16 keyrpl;
	u16 pldlen;
	u16 rcvpld;
	u16 compute;
	u16 expansion;
	u16 keylen;
	u16 pdus;
	u16 adjustlen;
	u16 ivsize;
	u16 txleft;
	u32 mfs;
	s32 txkey;
	s32 rxkey;
	u32 fcplenmax;
	u32 copied_seq;
	u64 tx_seq_no;
	struct tls_scmd scmd;
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
};

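/* main per-connection structure, reachable from sk->sk_user_data */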
struct chtls_sock {
	struct sock *sk;
	struct chtls_dev *cdev;
	struct l2t_entry *l2t_entry;    /* pointer to the L2T entry */
	struct net_device *egress_dev;  /* TX_CHAN for act open retry */

	struct sk_buff_head txq;
	struct sk_buff *wr_skb_head;
	struct sk_buff *wr_skb_tail;
	struct sk_buff *ctrl_skb_cache;
	struct sk_buff *txdata_skb_cache; /* abort path messages */
	struct kref kref;
	unsigned long flags;
	u32 opt2;
	u32 wr_credits;
	u32 wr_unacked;
	u32 wr_max_credits;
	u32 wr_nondata;
	u32 hwtid;               /* TCP Control Block ID */
	u32 txq_idx;
	u32 rss_qid;
	u32 tid;
	u32 idr;
	u32 mss;
	u32 ulp_mode;
	u32 tx_chan;
	u32 rx_chan;
	u32 sndbuf;
	u32 txplen_max;
	u32 mtu_idx;           /* MTU table index */
	u32 smac_idx;
	u8 port_id;
	u8 tos;
	u16 resv2;
	u32 delack_mode;
	u32 delack_seq;

	void *passive_reap_next;        /* placeholder for passive */
	struct chtls_hws tlshws;
	struct synq {
		struct sk_buff *next;
		struct sk_buff *prev;
	} synq;
	struct listen_ctx *listen_ctx;
};

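/* on-the-wire TLS record header */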
struct tls_hdr {
	u8  type;
	u16 version;
	u16 length;
} __packed;

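/* TLS RX completion header returned by the adapter; the error bits in
 * res_to_mac_error are decoded with the TLSRX_HDR_PKT_*_ERROR macros below
 */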
struct tlsrx_cmp_hdr {
	u8  type;
	u16 version;
	u16 length;

	u64 tls_seq;
	u16 reserved1;
	u8  res_to_mac_error;
} __packed;

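/* res_to_mac_error fields */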
#define TLSRX_HDR_PKT_INT_ERROR_S 4
#define TLSRX_HDR_PKT_INT_ERROR_M 0x1
#define TLSRX_HDR_PKT_INT_ERROR_V(x) \
	((x) << TLSRX_HDR_PKT_INT_ERROR_S)
#define TLSRX_HDR_PKT_INT_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_INT_ERROR_S) & TLSRX_HDR_PKT_INT_ERROR_M)
#define TLSRX_HDR_PKT_INT_ERROR_F TLSRX_HDR_PKT_INT_ERROR_V(1U)

#define TLSRX_HDR_PKT_SPP_ERROR_S 3
#define TLSRX_HDR_PKT_SPP_ERROR_M 0x1
#define TLSRX_HDR_PKT_SPP_ERROR_V(x) ((x) << TLSRX_HDR_PKT_SPP_ERROR_S)
#define TLSRX_HDR_PKT_SPP_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_SPP_ERROR_S) & TLSRX_HDR_PKT_SPP_ERROR_M)
#define TLSRX_HDR_PKT_SPP_ERROR_F TLSRX_HDR_PKT_SPP_ERROR_V(1U)

#define TLSRX_HDR_PKT_CCDX_ERROR_S 2
#define TLSRX_HDR_PKT_CCDX_ERROR_M 0x1
#define TLSRX_HDR_PKT_CCDX_ERROR_V(x) ((x) << TLSRX_HDR_PKT_CCDX_ERROR_S)
#define TLSRX_HDR_PKT_CCDX_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_CCDX_ERROR_S) & TLSRX_HDR_PKT_CCDX_ERROR_M)
#define TLSRX_HDR_PKT_CCDX_ERROR_F TLSRX_HDR_PKT_CCDX_ERROR_V(1U)

#define TLSRX_HDR_PKT_PAD_ERROR_S 1
#define TLSRX_HDR_PKT_PAD_ERROR_M 0x1
#define TLSRX_HDR_PKT_PAD_ERROR_V(x) ((x) << TLSRX_HDR_PKT_PAD_ERROR_S)
#define TLSRX_HDR_PKT_PAD_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_PAD_ERROR_S) & TLSRX_HDR_PKT_PAD_ERROR_M)
#define TLSRX_HDR_PKT_PAD_ERROR_F TLSRX_HDR_PKT_PAD_ERROR_V(1U)

#define TLSRX_HDR_PKT_MAC_ERROR_S 0
#define TLSRX_HDR_PKT_MAC_ERROR_M 0x1
#define TLSRX_HDR_PKT_MAC_ERROR_V(x) ((x) << TLSRX_HDR_PKT_MAC_ERROR_S)
#define TLSRX_HDR_PKT_MAC_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_MAC_ERROR_S) & TLSRX_HDR_PKT_MAC_ERROR_M)
#define TLSRX_HDR_PKT_MAC_ERROR_F TLSRX_HDR_PKT_MAC_ERROR_V(1U)

#define TLSRX_HDR_PKT_ERROR_M 0x1F
#define CONTENT_TYPE_ERROR 0x7F

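/* ULP_TX memory read/write command */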
struct ulp_mem_rw {
	__be32 cmd;
	__be32 len16;		/* command length */
	__be32 dlen;		/* data length in 32-byte units */
	__be32 lock_addr;
};

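/* firmware work request header for programming a TLS key context */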
struct tls_key_wr {
	__be32 op_to_compl;
	__be32 flowid_len16;
	__be32 ftid;
	u8 reneg_to_write_rx;
	u8 protocol;
	__be16 mfs;
};

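/* complete key-programming request: WR header, memory-write command and
 * immediate-data sub-command carrying the key material
 */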
struct tls_key_req {
	struct tls_key_wr wr;
	struct ulp_mem_rw req;
	struct ulptx_idata sc_imm;
};
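/*
 * This lives in skb->cb and is used to chain WRs in a TX queue.
 */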
struct wr_skb_cb {
	struct l2t_skb_cb l2t;		/* reserve space for l2t CB */
	struct sk_buff *next_wr;	/* next write request */
};
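/* control-block state for skbs sitting on a socket's backlog */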
struct blog_skb_cb {
	void (*backlog_rcv)(struct sock *sk, struct sk_buff *skb);
	struct chtls_dev *cdev;
};
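/*
 * Similar to tcp_skb_cb but with ULP elements added to support TLS,
 * etc.
 */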
struct ulp_skb_cb {
	struct wr_skb_cb wr;		/* reserve space for write request */
	u16 flags;			/* TCP-like flags */
	u8 psh;
	u8 ulp_mode;			/* ULP mode/submode of sk_buff */
	u32 seq;			/* TCP sequence number */
	union { /* ULP-specific fields */
		struct {
			u8  type;
			u8  ofld;
			u8  iv;
		} tls;
	} ulp;
};

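/* accessors for the driver-private skb->cb area */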
#define ULP_SKB_CB(skb) ((struct ulp_skb_cb *)&((skb)->cb[0]))
#define BLOG_SKB_CB(skb) ((struct blog_skb_cb *)(skb)->cb)
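/* flag values for ulp_skb_cb.flags */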
enum {
	ULPCB_FLAG_NEED_HDR  = 1 << 0,	/* packet needs a header */
	ULPCB_FLAG_NO_APPEND = 1 << 1,	/* don't grow this skb */
	ULPCB_FLAG_BARRIER   = 1 << 2,	/* set TX_WAIT_IDLE after sending */
	ULPCB_FLAG_HOLD      = 1 << 3,	/* skb not ready for Tx yet */
	ULPCB_FLAG_COMPL     = 1 << 4,	/* request WR completion */
	ULPCB_FLAG_URG       = 1 << 5,	/* urgent data */
	ULPCB_FLAG_TLS_HDR   = 1 << 6,	/* payload with TLS header */
	ULPCB_FLAG_NO_HDR    = 1 << 7,	/* not an offload WR */
};
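/* shorthand for an skb's ULP mode and the socket's cached sendmsg page */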
#define skb_ulp_mode(skb) (ULP_SKB_CB(skb)->ulp_mode)
#define TCP_PAGE(sk) (sk->sk_frag.page)
#define TCP_OFF(sk) (sk->sk_frag.offset)

static inline struct chtls_dev *to_chtls_dev(struct tls_toe_device *tlsdev)
{
	return container_of(tlsdev, struct chtls_dev, tlsdev);
}

static inline void csk_set_flag(struct chtls_sock *csk,
				enum csk_flags flag)
{
	__set_bit(flag, &csk->flags);
}

static inline void csk_reset_flag(struct chtls_sock *csk,
				  enum csk_flags flag)
{
	__clear_bit(flag, &csk->flags);
}

static inline bool csk_conn_inline(const struct chtls_sock *csk)
{
	return test_bit(CSK_CONN_INLINE, &csk->flags);
}

static inline int csk_flag(const struct sock *sk, enum csk_flags flag)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (!csk_conn_inline(csk))
		return 0;
	return test_bit(flag, &csk->flags);
}

static inline int csk_flag_nochk(const struct chtls_sock *csk,
				 enum csk_flags flag)
{
	return test_bit(flag, &csk->flags);
}

static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}

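/* does the CPL status indicate transient "negative advice"? */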
static inline int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}

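/* run a CPL handler with the socket locked, deferring to the socket
 * backlog if it is owned by user context
 */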
static inline void process_cpl_msg(void (*fn)(struct sock *, struct sk_buff *),
				   struct sock *sk,
				   struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	bh_lock_sock(sk);
	if (unlikely(sock_owned_by_user(sk))) {
		BLOG_SKB_CB(skb)->backlog_rcv = fn;
		__sk_add_backlog(sk, skb);
	} else {
		fn(sk, skb);
	}
	bh_unlock_sock(sk);
}

static inline void chtls_sock_free(struct kref *ref)
{
	struct chtls_sock *csk = container_of(ref, struct chtls_sock,
					      kref);
	kfree(csk);
}

static inline void __chtls_sock_put(const char *fn, struct chtls_sock *csk)
{
	kref_put(&csk->kref, chtls_sock_free);
}

static inline void __chtls_sock_get(const char *fn,
				    struct chtls_sock *csk)
{
	kref_get(&csk->kref);
}

static inline void send_or_defer(struct sock *sk, struct tcp_sock *tp,
				 struct sk_buff *skb, int through_l2t)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (through_l2t) {
		/* send through L2T */
		cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
	} else {
		/* send directly */
		cxgb4_ofld_send(csk->egress_dev, skb);
	}
}

typedef int (*chtls_handler_func)(struct chtls_dev *, struct sk_buff *);
extern chtls_handler_func chtls_handlers[NUM_CPL_CMDS];
void chtls_install_cpl_ops(struct sock *sk);
int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi);
void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk);
int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk);
void chtls_close(struct sock *sk, long timeout);
int chtls_disconnect(struct sock *sk, int flags);
void chtls_shutdown(struct sock *sk, int how);
void chtls_destroy_sock(struct sock *sk);
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
		  size_t len, int nonblock, int flags, int *addr_len);
int chtls_sendpage(struct sock *sk, struct page *page,
		   int offset, size_t size, int flags);
int send_tx_flowc_wr(struct sock *sk, int compl,
		     u32 snd_nxt, u32 rcv_nxt);
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode);
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
unsigned int keyid_to_addr(int start_addr, int keyid);
void free_tls_keyid(struct sock *sk);
#endif