#ifndef __CHTLS_CM_H__
#define __CHTLS_CM_H__

/* TCB ULP_TYPE field, bits 3:0 */
#define TCB_ULP_TYPE_W    0
#define TCB_ULP_TYPE_S    0
#define TCB_ULP_TYPE_M    0xfULL
#define TCB_ULP_TYPE_V(x) ((x) << TCB_ULP_TYPE_S)

/* TCB ULP_RAW field, bits 11:4 */
#define TCB_ULP_RAW_W    0
#define TCB_ULP_RAW_S    4
#define TCB_ULP_RAW_M    0xffULL
#define TCB_ULP_RAW_V(x) ((x) << TCB_ULP_RAW_S)

/* TLS-related flag bits within the TCB */
#define TF_TLS_KEY_SIZE_S    7
#define TF_TLS_KEY_SIZE_V(x) ((x) << TF_TLS_KEY_SIZE_S)

#define TF_TLS_CONTROL_S    2
#define TF_TLS_CONTROL_V(x) ((x) << TF_TLS_CONTROL_S)

#define TF_TLS_ACTIVE_S    1
#define TF_TLS_ACTIVE_V(x) ((x) << TF_TLS_ACTIVE_S)

#define TF_TLS_ENABLE_S    0
#define TF_TLS_ENABLE_V(x) ((x) << TF_TLS_ENABLE_S)

#define TF_RX_QUIESCE_S    15
#define TF_RX_QUIESCE_V(x) ((x) << TF_RX_QUIESCE_S)
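
/*
 * Example (illustrative sketch, not part of the driver): the _S/_V/_M macros
 * above build mask/value pairs for a SET_TCB_FIELD update.  Enabling TLS
 * offload on a connection composes the flag bits inside the ULP_RAW field
 * roughly as follows; ULP_MODE_TLS comes from the cxgb4 headers, and the
 * surrounding firmware call is omitted here.
 *
 *	mask = TCB_ULP_RAW_V(TCB_ULP_RAW_M) |
 *	       TCB_ULP_TYPE_V(TCB_ULP_TYPE_M);
 *	val  = TCB_ULP_RAW_V(TF_TLS_KEY_SIZE_V(1) |
 *			     TF_TLS_CONTROL_V(1) |
 *			     TF_TLS_ACTIVE_V(1) |
 *			     TF_TLS_ENABLE_V(1)) |
 *	       TCB_ULP_TYPE_V(ULP_MODE_TLS);
 */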

/* Maximum receive window supported by the hardware, in bytes */
#define MAX_RCV_WND ((1U << 27) - 1)
#define MAX_MSS     65536

/* Minimum receive window, in bytes */
#define MIN_RCV_WND (24 * 1024U)

/* True if the IPv4 address (network byte order) is in 127.0.0.0/8 */
#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))

/* Maximum length of an immediate-data ULP_TX work request */
#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)

/* A TX skb must have headroom of at least TX_HEADER_LEN bytes */
#define TX_HEADER_LEN \
	(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
#define TX_TLSHDR_LEN \
	(sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo) + \
	 sizeof(struct sge_opaque_hdr))
#define TXDATA_SKB_LEN 128
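
/*
 * Example (illustrative sketch): TX_HEADER_LEN is the headroom a plain TX
 * skb must leave for the offload work request plus the opaque SGE header,
 * so allocations typically reserve it up front:
 *
 *	skb = alloc_skb(TX_HEADER_LEN + len, GFP_KERNEL);
 *	if (skb)
 *		skb_reserve(skb, TX_HEADER_LEN);
 */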

enum {
	CPL_TX_TLS_SFO_TYPE_CCS,
	CPL_TX_TLS_SFO_TYPE_ALERT,
	CPL_TX_TLS_SFO_TYPE_HANDSHAKE,
	CPL_TX_TLS_SFO_TYPE_DATA,
	CPL_TX_TLS_SFO_TYPE_HEARTBEAT,
};

enum {
	TLS_HDR_TYPE_CCS = 20,
	TLS_HDR_TYPE_ALERT,
	TLS_HDR_TYPE_HANDSHAKE,
	TLS_HDR_TYPE_RECORD,
	TLS_HDR_TYPE_HEARTBEAT,
};
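
/*
 * Example (illustrative only): TLS_HDR_TYPE_* match the on-the-wire TLS
 * ContentType values (20..24), and CPL_TX_TLS_SFO_TYPE_* follow the same
 * order starting from 0, so one can be derived from the other by rebasing.
 * The helper name below is hypothetical:
 *
 *	static inline int tls_hdr_to_sfo_type(unsigned char hdr_type)
 *	{
 *		return hdr_type - TLS_HDR_TYPE_CCS + CPL_TX_TLS_SFO_TYPE_CCS;
 *	}
 */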

typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb);
extern struct request_sock_ops chtls_rsk_ops;

struct deferred_skb_cb {
	defer_handler_t handler;
	struct chtls_dev *dev;
};

#define DEFERRED_SKB_CB(skb) ((struct deferred_skb_cb *)(skb)->cb)
#define failover_flowc_wr_len offsetof(struct fw_flowc_wr, mnemval[3])
#define WR_SKB_CB(skb) ((struct wr_skb_cb *)(skb)->cb)
#define ACCEPT_QUEUE(sk) (&inet_csk(sk)->icsk_accept_queue.rskq_accept_head)

#define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
#define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
#define USER_MSS(tp) ((tp)->rx_opt.user_mss)
#define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
#define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count)
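
/*
 * Example (illustrative only): the accessors above are shorthand for fields
 * in tp->rx_opt, e.g. when scaling the advertised receive window:
 *
 *	struct tcp_sock *tp = tcp_sk(sk);
 *	u32 adv_wnd = tp->rcv_wnd;
 *
 *	if (WSCALE_OK(tp))
 *		adv_wnd >>= RCV_WSCALE(tp);
 */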

/* TLS skb control-block accessors */
#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
#define skb_ulp_tls_iv_imm(skb) (ULP_SKB_CB(skb)->ulp.tls.iv)

void chtls_defer_reply(struct sk_buff *skb, struct chtls_dev *dev,
		       defer_handler_t handler);
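
/*
 * Example (illustrative sketch): chtls_defer_reply() stores @handler and
 * @dev in DEFERRED_SKB_CB(skb) and queues the skb so the handler runs later
 * in process context.  A caller on an atomic path might use it like this;
 * my_deferred_handler is hypothetical and the lldi->ports[0] access assumes
 * the chtls_dev layout from chtls.h:
 *
 *	static void my_deferred_handler(struct chtls_dev *cdev,
 *					struct sk_buff *skb)
 *	{
 *		cxgb4_ofld_send(cdev->lldi->ports[0], skb);
 *	}
 *
 *	chtls_defer_reply(skb, cdev, my_deferred_handler);
 */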

/*
 * Return a non-zero value if the socket's state is one of the states in the
 * supplied bit mask.
 */
static inline unsigned int sk_in_state(const struct sock *sk,
				       unsigned int states)
{
	return states & (1 << sk->sk_state);
}
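
/*
 * Example (illustrative only): the state mask is built from TCPF_* bits,
 * which are defined as (1 << TCP_<state>) and therefore line up with the
 * shift above:
 *
 *	if (sk_in_state(sk, TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
 *		...
 */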

static void chtls_rsk_destructor(struct request_sock *req)
{
	/* intentionally empty */
}

static inline void chtls_init_rsk_ops(struct proto *chtls_tcp_prot,
				      struct request_sock_ops *chtls_tcp_ops,
				      struct proto *tcp_prot, int family)
{
	memset(chtls_tcp_ops, 0, sizeof(*chtls_tcp_ops));
	chtls_tcp_ops->family = family;
	chtls_tcp_ops->obj_size = sizeof(struct tcp_request_sock);
	chtls_tcp_ops->destructor = chtls_rsk_destructor;
	chtls_tcp_ops->slab = tcp_prot->rsk_prot->slab;
	chtls_tcp_prot->rsk_prot = chtls_tcp_ops;
}
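
/*
 * Example (illustrative sketch): the offload proto reuses TCP's
 * request_sock slab and layout; a registration-time call looks roughly like
 * this, where chtls_tcp_prot stands for the driver's own struct proto:
 *
 *	chtls_init_rsk_ops(&chtls_tcp_prot, &chtls_rsk_ops,
 *			   &tcp_prot, PF_INET);
 */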

static inline void chtls_reqsk_free(struct request_sock *req)
{
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kmem_cache_free(req->rsk_ops->slab, req);
}

#define DECLARE_TASK_FUNC(task, task_param) \
	static void task(struct work_struct *task_param)
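
/*
 * Example (illustrative only): DECLARE_TASK_FUNC() expands to a workqueue
 * handler definition.  The names below are hypothetical, and the deferq_task
 * member is assumed from the chtls_dev definition in chtls.h:
 *
 *	DECLARE_TASK_FUNC(my_defer_task, task_param)
 *	{
 *		struct chtls_dev *cdev = container_of(task_param,
 *						      struct chtls_dev,
 *						      deferq_task);
 *
 *		handle_deferred_skbs(cdev);
 *	}
 */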

static inline void sk_wakeup_sleepers(struct sock *sk, bool interruptable)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq)) {
		if (interruptable)
			wake_up_interruptible(sk_sleep(sk));
		else
			wake_up_all(sk_sleep(sk));
	}
	rcu_read_unlock();
}

static inline void chtls_set_req_port(struct request_sock *oreq,
				      __be16 source, __be16 dest)
{
	inet_rsk(oreq)->ir_rmt_port = source;
	inet_rsk(oreq)->ir_num = ntohs(dest);
}

static inline void chtls_set_req_addr(struct request_sock *oreq,
				      __be32 local_ip, __be32 peer_ip)
{
	inet_rsk(oreq)->ir_loc_addr = local_ip;
	inet_rsk(oreq)->ir_rmt_addr = peer_ip;
}
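
/*
 * Example (illustrative only): on a passive open the request_sock takes its
 * local address/port from the SYN's destination fields and the peer's from
 * the source fields:
 *
 *	chtls_set_req_port(oreq, tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
 *	chtls_set_req_addr(oreq, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
 */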

static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_dst_set(skb, NULL);
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_dst_set(skb, NULL);
	__skb_unlink(skb, &sk->sk_receive_queue);
	kfree_skb(skb);
}

static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
{
	WR_SKB_CB(skb)->next_wr = NULL;

	skb_get(skb);

	if (!csk->wr_skb_head)
		csk->wr_skb_head = skb;
	else
		WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
	csk->wr_skb_tail = skb;
}
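
/*
 * Example (illustrative sketch): skbs queued by enqueue_wr() form a singly
 * linked list of outstanding work requests threaded through
 * WR_SKB_CB()->next_wr; a matching dequeue pops from the head.  The driver's
 * real dequeue helper lives outside this header:
 *
 *	static inline struct sk_buff *dequeue_wr(struct chtls_sock *csk)
 *	{
 *		struct sk_buff *skb = csk->wr_skb_head;
 *
 *		if (skb) {
 *			csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
 *			WR_SKB_CB(skb)->next_wr = NULL;
 *		}
 *		return skb;
 *	}
 */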
#endif