#ifndef __CHTLS_CM_H__
#define __CHTLS_CM_H__
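
/*
 * TCB ULP field layout: _W is the 32-bit word offset within the TCB, _S the
 * bit shift within that word, _M the field mask, and _V(x) shifts a value
 * into field position.
 */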
#define TCB_ULP_TYPE_W    0
#define TCB_ULP_TYPE_S    0
#define TCB_ULP_TYPE_M    0xfULL
#define TCB_ULP_TYPE_V(x) ((x) << TCB_ULP_TYPE_S)

#define TCB_ULP_RAW_W    0
#define TCB_ULP_RAW_S    4
#define TCB_ULP_RAW_M    0xffULL
#define TCB_ULP_RAW_V(x) ((x) << TCB_ULP_RAW_S)

#define TF_TLS_KEY_SIZE_S    7
#define TF_TLS_KEY_SIZE_V(x) ((x) << TF_TLS_KEY_SIZE_S)

#define TF_TLS_CONTROL_S    2
#define TF_TLS_CONTROL_V(x) ((x) << TF_TLS_CONTROL_S)

#define TF_TLS_ACTIVE_S    1
#define TF_TLS_ACTIVE_V(x) ((x) << TF_TLS_ACTIVE_S)

#define TF_TLS_ENABLE_S    0
#define TF_TLS_ENABLE_V(x) ((x) << TF_TLS_ENABLE_S)

#define TF_RX_QUIESCE_S    15
#define TF_RX_QUIESCE_V(x) ((x) << TF_RX_QUIESCE_S)
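
/*
 * Max receive window supported by HW in bytes.  Only a small part of it can
 * be set through option 0; the remainder is opened by returning RX_DATA_ACK
 * credits.
 */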
#define MAX_RCV_WND ((1U << 27) - 1)
#define MAX_MSS     65536
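
/*
 * Min receive window.  It should be large enough to accommodate receive
 * coalescing and handle jumbo frames without triggering the sender's SWS
 * avoidance.
 */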
#define MIN_RCV_WND (24 * 1024U)
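
/* True only for addresses in the IPv4 loopback range 127.0.0.0/8 */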
#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
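
/* ulp_mem_io + ulptx_idata + payload + padding */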
#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
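
/* for TX: an skb must have a headroom of at least TX_HEADER_LEN bytes */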
#define TX_HEADER_LEN \
	(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
#define TX_TLSHDR_LEN \
	(sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo) + \
	 sizeof(struct sge_opaque_hdr))
#define TXDATA_SKB_LEN 128
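
/* TLS record types as encoded in a CPL_TX_TLS_SFO work request */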
enum {
	CPL_TX_TLS_SFO_TYPE_CCS,
	CPL_TX_TLS_SFO_TYPE_ALERT,
	CPL_TX_TLS_SFO_TYPE_HANDSHAKE,
	CPL_TX_TLS_SFO_TYPE_DATA,
	CPL_TX_TLS_SFO_TYPE_HEARTBEAT,
};
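
/* TLS record-layer ContentType values (RFC 5246; heartbeat from RFC 6520) */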
enum {
	TLS_HDR_TYPE_CCS = 20,
	TLS_HDR_TYPE_ALERT,
	TLS_HDR_TYPE_HANDSHAKE,
	TLS_HDR_TYPE_RECORD,
	TLS_HDR_TYPE_HEARTBEAT,
};

typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb);
extern struct request_sock_ops chtls_rsk_ops;
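
/* Per-skb state for replies whose handling is deferred to process context;
 * stored in skb->cb and accessed through DEFERRED_SKB_CB().
 */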
struct deferred_skb_cb {
	defer_handler_t handler;
	struct chtls_dev *dev;
};

#define DEFERRED_SKB_CB(skb) ((struct deferred_skb_cb *)(skb)->cb)
#define failover_flowc_wr_len offsetof(struct fw_flowc_wr, mnemval[3])
#define WR_SKB_CB(skb) ((struct wr_skb_cb *)(skb)->cb)
#define ACCEPT_QUEUE(sk) (&inet_csk(sk)->icsk_accept_queue.rskq_accept_head)
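
/* Shorthand accessors for a connection's received TCP options */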
#define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
#define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
#define USER_MSS(tp) ((tp)->rx_opt.user_mss)
#define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
#define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count)
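
/* TLS SKB accessors */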
#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
#define skb_ulp_tls_iv_imm(skb)  (ULP_SKB_CB(skb)->ulp.tls.iv)

void chtls_defer_reply(struct sk_buff *skb, struct chtls_dev *dev,
		       defer_handler_t handler);
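
/*
 * Returns nonzero if the socket is in any of the states in the supplied
 * bitmask, e.g. sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT).
 */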
static inline unsigned int sk_in_state(const struct sock *sk,
				       unsigned int states)
{
	return states & (1 << sk->sk_state);
}

static void chtls_rsk_destructor(struct request_sock *req)
{
	/* do nothing */
}
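
/*
 * Initialize chtls' request_sock_ops from TCP's: reuse the TCP request_sock
 * slab but install chtls' own destructor.
 */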
static inline void chtls_init_rsk_ops(struct proto *chtls_tcp_prot,
				      struct request_sock_ops *chtls_tcp_ops,
				      struct proto *tcp_prot, int family)
{
	memset(chtls_tcp_ops, 0, sizeof(*chtls_tcp_ops));
	chtls_tcp_ops->family = family;
	chtls_tcp_ops->obj_size = sizeof(struct tcp_request_sock);
	chtls_tcp_ops->destructor = chtls_rsk_destructor;
	chtls_tcp_ops->slab = tcp_prot->rsk_prot->slab;
	chtls_tcp_prot->rsk_prot = chtls_tcp_ops;
}
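
/* Free a request_sock directly, dropping the reference it holds on the
 * listening socket.
 */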
static inline void chtls_reqsk_free(struct request_sock *req)
{
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kmem_cache_free(req->rsk_ops->slab, req);
}

#define DECLARE_TASK_FUNC(task, task_param) \
	static void task(struct work_struct *task_param)
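
/* Wake up any threads sleeping on the socket's wait queue */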
static inline void sk_wakeup_sleepers(struct sock *sk, bool interruptable)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq)) {
		if (interruptable)
			wake_up_interruptible(sk_sleep(sk));
		else
			wake_up_all(sk_sleep(sk));
	}
	rcu_read_unlock();
}
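
/* Record the peer's port and the local port in the request_sock; ir_num
 * holds the local port in host byte order.
 */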
static inline void chtls_set_req_port(struct request_sock *oreq,
				      __be16 source, __be16 dest)
{
	inet_rsk(oreq)->ir_rmt_port = source;
	inet_rsk(oreq)->ir_num = ntohs(dest);
}
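
/* Record the local and peer IPv4 addresses in the request_sock */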
static inline void chtls_set_req_addr(struct request_sock *oreq,
				      __be32 local_ip, __be32 peer_ip)
{
	inet_rsk(oreq)->ir_loc_addr = local_ip;
	inet_rsk(oreq)->ir_rmt_addr = peer_ip;
}
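
/* Unlink an skb from the receive queue and free it: chtls_free_skb() frees
 * unconditionally with __kfree_skb(), while chtls_kfree_skb() drops one
 * reference with kfree_skb().
 */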
static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_dst_set(skb, NULL);
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_dst_set(skb, NULL);
	__skb_unlink(skb, &sk->sk_receive_queue);
	kfree_skb(skb);
}
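
/* Append a work-request skb to the tail of the pending-WR list, taking an
 * extra reference that is held until the WR is acknowledged.
 */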
static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
{
	WR_SKB_CB(skb)->next_wr = NULL;

	/* hold an extra reference so the skb stays around until the
	 * work request completes
	 */
	skb_get(skb);

	if (!csk->wr_skb_head)
		csk->wr_skb_head = skb;
	else
		WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
	csk->wr_skb_tail = skb;
}
#endif