/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for the SMC module (socket related)
 */
11#ifndef __SMC_H
12#define __SMC_H
13
14#include <linux/socket.h>
15#include <linux/types.h>
16#include <linux/compiler.h>
17#include <net/sock.h>
18
19#include "smc_ib.h"
20
#define SMC_V1		1		/* SMC version V1 */
#define SMC_V2		2		/* SMC version V2 */
#define SMC_RELEASE	0		/* SMC release number */

#define SMCPROTO_SMC		0	/* SMC protocol, IPv4 */
#define SMCPROTO_SMC6		1	/* SMC protocol, IPv6 */

#define SMC_MAX_ISM_DEVS	8	/* max # of proposed non-native ISM
					 * devices
					 */

#define SMC_MAX_HOSTNAME_LEN	32	/* hostname length limit */
#define SMC_MAX_EID_LEN		32	/* enterprise ID length limit */

extern struct proto smc_proto;		/* SMC protocol, IPv4 variant */
extern struct proto smc_proto6;		/* SMC protocol, IPv6 variant */

/* compressed probe: the arch provides a native atomic64_t when
 * ATOMIC64_INIT is defined; used below to pick the cursor representation
 */
#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif
41
/* possible states of an SMC socket; values are explicit - do not renumber */
enum smc_state {
	SMC_ACTIVE	= 1,
	SMC_INIT	= 2,
	SMC_CLOSED	= 7,
	SMC_LISTEN	= 10,
	/* normal close */
	SMC_PEERCLOSEWAIT1	= 20,
	SMC_PEERCLOSEWAIT2	= 21,
	SMC_APPFINCLOSEWAIT	= 24,
	SMC_APPCLOSEWAIT1	= 22,
	SMC_APPCLOSEWAIT2	= 23,
	SMC_PEERFINCLOSEWAIT	= 25,
	/* abnormal close */
	SMC_PEERABORTWAIT	= 26,
	SMC_PROCESSABORT	= 27,
};
58
struct smc_link_group;	/* forward declaration; defined elsewhere */

/* common prefix of received work-request messages; only the type byte is
 * needed here to demultiplex incoming messages
 */
struct smc_wr_rx_hdr {
	u8	type;
} __aligned(1);
64
/* connection-state flags carried in a CDC message; the bitfield order is
 * mirrored per endianness so the in-memory bit positions match the wire
 * layout on both big- and little-endian hosts
 */
struct smc_cdc_conn_state_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	peer_done_writing : 1;	/* sendmsg - done */
	u8	peer_conn_closed : 1;	/* peer-side connection closed */
	u8	peer_conn_abort : 1;	/* abnormal close */
	u8	reserved : 5;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8	reserved : 5;
	u8	peer_conn_abort : 1;
	u8	peer_conn_closed : 1;
	u8	peer_done_writing : 1;
#endif
};
78
/* producer flags carried in a CDC message; endian-mirrored like
 * smc_cdc_conn_state_flags so bit positions match the wire layout
 */
struct smc_cdc_producer_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	write_blocked : 1;	/* sender out of rmbe space */
	u8	urg_data_pending : 1;	/* urgent data may follow */
	u8	urg_data_present : 1;	/* urgent data in this message */
	u8	cons_curs_upd_req : 1;	/* consumer cursor update requested */
	u8	failover_validation : 1;/* message replay due to failover */
	u8	reserved : 3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8	reserved : 3;
	u8	failover_validation : 1;
	u8	cons_curs_upd_req : 1;
	u8	urg_data_present : 1;
	u8	urg_data_pending : 1;
	u8	write_blocked : 1;
#endif
};
96
97
/* tx/rx cursor, kept in host byte order; the struct view and the 64-bit
 * view alias the same 8 bytes so a cursor can be read and updated
 * atomically where a native atomic64_t exists
 */
union smc_host_cursor {
	struct {
		u16	reserved;
		u16	wrap;		/* window wrap sequence number */
		u32	count;		/* cursor (= offset) part */
	};
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t	acurs;		/* for atomic processing */
#else
	u64		acurs;		/* for atomic processing */
#endif
} __aligned(8);
110
111
/* Connection Data Control (CDC) message, kept in host byte order;
 * field widths sum to 44 bytes (1+1+2+4+8+8+1+1+18), 8-byte aligned
 */
struct smc_host_cdc_msg {
	struct smc_wr_rx_hdr	common;		/* msg type for demultiplexing */
	u8			len;		/* length of CDC message */
	u16			seqno;		/* connection sequence number */
	u32			token;		/* alert token */
	union smc_host_cursor	prod;		/* producer cursor */
	union smc_host_cursor	cons;		/* consumer cursor,
						 * piggy-backed "ack"
						 */
	struct smc_cdc_producer_flags	prod_flags; /* conn. tx/rx status */
	struct smc_cdc_conn_state_flags	conn_state_flags; /* peer conn. status */
	u8			reserved[18];	/* pad to 44 bytes */
} __aligned(8);
125
/* state of the receive-side urgent (out-of-band) byte */
enum smc_urg_state {
	SMC_URG_VALID	= 1,			/* data present */
	SMC_URG_NOTYET	= 2,			/* data pending */
	SMC_URG_READ	= 3,			/* data was already read */
};
131
/* per-connection part of an SMC socket */
struct smc_connection {
	struct rb_node		alert_node;	/* lookup by alert token */
	struct smc_link_group	*lgr;		/* link group of connection */
	struct smc_link		*lnk;		/* assigned link */
	u32			alert_token_local; /* unique conn. id */
	u8			peer_rmbe_idx;	/* from tcp handshake */
	int			peer_rmbe_size;	/* size of peer rx buffer */
	atomic_t		peer_rmbe_space;/* remaining free bytes in peer
						 * rmbe
						 */
	int			rtoken_idx;	/* idx to peer RMB rkey/addr */

	struct smc_buf_desc	*sndbuf_desc;	/* send buffer descriptor */
	struct smc_buf_desc	*rmb_desc;	/* RMBE descriptor */
	int			rmbe_size_short;/* compressed notation */
	int			rmbe_update_limit;
						/* lower limit for consumer
						 * cursor update
						 */

	struct smc_host_cdc_msg	local_tx_ctrl;	/* host byte order staging
						 * buffer for CDC msg send
						 * and credits to peer
						 */
	union smc_host_cursor	local_tx_ctrl_fin;
						/* prod crsr - confirmed by peer
						 */
	union smc_host_cursor	tx_curs_prep;	/* tx - prepared data
						 * snd_max..wmem_alloc
						 */
	union smc_host_cursor	tx_curs_sent;	/* tx - sent data
						 * snd_nxt ?
						 */
	union smc_host_cursor	tx_curs_fin;	/* tx - confirmed by peer
						 * snd-wnd-begin ?
						 */
	atomic_t		sndbuf_space;	/* remaining space in sndbuf */
	u16			tx_cdc_seq;	/* sequence # for CDC send */
	u16			tx_cdc_seq_fin;	/* sequence # - tx completed */
	spinlock_t		send_lock;	/* protect wr_sends */
	struct delayed_work	tx_work;	/* retry of smc_cdc_msg_send */
	u32			tx_off;		/* base offset in peer rmb */

	struct smc_host_cdc_msg	local_rx_ctrl;	/* filled during event_handl.
						 * .prod cursor - confirmed by
						 * peer
						 */
	union smc_host_cursor	rx_curs_confirmed; /* confirmed to peer
						    * source of snd_una ?
						    */
	union smc_host_cursor	urg_curs;	/* points at urgent byte */
	enum smc_urg_state	urg_state;	/* state of urgent data */
	bool			urg_tx_pend;	/* urgent data staged */
	bool			urg_rx_skip_pend;
						/* indicate urgent oob data
						 * read, but previous regular
						 * data still pending
						 */
	char			urg_rx_byte;	/* urgent byte */
	atomic_t		bytes_to_rcv;	/* arrived data,
						 * not yet received
						 */
	atomic_t		splice_pending;	/* number of spliced bytes
						 * pending processing
						 */
#ifndef KERNEL_HAS_ATOMIC64
	spinlock_t		acurs_lock;	/* protect cursors */
#endif
	struct work_struct	close_work;	/* peer sent some closing */
	struct work_struct	abort_work;	/* abort the connection */
	struct tasklet_struct	rx_tsklet;	/* Receiver tasklet for SMC-D */
	u8			rx_off;		/* receive offset:
						 * 0 for SMC-R, 32 for SMC-D
						 */
	u64			peer_token;	/* SMC-D token of peer */
	u8			killed : 1;	/* abnormal termination of
						 * connection
						 */
	u8			out_of_sync : 1; /* out of sync with peer */
};
211
/* smc sock container; struct sock must stay the first member so that
 * smc_sk() can cast between the two
 */
struct smc_sock {
	struct sock		sk;
	struct socket		*clcsock;	/* internal tcp socket */
	void			(*clcsk_data_ready)(struct sock *sk);
						/* original data_ready fct. */
	struct smc_connection	conn;		/* smc connection */
	struct smc_sock		*listen_smc;	/* listen parent */
	struct work_struct	connect_work;	/* handle non-blocking connect*/
	struct work_struct	tcp_listen_work;/* handle tcp socket accepts */
	struct work_struct	smc_listen_work;/* prepare new accept socket */
	struct list_head	accept_q;	/* sockets to be accepted */
	spinlock_t		accept_q_lock;	/* protects accept_q */
	bool			use_fallback;	/* fallback to tcp */
	int			fallback_rsn;	/* reason for fallback */
	u32			peer_diagnosis;	/* decline reason from peer */
	int			sockopt_defer_accept;
						/* sockopt TCP_DEFER_ACCEPT
						 * value
						 */
	u8			wait_close_tx_prepared : 1;
						/* shutdown wr or close
						 * started, waiting for unsent
						 * data to be sent
						 */
	u8			connect_nonblock : 1;
						/* non-blocking connect in
						 * flight
						 */
	struct mutex		clcsock_release_lock;
						/* protects clcsock of a listen
						 * socket
						 */
};
245
/* upcast a generic struct sock to the SMC socket embedding it; valid
 * because sk is the first member of struct smc_sock
 */
static inline struct smc_sock *smc_sk(const struct sock *sk)
{
	struct smc_sock *smc = (struct smc_sock *)sk;

	return smc;
}
250
extern struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
extern struct workqueue_struct	*smc_close_wq;	/* wq for close work */

#define SMC_SYSTEMID_LEN		8	/* length of system identifier */

extern u8	local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */

/* 64-bit host <-> network byte order conversion */
#define ntohll(x) be64_to_cpu(x)
#define htonll(x) cpu_to_be64(x)
260
261
262static inline void hton24(u8 *net, u32 host)
263{
264 __be32 t;
265
266 t = cpu_to_be32(host);
267 memcpy(net, ((u8 *)&t) + 1, 3);
268}
269
270
271static inline u32 ntoh24(u8 *net)
272{
273 __be32 t = 0;
274
275 memcpy(((u8 *)&t) + 1, net, 3);
276 return be32_to_cpu(t);
277}
278
279#ifdef CONFIG_XFRM
280static inline bool using_ipsec(struct smc_sock *smc)
281{
282 return (smc->clcsock->sk->sk_policy[0] ||
283 smc->clcsock->sk->sk_policy[1]) ? true : false;
284}
285#else
286static inline bool using_ipsec(struct smc_sock *smc)
287{
288 return false;
289}
290#endif
291
/* take the next socket off @parent's accept queue, attaching it to
 * @new_sock when given
 */
struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock);
/* release a queued-for-accept socket that was never accepted */
void smc_close_non_accepted(struct sock *sk);

#endif	/* __SMC_H */
296