1
2
3
4
5
6
7
8
9
10
11
12
13
14#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <scsi/scsi_host.h>
20#include <net/tcp.h>
21#include <net/dst.h>
22#include <linux/netdevice.h>
23#include <net/addrconf.h>
24
25#include "t4_regs.h"
26#include "t4_msg.h"
27#include "cxgb4.h"
28#include "cxgb4_uld.h"
29#include "t4fw_api.h"
30#include "l2t.h"
31#include "cxgb4i.h"
32#include "clip_tbl.h"
33
34static unsigned int dbg_level;
35
36#include "../libcxgbi.h"
37
38#ifdef CONFIG_CHELSIO_T4_DCB
39#include <net/dcbevent.h>
40#include "cxgb4_dcb.h"
41#endif
42
/* Module identity strings reported via modinfo and the load-time banner. */
#define DRV_MODULE_NAME "cxgb4i"
#define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver"
#define DRV_MODULE_VERSION "0.9.5-ko"
#define DRV_MODULE_RELDATE "Apr. 2015"

/* One-line banner printed when the module is loaded. */
static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

/* dbg_level is a bitmask of CXGBI_DBG_* flags gating log_debug() output. */
module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

/*
 * TCP receive window in bytes.  -1 (the default) means "not forced by the
 * user"; the 10G default below presumably serves as the fallback when the
 * window is derived from the link speed — confirm in the device-setup code.
 */
#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

/* TCP send window; same "-1 == auto" convention as cxgb4i_rcv_win. */
#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

/* RX bytes consumed before credits are returned to the chip. */
static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

/* Upper bound on concurrently offloaded iSCSI connections. */
static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

/* First local TCP port used when picking source ports for connections. */
static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
82
/* Signature shared by all CPL message handlers dispatched on the RX path. */
typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

/* Forward declarations for the cxgb4 upper-layer-driver (ULD) callbacks. */
static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);

/*
 * Registration descriptor handed to the cxgb4 core: queue sizing plus the
 * callbacks invoked on device add, ingress traffic, and link/adapter
 * state transitions.  LRO is disabled for the iSCSI ULD.
 */
static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.lro = false,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};
100
/*
 * SCSI midlayer host template.  Command queuing and error handling are
 * delegated to the generic libiscsi helpers; only sizing parameters
 * (queue depth, SG table, max transfer) are driver-specific here.
 */
static struct scsi_host_template cxgb4i_host_template = {
	.module = THIS_MODULE,
	.name = DRV_MODULE_NAME,
	.proc_name = DRV_MODULE_NAME,
	.can_queue = CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xFFFF,
	.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out = iscsi_eh_cmd_timed_out,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc = iscsi_target_alloc,
	/* DMA must not cross a page boundary on this hardware path. */
	.dma_boundary = PAGE_SIZE - 1,
	.this_id = -1,
	.track_queue_depth = 1,
};
120
/*
 * open-iscsi transport interface.  The split of ownership is visible in
 * the prefixes: cxgbi_* entries are shared offload helpers from
 * libcxgbi, while iscsi_* / iscsi_tcp_* entries fall back to the stock
 * libiscsi implementations.  The caps advertise that digest, padding,
 * and data placement are offloaded to the adapter.
 */
static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_MODULE_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
		CAP_DATADGST | CAP_DIGEST_OFFLOAD |
		CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible = cxgbi_attr_is_visible,
	.get_host_param = cxgbi_get_host_param,
	.set_host_param = cxgbi_set_host_param,
	/* session management */
	.create_session = cxgbi_create_session,
	.destroy_session = cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn = cxgbi_create_conn,
	.bind_conn = cxgbi_bind_conn,
	.destroy_conn = iscsi_tcp_conn_teardown,
	.start_conn = iscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.get_conn_param = iscsi_conn_get_param,
	.set_param = cxgbi_set_conn_param,
	.get_stats = cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu = iscsi_conn_send_pdu,
	/* task */
	.init_task = iscsi_tcp_task_init,
	.xmit_task = iscsi_tcp_task_xmit,
	.cleanup_task = cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu = cxgbi_conn_alloc_pdu,
	.init_pdu = cxgbi_conn_init_pdu,
	.xmit_pdu = cxgbi_conn_xmit_pdu,
	.parse_pdu_itt = cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param = cxgbi_get_ep_param,
	.ep_connect = cxgbi_ep_connect,
	.ep_poll = cxgbi_ep_poll,
	.ep_disconnect = cxgbi_ep_disconnect,
	/* Error recovery timeout handling */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
162
#ifdef CONFIG_CHELSIO_T4_DCB
/* Notifier that tracks DCB (data-center bridging) priority changes so
 * offloaded flows can follow the negotiated priority.
 */
static int
cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);

static struct notifier_block cxgb4_dcb_change = {
	.notifier_call = cxgb4_dcb_change_notify,
};
#endif

/* SCSI transport template handle, filled in at module init. */
static struct scsi_transport_template *cxgb4i_stt;
173
174
175
176
177
178
179
180
/* The RCV_BUFSIZ field of opt0 is 10 bits wide; used to clamp the
 * advertised receive window (see do_act_establish()).
 */
#define RCV_BUFSIZ_MASK 0x3FFU
/* Max payload carried as immediate data in a TX work request. */
#define MAX_IMM_TX_PKT_LEN 256

static int push_tx_frames(struct cxgbi_sock *, int);
185
186
187
188
189
190
191
192
193static inline bool is_ofld_imm(const struct sk_buff *skb)
194{
195 int len = skb->len;
196
197 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
198 len += sizeof(struct fw_ofld_tx_data_wr);
199
200 if (likely(cxgbi_skcb_test_flag((struct sk_buff *)skb, SKCBF_TX_ISO)))
201 len += sizeof(struct cpl_tx_data_iso);
202
203 return (len <= MAX_IMM_OFLD_TX_DATA_WR_LEN);
204}
205
/*
 * send_act_open_req - build and transmit an active-open (connect) CPL for
 * an IPv4 offloaded connection.
 * @csk: offload socket being connected
 * @skb: pre-allocated skb sized for the chip-specific request
 * @e:   L2T entry for the next hop (transmission waits on ARP resolution)
 *
 * opt0/opt2 carry the TCP options (keepalive, window scale, MSS index,
 * ULP mode, receive buffer) and RSS queue binding common to all chips;
 * the request layout itself differs per generation (T4/T5/T6), hence the
 * three branches.  T5/T6 additionally seed a random initial send sequence
 * number and use the wider FILTER_TUPLE params field.
 */
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	/* atid in the low bits, RSS queue id packed above it */
	unsigned int qid_atid = ((unsigned int)csk->atid) |
		(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		/* RCV_BUFSIZ is in 1KB units */
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
			(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req *req =
			(struct cpl_t5_act_open_req *)skb->head;
		/* random ISS, aligned down to a multiple of 8 minus 1 */
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);
		opt2 |= T5_ISS_VALID;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else {
		/* T6 */
		struct cpl_t6_act_open_req *req =
			(struct cpl_t6_act_open_req *)skb->head;
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);

		opt2 |= T5_ISS_VALID;
		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);
		/* T6-only trailing fields; zeroed as unused here */
		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
323
#if IS_ENABLED(CONFIG_IPV6)
/*
 * send_act_open_req6 - IPv6 counterpart of send_act_open_req().
 * @csk: offload socket being connected
 * @skb: pre-allocated skb sized for the chip-specific request
 * @e:   L2T entry for the next hop
 *
 * Identical structure to the IPv4 variant, but the 128-bit addresses are
 * copied as two big-endian 64-bit halves into the hi/lo request fields.
 */
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	/* atid in the low bits, RSS queue id packed above it */
	unsigned int qid_atid = ((unsigned int)csk->atid) |
		(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req6 *req =
			(struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
								    8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
								    8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t));
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req6 *req =
			(struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
								    8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
								    8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));
	} else {
		/* T6 */
		struct cpl_t6_act_open_req6 *req =
			(struct cpl_t6_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
								    8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
								    8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));

		/* T6-only trailing fields; zeroed as unused here */
		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
		csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif
438
/*
 * send_close_req - queue a CPL_CLOSE_CON_REQ (graceful close) for @csk.
 *
 * Consumes the pre-allocated csk->cpl_close skb (pointer cleared so it
 * cannot be reused), entails it on the write queue so it is ordered
 * behind pending TX data, and pushes immediately only once the
 * connection is established.
 */
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u.\n",
		  csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}
458
459static void abort_arp_failure(void *handle, struct sk_buff *skb)
460{
461 struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
462 struct cpl_abort_req *req;
463
464 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
465 "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
466 csk, csk->state, csk->flags, csk->tid);
467 req = (struct cpl_abort_req *)skb->data;
468 req->cmd = CPL_ABORT_NO_RST;
469 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
470}
471
/*
 * send_abort_req - abort the connection (TCP RST) for @csk.
 *
 * Ordering matters here: a FLOWC work request must be the first WR on
 * any offloaded flow, so one is emitted first if no TX has happened yet;
 * the socket is then moved to ABORTING with an abort-reply pending, and
 * the write queue is purged since an abort discards unsent data.  The
 * ARP error handler downgrades to ABORT_NO_RST if the peer is
 * unreachable (see abort_arp_failure()).
 */
static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	/* already aborting, or the pre-allocated skb/device is gone */
	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	/* NOTE(review): rsvd1 appears to flag "no TX data was ever sent" to
	 * the firmware — confirm against the CPL specification.
	 */
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		  req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
506
/*
 * send_abort_rpl - acknowledge a peer-initiated abort with CPL_ABORT_RPL.
 * @csk:        socket the abort was received on
 * @rst_status: reply command (e.g. whether the hardware should send a RST)
 *
 * Uses the pre-allocated csk->cpl_abort_rpl skb, which is consumed here.
 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, status %d.\n",
		  csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
523
524
525
526
527
528
/*
 * send_rx_credits - return RX buffer credits to the hardware.
 * @csk:     socket whose receive window should be reopened
 * @credits: number of bytes consumed by the host
 *
 * Sends a CPL_RX_DATA_ACK with RX_FORCE_ACK so the chip advances the
 * advertised TCP window.  Returns the credits actually returned, or 0 if
 * the atomic skb allocation failed (nothing was sent in that case).
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		  csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
				      csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
				       | RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}
554
555
556
557
558
559
560
/*
 * sgl_len - number of flits (8-byte units) an ULP TX scatter-gather list
 * occupies for @n DMA buffers.
 *
 * The first entry takes 2 flits (header + first address/length pair);
 * every subsequent pair of entries shares 3 flits, with a lone trailing
 * entry rounding up by one more.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	unsigned int m = n - 1;

	return (3 * m) / 2 + (m & 1) + 2;
}
566
567
568
569
570
571
572
573
574
/*
 * calc_tx_flits_ofld - flits (8-byte units) needed to transmit @skb as an
 * offload work request.
 *
 * Immediate skbs simply round their length up to whole flits.  Otherwise
 * the WR carries the headers up to the transport header inline, plus a
 * gather list covering the page fragments — with one extra gather entry
 * when the linear area extends beyond the transport header.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
587
588#define FLOWC_WR_NPARAMS_MIN 9
589static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
590{
591 int nparams, flowclen16, flowclen;
592
593 nparams = FLOWC_WR_NPARAMS_MIN;
594#ifdef CONFIG_CHELSIO_T4_DCB
595 nparams++;
596#endif
597 flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
598 flowclen16 = DIV_ROUND_UP(flowclen, 16);
599 flowclen = flowclen16 * 16;
600
601
602
603
604 if (nparamsp)
605 *nparamsp = nparams;
606 if (flowclenp)
607 *flowclenp = flowclen;
608
609 return flowclen16;
610}
611
612static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
613{
614 struct sk_buff *skb;
615 struct fw_flowc_wr *flowc;
616 int nparams, flowclen16, flowclen;
617
618#ifdef CONFIG_CHELSIO_T4_DCB
619 u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
620#endif
621 flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
622 skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
623 flowc = (struct fw_flowc_wr *)skb->head;
624 flowc->op_to_nparams =
625 htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
626 flowc->flowid_len16 =
627 htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
628 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
629 flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
630 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
631 flowc->mnemval[1].val = htonl(csk->tx_chan);
632 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
633 flowc->mnemval[2].val = htonl(csk->tx_chan);
634 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
635 flowc->mnemval[3].val = htonl(csk->rss_qid);
636 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
637 flowc->mnemval[4].val = htonl(csk->snd_nxt);
638 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
639 flowc->mnemval[5].val = htonl(csk->rcv_nxt);
640 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
641 flowc->mnemval[6].val = htonl(csk->snd_win);
642 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
643 flowc->mnemval[7].val = htonl(csk->advmss);
644 flowc->mnemval[8].mnemonic = 0;
645 flowc->mnemval[8].val = 0;
646 flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
647 if (csk->cdev->skb_iso_txhdr)
648 flowc->mnemval[8].val = cpu_to_be32(CXGBI_MAX_ISO_DATA_IN_SKB);
649 else
650 flowc->mnemval[8].val = cpu_to_be32(16128);
651#ifdef CONFIG_CHELSIO_T4_DCB
652 flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
653 if (vlan == CPL_L2T_VLAN_NONE) {
654 pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
655 csk->tid);
656 flowc->mnemval[9].val = cpu_to_be32(0);
657 } else {
658 flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
659 VLAN_PRIO_SHIFT);
660 }
661#endif
662
663 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
664
665 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
666 "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
667 csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
668 csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
669 csk->advmss);
670
671 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
672
673 return flowclen16;
674}
675
/*
 * cxgb4i_make_tx_iso_cpl - fill in the CPL_TX_DATA_ISO header for an skb
 * that the hardware will segment into multiple iSCSI PDUs.
 * @skb: TX skb; its head holds the cxgbi_iso_info prepared by libcxgbi
 * @cpl: the CPL header to populate (located inside the TX WR)
 *
 * The submode bits select header/payload CRC generation; first/last
 * slice flags and the DataSN/buffer offsets let the chip stitch a
 * multi-slice sequence together.
 */
static void
cxgb4i_make_tx_iso_cpl(struct sk_buff *skb, struct cpl_tx_data_iso *cpl)
{
	struct cxgbi_iso_info *info = (struct cxgbi_iso_info *)skb->head;
	u32 imm_en = !!(info->flags & CXGBI_ISO_INFO_IMM_ENABLE);
	u32 fslice = !!(info->flags & CXGBI_ISO_INFO_FSLICE);
	u32 lslice = !!(info->flags & CXGBI_ISO_INFO_LSLICE);
	/* SCSI commands vs. everything else select the PDU type */
	u32 pdu_type = (info->op == ISCSI_OP_SCSI_CMD) ? 0 : 1;
	/* bit 0: header digest, bit 1: data digest */
	u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3;

	cpl->op_to_scsi = cpu_to_be32(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
				      CPL_TX_DATA_ISO_FIRST_V(fslice) |
				      CPL_TX_DATA_ISO_LAST_V(lslice) |
				      CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
				      CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
				      CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
				      CPL_TX_DATA_ISO_IMMEDIATE_V(imm_en) |
				      CPL_TX_DATA_ISO_SCSI_V(pdu_type));

	cpl->ahs_len = info->ahs;
	/* mpdu is expressed in 4-byte units, rounded up */
	cpl->mpdu = cpu_to_be16(DIV_ROUND_UP(info->mpdu, 4));
	cpl->burst_size = cpu_to_be32(info->burst_size);
	cpl->len = cpu_to_be32(info->len);
	cpl->reserved2_seglen_offset =
		cpu_to_be32(CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(info->segment_offset));
	cpl->datasn_offset = cpu_to_be32(info->datasn_offset);
	cpl->buffer_offset = cpu_to_be32(info->buffer_offset);
	cpl->reserved3 = cpu_to_be32(0);
	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		  "iso: flags 0x%x, op %u, ahs %u, num_pdu %u, mpdu %u, "
		  "burst_size %u, iso_len %u\n",
		  info->flags, info->op, info->ahs, info->num_pdu,
		  info->mpdu, info->burst_size << 2, info->len);
}
710
/*
 * cxgb4i_make_tx_data_wr - prepend the TX data work request (and, for ISO
 * skbs, the CPL_TX_DATA_ISO header) onto @skb.
 * @csk:     connection the data belongs to
 * @skb:     payload skb; headroom must fit the headers pushed here
 * @dlen:    payload length carried in this skb
 * @len:     wire length (payload plus ULP digest/padding overhead)
 * @credits: WR size in 16-byte units, pre-computed by the caller
 * @compl:   request a TX completion for this WR
 */
static void
cxgb4i_make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int dlen,
		       int len, u32 credits, int compl)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct fw_ofld_tx_data_wr *req;
	struct cpl_tx_data_iso *cpl;
	u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3;
	u32 wr_ulp_mode = 0;
	u32 hdr_size = sizeof(*req);
	u32 opcode = FW_OFLD_TX_DATA_WR;
	u32 immlen = 0;
	/* T5 forces the segment out only when no digests are generated;
	 * T6 always forces — NOTE(review): confirm against the FW spec.
	 */
	u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
		    T6_TX_FORCE_F;

	/* ISO skbs use the iSCSI-specific WR and carry the ISO CPL inline */
	if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) {
		hdr_size += sizeof(struct cpl_tx_data_iso);
		opcode = FW_ISCSI_TX_DATA_WR;
		immlen += sizeof(struct cpl_tx_data_iso);
		submode |= 8;
	}

	if (is_ofld_imm(skb))
		immlen += dlen;

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, hdr_size);
	req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
					 FW_WR_COMPL_V(compl) |
					 FW_WR_IMMDLEN_V(immlen));
	req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	req->plen = cpu_to_be32(len);
	cpl = (struct cpl_tx_data_iso *)(req + 1);

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)))
		cxgb4i_make_tx_iso_cpl(skb, cpl);

	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
			      FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);

	req->tunnel_to_proxy = cpu_to_be32(wr_ulp_mode | force |
					   FW_OFLD_TX_DATA_WR_SHOVE_V(1U));

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}
759
/* ARP-failure callback for data frames: just drop the skb; connection
 * teardown/recovery is driven elsewhere (abort path).
 */
static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
764
/*
 * push_tx_frames - drain @csk's write queue into the hardware TX queue,
 * limited by the available WR credits.
 * @csk:            connection to transmit on
 * @req_completion: force a completion request on the first WR sent
 *
 * For each skb: compute the WR credit cost (immediate data vs. gather
 * list, plus ISO CPL and TX data WR headers), send a leading FLOWC WR if
 * this is the first transmission on the flow, build the TX data WR when
 * the skb still needs one, and hand the skb to the L2T transmit path.
 * Stops when credits run out.  Returns the total truesize of the skbs
 * pushed.  Caller must hold the socket lock.
 */
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			  1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && ((skb = skb_peek(&csk->write_queue)) != NULL)) {
		struct cxgbi_iso_info *iso_cpl;
		u32 dlen = skb->len;
		u32 len = skb->len;
		u32 iso_cpl_len = 0;
		u32 flowclen16 = 0;
		u32 credits_needed;
		u32 num_pdu = 1, hdr_len;

		if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))
			iso_cpl_len = sizeof(struct cpl_tx_data_iso);

		/* immediate data is copied into the WR; otherwise the WR
		 * carries a gather list sized by calc_tx_flits_ofld()
		 */
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
		else
			credits_needed =
				DIV_ROUND_UP((8 * calc_tx_flits_ofld(skb)) +
					     iso_cpl_len, 16);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
			credits_needed +=
			   DIV_ROUND_UP(sizeof(struct fw_ofld_tx_data_wr), 16);

		/* The first WR on a flow must be a FLOWC WR; account for
		 * its credits before checking whether this skb still fits.
		 */
		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
		}

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
				  csk, skb->len, skb->data_len,
				  credits_needed, csk->wr_cred);

			csk->no_tx_credits++;
			break;
		}

		csk->no_tx_credits = 0;

		__skb_unlink(skb, &csk->write_queue);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
		/* stash the credit cost for the completion handler */
		skb->csum = (__force __wsum)(credits_needed + flowclen16);
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			  csk, skb->len, skb->data_len, credits_needed,
			  csk->wr_cred, csk->wr_una_cred);

		/* ask for a completion once half the credits or half the
		 * send window is outstanding, so credits flow back
		 */
		if (!req_completion &&
		    ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
		     after(csk->write_seq, (csk->snd_una + csk->snd_win / 2))))
			req_completion = 1;

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			u32 ulp_mode = cxgbi_skcb_tx_ulp_mode(skb);

			if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) {
				iso_cpl = (struct cxgbi_iso_info *)skb->head;
				num_pdu = iso_cpl->num_pdu;
				hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb);
				/* wire length includes per-PDU digest and
				 * padding, plus the replicated headers of
				 * the additional hardware-built PDUs
				 */
				len += (cxgbi_ulp_extra_len(ulp_mode) * num_pdu) +
				       (hdr_len * (num_pdu - 1));
			} else {
				len += cxgbi_ulp_extra_len(ulp_mode);
			}

			cxgb4i_make_tx_data_wr(csk, skb, dlen, len,
					       credits_needed, req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		} else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
			/* pre-built control WR (e.g. close): piggy-back a
			 * completion request on it when credits are low
			 */
			struct cpl_close_con_req *req =
				(struct cpl_close_con_req *)skb->data;

			req->wr.wr_hi |= cpu_to_be32(FW_WR_COMPL_F);
		}

		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			  csk, csk->state, csk->flags, csk->tid, skb, len);
		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}
877
878static inline void free_atid(struct cxgbi_sock *csk)
879{
880 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
881
882 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
883 cxgb4_free_atid(lldi->tids, csk->atid);
884 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
885 cxgbi_sock_put(csk);
886 }
887}
888
/*
 * do_act_establish - handle CPL_ACT_ESTABLISH: our active open completed
 * and the connection is now established in hardware.
 *
 * Swaps the socket from its temporary atid to the permanent hardware tid,
 * seeds the TCP sequence/window state from the message, and kicks off any
 * queued transmit (or an abort if a close raced with the connect).
 */
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
			atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	/* connect completed: release the module reference taken at open */
	module_put(cdev->owner);

	/* move from atid to the hardware tid; a reference is held for the
	 * tid table entry and the atid reference is dropped in free_atid()
	 */
	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;

	/* If the receive window is larger than what fits in the 10-bit
	 * RCV_BUFSIZ field of opt0 (1KB units), pre-charge rcv_wup so the
	 * credit accounting matches what the chip actually buffered.
	 */
	if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);

	/* advertised MSS: negotiated MTU minus 40 bytes of IP+TCP header,
	 * minus 12 more if the TCP timestamp option is in use
	 */
	csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
	if (TCPOPT_TSTAMP_G(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, mss_idx %u, advmss %u.\n",
		  csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}
967
/*
 * act_open_rpl_status_to_errno - translate a CPL_ACT_OPEN_RPL hardware
 * status code into the errno reported to the upper layers.
 * Unknown statuses collapse to -EIO.
 */
static int act_open_rpl_status_to_errno(int status)
{
	if (status == CPL_ERR_CONN_RESET)
		return -ECONNREFUSED;
	if (status == CPL_ERR_ARP_MISS)
		return -EHOSTUNREACH;
	if (status == CPL_ERR_CONN_TIMEDOUT)
		return -ETIMEDOUT;
	if (status == CPL_ERR_TCAM_FULL)
		return -ENOMEM;
	if (status == CPL_ERR_CONN_EXIST)
		return -EADDRINUSE;
	return -EIO;
}
985
/*
 * csk_act_open_retry_timer - timer callback that retries a failed active open.
 *
 * Armed by do_act_open_rpl() when the reply status is CPL_ERR_CONN_EXIST.
 * Rebuilds and resends the (IPv4 or IPv6) active-open request; if the skb
 * allocation fails, the open is failed with -ENOMEM instead.
 */
static void csk_act_open_retry_timer(struct timer_list *t)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	/* hold a ref and the sock lock across the resend attempt */
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	/* T4 uses the original request layout; T5 and later use the
	 * T5-sized request (note init_act_open() distinguishes T6 as well).
	 */
	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		/* arm the ARP failure handler so an unresolved neighbour
		 * fails the open instead of leaking the skb
		 */
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);

}
1033
1034static inline bool is_neg_adv(unsigned int status)
1035{
1036 return status == CPL_ERR_RTX_NEG_ADVICE ||
1037 status == CPL_ERR_KEEPALV_NEG_ADVICE ||
1038 status == CPL_ERR_PERSIST_NEG_ADVICE;
1039}
1040
/*
 * do_act_open_rpl - handle CPL_ACT_OPEN_RPL (active-open completion).
 *
 * On CPL_ERR_CONN_EXIST the open is retried via csk_act_open_retry_timer;
 * any other non-zero status fails the open with a mapped errno.
 * Negative-advice statuses are ignored.  Consumes @skb.
 */
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	/* the reply is matched by atid; tid is only valid on success */
	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (is_neg_adv(status))
		goto rel_skb;

	/* balances the try_module_get() taken in init_act_open() */
	module_put(cdev->owner);

	/* a failed open that still allocated a hw tid must release it;
	 * TCAM_FULL/CONN_EXIST/ARP_MISS never got one
	 */
	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
				 csk->csk_family);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	/* CONN_EXIST is retried once the old connection drains; the
	 * function check avoids re-arming an already-installed retry
	 */
	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					 act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
1089
1090static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
1091{
1092 struct cxgbi_sock *csk;
1093 struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
1094 unsigned int tid = GET_TID(req);
1095 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1096 struct tid_info *t = lldi->tids;
1097
1098 csk = lookup_tid(t, tid);
1099 if (unlikely(!csk)) {
1100 pr_err("can't find connection for tid %u.\n", tid);
1101 goto rel_skb;
1102 }
1103 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
1104 (&csk->saddr), (&csk->daddr),
1105 csk, csk->state, csk->flags, csk->tid);
1106 cxgbi_sock_rcv_peer_close(csk);
1107rel_skb:
1108 __kfree_skb(skb);
1109}
1110
1111static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1112{
1113 struct cxgbi_sock *csk;
1114 struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
1115 unsigned int tid = GET_TID(rpl);
1116 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1117 struct tid_info *t = lldi->tids;
1118
1119 csk = lookup_tid(t, tid);
1120 if (unlikely(!csk)) {
1121 pr_err("can't find connection for tid %u.\n", tid);
1122 goto rel_skb;
1123 }
1124 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
1125 (&csk->saddr), (&csk->daddr),
1126 csk, csk->state, csk->flags, csk->tid);
1127 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
1128rel_skb:
1129 __kfree_skb(skb);
1130}
1131
1132static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
1133 int *need_rst)
1134{
1135 switch (abort_reason) {
1136 case CPL_ERR_BAD_SYN:
1137 case CPL_ERR_CONN_RESET:
1138 return csk->state > CTP_ESTABLISHED ?
1139 -EPIPE : -ECONNRESET;
1140 case CPL_ERR_XMIT_TIMEDOUT:
1141 case CPL_ERR_PERSIST_TIMEDOUT:
1142 case CPL_ERR_FINWAIT2_TIMEDOUT:
1143 case CPL_ERR_KEEPALIVE_TIMEDOUT:
1144 return -ETIMEDOUT;
1145 default:
1146 return -EIO;
1147 }
1148}
1149
/*
 * do_abort_req_rss - handle CPL_ABORT_REQ_RSS (hardware-initiated abort).
 *
 * Replies with CPL_ABORT_RPL and, unless an abort reply from our own
 * earlier abort is still pending, closes the connection with a mapped
 * errno.  Negative-advice aborts are ignored.  Consumes @skb.
 */
static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (is_neg_adv(req->status))
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	/* reset the flag before the flowc work below, then re-set it
	 * once we are committed to the abort path
	 */
	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	/* hw requires a flowc WR before any other WR on a connection
	 * that has not transmitted yet
	 */
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	/* if our own abort is still in flight, its reply path finishes
	 * the close; otherwise close now
	 */
	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
1197
1198static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
1199{
1200 struct cxgbi_sock *csk;
1201 struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
1202 unsigned int tid = GET_TID(rpl);
1203 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1204 struct tid_info *t = lldi->tids;
1205
1206 csk = lookup_tid(t, tid);
1207 if (!csk)
1208 goto rel_skb;
1209
1210 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
1211 (&csk->saddr), (&csk->daddr), csk,
1212 csk->state, csk->flags, csk->tid, rpl->status);
1213
1214 if (rpl->status == CPL_ERR_ABORT_FAILED)
1215 goto rel_skb;
1216
1217 cxgbi_sock_rcv_abort_rpl(csk);
1218rel_skb:
1219 __kfree_skb(skb);
1220}
1221
1222static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
1223{
1224 struct cxgbi_sock *csk;
1225 struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
1226 unsigned int tid = GET_TID(cpl);
1227 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1228 struct tid_info *t = lldi->tids;
1229
1230 csk = lookup_tid(t, tid);
1231 if (!csk) {
1232 pr_err("can't find connection for tid %u.\n", tid);
1233 } else {
1234
1235 pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
1236 spin_lock_bh(&csk->lock);
1237 send_abort_req(csk);
1238 spin_unlock_bh(&csk->lock);
1239 }
1240 __kfree_skb(skb);
1241}
1242
/*
 * do_rx_iscsi_hdr - handle CPL_ISCSI_HDR.
 *
 * The first CPL_ISCSI_HDR of a PDU carries the BHS and becomes
 * csk->skb_ulp_lhdr; a subsequent one on the same PDU is treated as
 * payload and flagged SKCBF_RX_DATA on the header skb.  The skb is
 * queued on csk->receive_queue; the matching CPL_RX_DATA_DDP later
 * marks the PDU complete.  Consumes @skb on error paths only.
 */
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		  pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	/* strip the CPL header so skb->data points at the iSCSI bytes */
	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			  csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		/* T4/T5 deliver PDUs in sequence; a seq mismatch means
		 * lost data.  T6 can reorder, so the check is skipped.
		 */
		if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) &&
		    (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		/* DataSegmentLength: 24-bit big-endian field at BHS byte 5 */
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
		/* NOTE(review): on T4 the reported length appears to
		 * include 40 extra bytes that must be subtracted here.
		 */
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		/* pad PDU length to 4 bytes, add data digest if present */
		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			  csk, skb, *bhs, hlen, dlen,
			  ntohl(*((unsigned int *)(bhs + 16))),
			  ntohl(*((unsigned int *)(bhs + 24))));

	} else {
		/* payload fragment for the PDU whose header we hold */
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			  csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
1347
/*
 * do_rx_iscsi_data - handle CPL_ISCSI_DATA.
 *
 * Queues the payload skb on csk->receive_queue and marks the pending
 * header skb (csk->skb_ulp_lhdr) with SKCBF_RX_DATA.  If no header is
 * pending, this skb itself becomes the lhdr (used by do_rx_iscsi_cmp).
 * Consumes @skb on error paths only.
 */
static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *lskb;
	u32 tid = GET_TID(cpl);
	u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb,
		  skb->len, pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	/* strip the CPL header so skb->data points at the payload */
	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr)
		csk->skb_ulp_lhdr = skb;

	lskb = csk->skb_ulp_lhdr;
	cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
		  csk, csk->state, csk->flags, skb, lskb);

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
1410
1411static void
1412cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
1413 struct sk_buff *skb, u32 ddpvld)
1414{
1415 if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
1416 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
1417 csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1418 cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
1419 }
1420
1421 if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
1422 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
1423 csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1424 cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
1425 }
1426
1427 if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
1428 log_debug(1 << CXGBI_DBG_PDU_RX,
1429 "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
1430 csk, skb, ddpvld);
1431 cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
1432 }
1433
1434 if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
1435 !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
1436 log_debug(1 << CXGBI_DBG_PDU_RX,
1437 "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
1438 csk, skb, ddpvld);
1439 cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
1440 }
1441}
1442
/*
 * do_rx_data_ddp - handle CPL_RX_DATA_DDP / CPL_RX_ISCSI_DDP.
 *
 * Marks the pending PDU (csk->skb_ulp_lhdr) complete: records the data
 * digest, applies the ddpvld status flags, sets SKCBF_RX_STATUS and
 * notifies the upper layer that a PDU is ready.  Consumes @skb.
 */
static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	/* a completion without a pending header is a protocol error */
	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	/* take ownership of the pending PDU; the next CPL_ISCSI_HDR
	 * starts a fresh one
	 */
	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	cxgb4i_process_ddpvld(csk, lskb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		  csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
1508
/*
 * do_rx_iscsi_cmp - handle CPL_RX_ISCSI_CMP (combined header + status
 * completion, T6).
 *
 * This skb carries the PDU header and completion status in one message.
 * If payload was already queued (csk->skb_ulp_lhdr set by
 * do_rx_iscsi_data), the data skb is re-queued AFTER this header skb so
 * the upper layer sees header-then-data order.  Consumes @skb on error
 * paths only.
 */
static void
do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *data_skb = NULL;
	u32 tid = GET_TID(rpl);
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);
	u32 seq = be32_to_cpu(rpl->seq);
	u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
		  "pdu_len_ddp %u, status %u.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
		  ntohs(rpl->len), pdu_len_ddp,  rpl->status);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = seq;
	cxgbi_skcb_flags(skb) = 0;
	cxgbi_skcb_rx_pdulen(skb) = 0;

	/* strip the CPL header, keep only the BHS bytes */
	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*rpl));
	__pskb_trim(skb, be16_to_cpu(rpl->len));

	csk->rcv_nxt = seq + pdu_len_ddp;

	if (csk->skb_ulp_lhdr) {
		/* the data skb queued earlier must be moved behind this
		 * header skb to preserve header-then-data ordering
		 */
		data_skb = skb_peek(&csk->receive_queue);
		if (!data_skb ||
		    !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
			pr_err("Error! freelist data not found 0x%p, tid %u\n",
			       data_skb, tid);

			goto abort_conn;
		}
		__skb_unlink(data_skb, &csk->receive_queue);

		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);

		__skb_queue_tail(&csk->receive_queue, skb);
		__skb_queue_tail(&csk->receive_queue, data_skb);
	} else {
		 __skb_queue_tail(&csk->receive_queue, skb);
	}

	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
	cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);

	cxgb4i_process_ddpvld(csk, skb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
		  csk, skb, cxgbi_skcb_flags(skb));

	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);

	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
1600
1601static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
1602{
1603 struct cxgbi_sock *csk;
1604 struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
1605 unsigned int tid = GET_TID(rpl);
1606 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1607 struct tid_info *t = lldi->tids;
1608
1609 csk = lookup_tid(t, tid);
1610 if (unlikely(!csk))
1611 pr_err("can't find connection for tid %u.\n", tid);
1612 else {
1613 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1614 "csk 0x%p,%u,0x%lx,%u.\n",
1615 csk, csk->state, csk->flags, csk->tid);
1616 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
1617 rpl->seq_vld);
1618 }
1619 __kfree_skb(skb);
1620}
1621
1622static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1623{
1624 struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1625 unsigned int tid = GET_TID(rpl);
1626 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1627 struct tid_info *t = lldi->tids;
1628 struct cxgbi_sock *csk;
1629
1630 csk = lookup_tid(t, tid);
1631 if (!csk) {
1632 pr_err("can't find conn. for tid %u.\n", tid);
1633 return;
1634 }
1635
1636 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1637 "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
1638 csk, csk->state, csk->flags, csk->tid, rpl->status);
1639
1640 if (rpl->status != CPL_ERR_NONE) {
1641 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1642 csk, tid, rpl->status);
1643 csk->err = -EINVAL;
1644 }
1645
1646 complete(&csk->cmpl);
1647
1648 __kfree_skb(skb);
1649}
1650
1651static int alloc_cpls(struct cxgbi_sock *csk)
1652{
1653 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
1654 0, GFP_KERNEL);
1655 if (!csk->cpl_close)
1656 return -ENOMEM;
1657
1658 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
1659 0, GFP_KERNEL);
1660 if (!csk->cpl_abort_req)
1661 goto free_cpls;
1662
1663 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
1664 0, GFP_KERNEL);
1665 if (!csk->cpl_abort_rpl)
1666 goto free_cpls;
1667 return 0;
1668
1669free_cpls:
1670 cxgbi_sock_free_cpl_skbs(csk);
1671 return -ENOMEM;
1672}
1673
1674static inline void l2t_put(struct cxgbi_sock *csk)
1675{
1676 if (csk->l2t) {
1677 cxgb4_l2t_release(csk->l2t);
1678 csk->l2t = NULL;
1679 cxgbi_sock_put(csk);
1680 }
1681}
1682
/*
 * release_offload_resources - tear down everything a connection holds
 * in the offload hardware: control skbs, queued WRs, the L2T entry,
 * the IPv6 CLIP entry, and the atid or hw tid.
 */
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *ndev = csk->cdev->ports[csk->port_id];
#endif

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	cxgbi_sock_purge_write_queue(csk);
	/* outstanding (un-acked) WRs exist iff credits were consumed */
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	/* a connection holds either an atid (open in progress) or a
	 * hw tid (established) -- release whichever it has, and drop
	 * the socket reference tied to the tid
	 */
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid,
				 csk->csk_family);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
}
1719
1720#ifdef CONFIG_CHELSIO_T4_DCB
1721static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
1722{
1723 return ndev->dcbnl_ops->getstate(ndev);
1724}
1725
/* Return the lowest set-bit index of @pri_mask, or 0 for an empty mask. */
static int select_priority(int pri_mask)
{
	return pri_mask ? ffs(pri_mask) - 1 : 0;
}
1732
1733static u8 get_iscsi_dcb_priority(struct net_device *ndev)
1734{
1735 int rv;
1736 u8 caps;
1737
1738 struct dcb_app iscsi_dcb_app = {
1739 .protocol = 3260
1740 };
1741
1742 rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
1743 if (rv)
1744 return 0;
1745
1746 if (caps & DCB_CAP_DCBX_VER_IEEE) {
1747 iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
1748 rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
1749 if (!rv) {
1750 iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
1751 rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
1752 }
1753 } else if (caps & DCB_CAP_DCBX_VER_CEE) {
1754 iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
1755 rv = dcb_getapp(ndev, &iscsi_dcb_app);
1756 }
1757
1758 log_debug(1 << CXGBI_DBG_ISCSI,
1759 "iSCSI priority is set to %u\n", select_priority(rv));
1760 return select_priority(rv);
1761}
1762#endif
1763
/*
 * init_act_open - start an active open (connect) on an offloaded csk.
 *
 * Acquires, in order: a neighbour reference, an atid, an L2T entry,
 * an IPv6 CLIP entry (if applicable), the active-open request skb, and
 * a module reference.  On any failure everything acquired so far is
 * released and -EINVAL is returned.  On success the request is sent
 * and the completion arrives via do_act_open_rpl().
 */
static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int rxq_idx;
	unsigned int size, size6;
	unsigned int linkspeed;
	unsigned int rcv_winf, snd_winf;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);

	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	/* kick off neighbour resolution if it isn't valid yet */
	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);	/* ref held by the atid */

#ifdef CONFIG_CHELSIO_T4_DCB
	if (get_iscsi_dcb_state(ndev))
		priority = get_iscsi_dcb_priority(ndev);

	csk->dcb_priority = priority;
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
#else
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
#endif
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_get(csk);	/* ref held by the l2t entry */

#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	/* request size differs per chip generation */
	if (is_t4(lldi->adapter_type)) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else if (is_t5(lldi->adapter_type)) {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	} else {
		size = sizeof(struct cpl_t6_act_open_req);
		size6 = sizeof(struct cpl_t6_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
	/* spread tx/rx queues across ports */
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
	cdev->rxq_idx_cntr++;
	csk->rss_qid = lldi->rxq_ids[rxq_idx];
	linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
	csk->snd_win = cxgb4i_snd_win;
	csk->rcv_win = cxgb4i_rcv_win;
	/* module params <= 0 mean "auto": scale the default 10G window
	 * by the link speed
	 */
	if (cxgb4i_rcv_win <= 0) {
		csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
		rcv_winf = linkspeed / SPEED_10000;
		if (rcv_winf)
			csk->rcv_win *= rcv_winf;
	}
	if (cxgb4i_snd_win <= 0) {
		csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
		snd_winf = linkspeed / SPEED_10000;
		if (snd_winf)
			csk->snd_win *= snd_winf;
	}
	/* reserve credits for the abort req we may need to send */
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);


	/* released by do_act_open_rpl() when the open completes */
	if (!try_module_get(cdev->owner)) {
		pr_err("%s, try_module_get failed.\n", ndev->name);
		goto rel_resource;
	}

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
rel_resource_without_clip:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}
1924
/* CPL message dispatch table, indexed by CPL opcode.  Each handler owns
 * the skb it is given and must free it.
 */
static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_data,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
	[CPL_RX_DATA] = do_rx_data,
};
1941
1942static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
1943{
1944 int rc;
1945
1946 if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
1947 cxgb4i_max_connect = CXGB4I_MAX_CONN;
1948
1949 rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
1950 cxgb4i_max_connect);
1951 if (rc < 0)
1952 return rc;
1953
1954 cdev->csk_release_offload_resources = release_offload_resources;
1955 cdev->csk_push_tx_frames = push_tx_frames;
1956 cdev->csk_send_abort_req = send_abort_req;
1957 cdev->csk_send_close_req = send_close_req;
1958 cdev->csk_send_rx_credits = send_rx_credits;
1959 cdev->csk_alloc_cpls = alloc_cpls;
1960 cdev->csk_init_act_open = init_act_open;
1961
1962 pr_info("cdev 0x%p, offload up, added.\n", cdev);
1963 return 0;
1964}
1965
/*
 * ulp_mem_io_set_hdr - build the ULP_TX memory-write work request header
 * and the immediate-data sub-command that precedes the pagepod payload.
 *
 * On T5+ the payload is carried as immediate data (T5_ULP_MEMIO_IMM);
 * T4 uses ordered memory writes instead.
 */
static inline void
ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
		   struct ulp_mem_io *req,
		   unsigned int wr_len, unsigned int dlen,
		   unsigned int pm_addr,
		   int tid)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
		FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
		ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
		T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
	/* NOTE(review): length and address are encoded in 32-byte units
	 * (hence the >> 5) -- confirm against the hardware WR spec
	 */
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}
1989
1990static struct sk_buff *
1991ddp_ppod_init_idata(struct cxgbi_device *cdev,
1992 struct cxgbi_ppm *ppm,
1993 unsigned int idx, unsigned int npods,
1994 unsigned int tid)
1995{
1996 unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
1997 unsigned int dlen = npods << PPOD_SIZE_SHIFT;
1998 unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
1999 sizeof(struct ulptx_idata) + dlen, 16);
2000 struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
2001
2002 if (!skb) {
2003 pr_err("%s: %s idx %u, npods %u, OOM.\n",
2004 __func__, ppm->ndev->name, idx, npods);
2005 return NULL;
2006 }
2007
2008 ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
2009 pm_addr, tid);
2010
2011 return skb;
2012}
2013
2014static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
2015 struct cxgbi_task_tag_info *ttinfo,
2016 unsigned int idx, unsigned int npods,
2017 struct scatterlist **sg_pp,
2018 unsigned int *sg_off)
2019{
2020 struct cxgbi_device *cdev = csk->cdev;
2021 struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
2022 csk->tid);
2023 struct ulp_mem_io *req;
2024 struct ulptx_idata *idata;
2025 struct cxgbi_pagepod *ppod;
2026 int i;
2027
2028 if (!skb)
2029 return -ENOMEM;
2030
2031 req = (struct ulp_mem_io *)skb->head;
2032 idata = (struct ulptx_idata *)(req + 1);
2033 ppod = (struct cxgbi_pagepod *)(idata + 1);
2034
2035 for (i = 0; i < npods; i++, ppod++)
2036 cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
2037
2038 cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
2039 cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
2040 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
2041
2042 spin_lock_bh(&csk->lock);
2043 cxgbi_sock_skb_entail(csk, skb);
2044 spin_unlock_bh(&csk->lock);
2045
2046 return 0;
2047}
2048
2049static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
2050 struct cxgbi_task_tag_info *ttinfo)
2051{
2052 unsigned int pidx = ttinfo->idx;
2053 unsigned int npods = ttinfo->npods;
2054 unsigned int i, cnt;
2055 int err = 0;
2056 struct scatterlist *sg = ttinfo->sgl;
2057 unsigned int offset = 0;
2058
2059 ttinfo->cid = csk->port_id;
2060
2061 for (i = 0; i < npods; i += cnt, pidx += cnt) {
2062 cnt = npods - i;
2063
2064 if (cnt > ULPMEM_IDATA_MAX_NPPODS)
2065 cnt = ULPMEM_IDATA_MAX_NPPODS;
2066 err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
2067 &sg, &offset);
2068 if (err < 0)
2069 break;
2070 }
2071
2072 return err;
2073}
2074
/*
 * ddp_setup_conn_pgidx - program the connection's DDP page-size index
 * into its TCB (bits 9:8, mask 0x3 << 8) via CPL_SET_TCB_FIELD, then
 * wait for do_set_tcb_rpl() to signal completion.
 *
 * Returns 0 for pg_idx 0 / out-of-range (nothing to do), -ENOMEM on
 * allocation failure, otherwise csk->err as set by the reply handler.
 *
 * NOTE(review): builds the request with csk->tid and ignores the @tid
 * parameter, unlike ddp_setup_conn_digest() which uses @tid -- confirm
 * the two are always equal at the call sites.
 */
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;


	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	/* request a reply so the waiter below can be completed */
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}
2107
/*
 * ddp_setup_conn_digest - enable/disable iSCSI header and data digest
 * (CRC) offload for a connection via CPL_SET_TCB_FIELD.
 *
 * Records the resulting digest lengths in csk->hcrc_len/dcrc_len (4
 * bytes when enabled, 0 otherwise).  Synchronous like
 * ddp_setup_conn_pgidx(): a reply is requested and the caller waits on
 * csk->cmpl.  Returns 0 when both digests are off (nothing to program),
 * -ENOMEM on allocation failure, otherwise csk->err from the reply.
 */
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);

	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	/* update only the 2-bit CRC-enable field at bit 4 of the TCB word */
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	/* send and wait for the CPL reply to complete csk->cmpl */
	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}
2143
2144static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
2145{
2146 return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
2147 (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
2148}
2149
2150static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
2151{
2152 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
2153 struct net_device *ndev = cdev->ports[0];
2154 struct cxgbi_tag_format tformat;
2155 int i, err;
2156
2157 if (!lldi->vr->iscsi.size) {
2158 pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
2159 return -EACCES;
2160 }
2161
2162 cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
2163
2164 memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
2165 for (i = 0; i < 4; i++)
2166 tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
2167 & 0xF;
2168 cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
2169
2170 pr_info("iscsi_edram.start 0x%x iscsi_edram.size 0x%x",
2171 lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size);
2172
2173 err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat,
2174 lldi->vr->iscsi.size, lldi->iscsi_llimit,
2175 lldi->vr->iscsi.start, 2,
2176 lldi->vr->ppod_edram.start,
2177 lldi->vr->ppod_edram.size);
2178
2179 if (err < 0)
2180 return err;
2181
2182 cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
2183 cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
2184 cdev->csk_ddp_set_map = ddp_set_map;
2185 cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
2186 lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
2187 cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
2188 lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
2189 cdev->cdev2ppm = cdev2ppm;
2190
2191 return 0;
2192}
2193
2194static bool is_memfree(struct adapter *adap)
2195{
2196 u32 io;
2197
2198 io = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
2199 if (is_t5(adap->params.chip)) {
2200 if ((io & EXT_MEM0_ENABLE_F) || (io & EXT_MEM1_ENABLE_F))
2201 return false;
2202 } else if (io & EXT_MEM_ENABLE_F) {
2203 return false;
2204 }
2205
2206 return true;
2207}
2208
/*
 * t4_uld_add - ULD "add" callback: bind a cxgbi device to a cxgb4
 * adapter.
 *
 * Registers a cxgbi device, copies the LLD info into its private area,
 * initializes DDP and offload support, and creates one iSCSI host per
 * port.  On success returns the new cxgbi_device (used as the ULD
 * handle).
 *
 * NOTE(review): failure paths are inconsistent -- registration failure
 * returns NULL while later failures return ERR_PTR(-ENOMEM) (even when
 * rc carried a different error).  Confirm the caller accepts both.
 */
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	struct net_device *ndev;
	struct adapter *adap;
	struct tid_info *t;
	u32 max_cmds = CXGB4I_SCSI_HOST_QDEPTH;
	u32 max_conn = CXGBI_MAX_CONN;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	/* keep a private copy of the LLD info; cdev2ppm() etc. read it */
	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	/* T6+ parts do not use the host-driven rx-credit return scheme */
	cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
				 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;
	cdev->owner = THIS_MODULE;

	cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc);
		goto err_out;
	}

	/* scale connection/command limits to the adapter's resources */
	ndev = cdev->ports[0];
	adap = netdev2adap(ndev);
	if (adap) {
		t = &adap->tids;
		if (t->ntids <= CXGBI_MAX_CONN)
			max_conn = t->ntids;

		/* memory-free ("SO") adapters: disable ISO and shrink
		 * the queue depth
		 */
		if (is_memfree(adap)) {
			cdev->flags |= CXGBI_FLAG_DEV_ISO_OFF;
			max_cmds = CXGB4I_SCSI_HOST_QDEPTH >> 2;

			pr_info("%s: 0x%p, tid %u, SO adapter.\n",
				ndev->name, cdev, t->ntids);
		}
	} else {
		pr_info("%s, 0x%p, NO adapter struct.\n", ndev->name, cdev);
	}


	/* iSCSI segmentation offload: T5+ with new-enough firmware only */
	if (!is_t4(lldi->adapter_type) &&
	    (lldi->fw_vers >= 0x10d2b00) &&
	    !(cdev->flags & CXGBI_FLAG_DEV_ISO_OFF))
		cdev->skb_iso_txhdr = sizeof(struct cpl_tx_data_iso);

	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	cxgb4i_host_template.can_queue = max_cmds;
	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, max_conn,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}
2304
#define RX_PULL_LEN 128
/*
 * t4_uld_rx_handler - ULD receive callback: turn an ingress message
 * into an skb and dispatch it to the matching CPL handler.
 *
 * @pgl == NULL means the message arrived inline in the response
 * descriptor; otherwise it is in a packet gather list.  Returns 0 on
 * success (including "no handler", which frees the skb), 1 on skb
 * allocation failure so the LLD can retry.
 */
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		/* inline message: 64-byte descriptor minus the rsp_ctrl
		 * trailer and the leading 8 bytes skipped via &rsp[1]
		 */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		/* sanity check: first byte of descriptor and gather list
		 * must agree (both carry the CPL opcode)
		 */
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	/* every CPL message starts with an opcode/tid header; any CPL
	 * struct works for reading the opcode
	 */
	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	} else
		cxgb4i_cplhandlers[opc](cdev, skb);

	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}
2350
2351static int t4_uld_state_change(void *handle, enum cxgb4_state state)
2352{
2353 struct cxgbi_device *cdev = handle;
2354
2355 switch (state) {
2356 case CXGB4_STATE_UP:
2357 pr_info("cdev 0x%p, UP.\n", cdev);
2358 break;
2359 case CXGB4_STATE_START_RECOVERY:
2360 pr_info("cdev 0x%p, RECOVERY.\n", cdev);
2361
2362 break;
2363 case CXGB4_STATE_DOWN:
2364 pr_info("cdev 0x%p, DOWN.\n", cdev);
2365 break;
2366 case CXGB4_STATE_DETACH:
2367 pr_info("cdev 0x%p, DETACH.\n", cdev);
2368 cxgbi_device_unregister(cdev);
2369 break;
2370 default:
2371 pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
2372 break;
2373 }
2374 return 0;
2375}
2376
#ifdef CONFIG_CHELSIO_T4_DCB
/*
 * cxgb4_dcb_change_notify - DCB event notifier: react to a change of
 * the iSCSI application priority.
 *
 * Extracts the new priority from the IEEE or CEE app-table entry (CEE
 * carries a priority bitmap, hence the ffs()), finds the cxgbi device
 * for the interface, and force-fails every active connection whose
 * priority differs so it reconnects with the new one.
 */
static int
cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
			void *data)
{
	int i, port = 0xFF;
	struct net_device *ndev;
	struct cxgbi_device *cdev = NULL;
	struct dcb_app_type *iscsi_app = data;
	struct cxgbi_ports_map *pmap;
	u8 priority;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
		    (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
			return NOTIFY_DONE;

		priority = iscsi_app->app.priority;
	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			return NOTIFY_DONE;

		if (!iscsi_app->app.priority)
			return NOTIFY_DONE;

		/* CEE: priority is a bitmap; take the lowest set bit */
		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		return NOTIFY_DONE;
	}

	/* only the iSCSI app entry (TCP port 3260) is of interest */
	if (iscsi_app->app.protocol != 3260)
		return NOTIFY_DONE;

	log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
		  iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
	if (!ndev)
		return NOTIFY_DONE;

	cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);

	dev_put(ndev);
	if (!cdev)
		return NOTIFY_DONE;

	pmap = &cdev->pmap;

	/* restart every connection whose priority no longer matches */
	for (i = 0; i < pmap->used; i++) {
		if (pmap->port_csk[i]) {
			struct cxgbi_sock *csk = pmap->port_csk[i];

			if (csk->dcb_priority != priority) {
				iscsi_conn_failure(csk->user_data,
						   ISCSI_ERR_CONN_FAILED);
				pr_info("Restarting iSCSI connection %p with "
					"priority %u->%u.\n", csk,
					csk->dcb_priority, priority);
			}
		}
	}
	return NOTIFY_OK;
}
#endif
2441
/*
 * cxgb4i_init_module - module entry point.
 *
 * Registers the iSCSI transport first and only then hooks into cxgb4,
 * so ULD callbacks never run before the transport exists.  Optionally
 * registers the DCB notifier.
 */
static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
	register_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	return 0;
}
2459
/*
 * cxgb4i_exit_module - module exit point.
 *
 * Tears down in reverse registration order: DCB notifier, ULD hook,
 * all T4 cxgbi devices, then the iSCSI transport.
 */
static void __exit cxgb4i_exit_module(void)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}
2469
2470module_init(cxgb4i_init_module);
2471module_exit(cxgb4i_exit_module);
2472