/* QLogic iSCSI Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include "qedi_hsi.h"
#include <linux/qed/qed_if.h>

#include "qedi_fw_iscsi.h"
#include "qedi_fw_scsi.h"

#define SCSI_NUM_SGES_IN_CACHE 0x4

19static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
20{
21 return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
22}
23
24static
25void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
26 struct scsi_cached_sges *ctx_data_desc,
27 struct scsi_sgl_task_params *sgl_task_params)
28{
29 u8 sge_index;
30 u8 num_sges;
31 u32 val;
32
33 num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ?
34 SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges;
35
36
37 val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
38 ctx_sgl_params->sgl_addr.lo = val;
39 val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
40 ctx_sgl_params->sgl_addr.hi = val;
41 val = cpu_to_le32(sgl_task_params->total_buffer_size);
42 ctx_sgl_params->sgl_total_length = val;
43 ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
44
45 for (sge_index = 0; sge_index < num_sges; sge_index++) {
46 val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
47 ctx_data_desc->sge[sge_index].sge_addr.lo = val;
48 val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
49 ctx_data_desc->sge[sge_index].sge_addr.hi = val;
50 val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
51 ctx_data_desc->sge[sge_index].sge_len = val;
52 }
53}
54
55static u32 calc_rw_task_size(struct iscsi_task_params *task_params,
56 enum iscsi_task_type task_type,
57 struct scsi_sgl_task_params *sgl_task_params,
58 struct scsi_dif_task_params *dif_task_params)
59{
60 u32 io_size;
61
62 if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
63 task_type == ISCSI_TASK_TYPE_TARGET_READ)
64 io_size = task_params->tx_io_size;
65 else
66 io_size = task_params->rx_io_size;
67
68 if (!io_size)
69 return 0;
70
71 if (!dif_task_params)
72 return io_size;
73
74 return !dif_task_params->dif_on_network ?
75 io_size : sgl_task_params->total_buffer_size;
76}
77
78static void
79init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags,
80 struct scsi_dif_task_params *dif_task_params)
81{
82 if (!dif_task_params)
83 return;
84
85 SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG,
86 dif_task_params->dif_block_size_log);
87 SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER,
88 dif_task_params->dif_on_network ? 1 : 0);
89 SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE,
90 dif_task_params->dif_on_host ? 1 : 0);
91}
92
/* Build the send-queue work element (SQE/WQE) for a task.
 *
 * @task_params:     task descriptor; task_params->sqe is the WQE to fill.
 *                   Silently returns when it is NULL.
 * @sgl_task_params: TX SGL description; used to choose the SGE count.
 * @dif_task_params: optional DIF parameters (consulted on the write path).
 * @pdu_header:      iSCSI PDU header (AHS length, opcode, ITT).
 * @cmd_params:      initiator command parameters (extended-CDB SGE length).
 * @task_type:       selects the WQE type and which fields get programmed.
 * @is_cleanup:      when true, emit a TASK_CLEANUP WQE and nothing else.
 */
static void init_sqe(struct iscsi_task_params *task_params,
		     struct scsi_sgl_task_params *sgl_task_params,
		     struct scsi_dif_task_params *dif_task_params,
		     struct iscsi_common_hdr *pdu_header,
		     struct scsi_initiator_cmd_params *cmd_params,
		     enum iscsi_task_type task_type,
		     bool is_cleanup)
{
	if (!task_params->sqe)
		return;

	/* Start from a zeroed WQE; only the task id is always set. */
	memset(task_params->sqe, 0, sizeof(*task_params->sqe));
	task_params->sqe->task_id = cpu_to_le16(task_params->itid);
	if (is_cleanup) {
		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
			  ISCSI_WQE_TYPE_TASK_CLEANUP);
		return;
	}

	switch (task_type) {
	case ISCSI_TASK_TYPE_INITIATOR_WRITE:
	{
		u32 buf_size = 0;
		u32 num_sges = 0;

		init_dif_context_flags(&task_params->sqe->prot_flags,
				       dif_task_params);

		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
			  ISCSI_WQE_TYPE_NORMAL);

		/* Only data-bearing writes get a buffer size / SGE count. */
		if (task_params->tx_io_size) {
			buf_size = calc_rw_task_size(task_params, task_type,
						     sgl_task_params,
						     dif_task_params);

			/* Slow SGLs use a sentinel SGE count; fast ones are
			 * capped at the slow-path threshold.
			 */
			if (scsi_is_slow_sgl(sgl_task_params->num_sges,
					     sgl_task_params->small_mid_sge))
				num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
			else
				num_sges = min(sgl_task_params->num_sges,
					       (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
		}

		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
			  num_sges);
		SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
			  buf_size);

		/* A non-zero AHS length implies an extended CDB. */
		if (GET_FIELD(pdu_header->hdr_second_dword,
			      ISCSI_CMD_HDR_TOTAL_AHS_LEN))
			SET_FIELD(task_params->sqe->contlen_cdbsize,
				  ISCSI_WQE_CDB_SIZE,
				  cmd_params->extended_cdb_sge.sge_len);
	}
	break;
	case ISCSI_TASK_TYPE_INITIATOR_READ:
		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
			  ISCSI_WQE_TYPE_NORMAL);

		if (GET_FIELD(pdu_header->hdr_second_dword,
			      ISCSI_CMD_HDR_TOTAL_AHS_LEN))
			SET_FIELD(task_params->sqe->contlen_cdbsize,
				  ISCSI_WQE_CDB_SIZE,
				  cmd_params->extended_cdb_sge.sge_len);
		break;
	case ISCSI_TASK_TYPE_LOGIN_RESPONSE:
	case ISCSI_TASK_TYPE_MIDPATH:
	{
		bool advance_statsn = true;

		if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE)
			SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
				  ISCSI_WQE_TYPE_LOGIN);
		else
			SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
				  ISCSI_WQE_TYPE_MIDDLE_PATH);

		if (task_type == ISCSI_TASK_TYPE_MIDPATH) {
			u8 opcode = GET_FIELD(pdu_header->hdr_first_byte,
					      ISCSI_COMMON_HDR_OPCODE);

			/* NOTE(review): compares ->itt against
			 * ISCSI_TTT_ALL_ONES — presumably to exclude
			 * target-initiated NOP-Ins from advancing StatSN;
			 * confirm against the firmware interface spec.
			 */
			if (opcode != ISCSI_OPCODE_TEXT_RESPONSE &&
			    (opcode != ISCSI_OPCODE_NOP_IN ||
			     pdu_header->itt == ISCSI_TTT_ALL_ONES))
				advance_statsn = false;
		}

		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
			  advance_statsn ? 1 : 0);

		if (task_params->tx_io_size) {
			SET_FIELD(task_params->sqe->contlen_cdbsize,
				  ISCSI_WQE_CONT_LEN, task_params->tx_io_size);

			if (scsi_is_slow_sgl(sgl_task_params->num_sges,
					     sgl_task_params->small_mid_sge))
				SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
					  ISCSI_WQE_NUM_SGES_SLOWIO);
			else
				SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
					  min(sgl_task_params->num_sges,
					      (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
		}
	}
	break;
	default:
		break;
	}
}
203
/* Initialize the common (task-type-independent) parts of a task context:
 * zero the whole context while preserving the CDU-validation byte, copy
 * the PDU header into the Ystorm context word-by-word, and program task
 * type and connection CID in the M/U-storm contexts.
 */
static void init_default_iscsi_task(struct iscsi_task_params *task_params,
				    struct data_hdr *pdu_header,
				    enum iscsi_task_type task_type)
{
	struct e4_iscsi_task_context *context;
	u32 val;
	u16 index;
	u8 val_byte;

	context = task_params->context;
	/* cdu_validation must survive the memset below. */
	val_byte = context->mstorm_ag_context.cdu_validation;
	memset(context, 0, sizeof(*context));
	context->mstorm_ag_context.cdu_validation = val_byte;

	/* Copy the raw PDU header dwords into the Ystorm context. */
	for (index = 0; index <
	     ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
	     index++) {
		val = cpu_to_le32(pdu_header->data[index]);
		context->ystorm_st_context.pdu_hdr.data.data[index] = val;
	}

	context->mstorm_st_context.task_type = task_type;
	context->mstorm_ag_context.task_cid =
		cpu_to_le16(task_params->conn_icid);

	SET_FIELD(context->ustorm_ag_context.flags1,
		  E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);

	context->ustorm_st_context.task_type = task_type;
	context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
	context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
}
236
237static
238void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
239 struct scsi_initiator_cmd_params *cmd)
240{
241 union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr;
242 u32 val;
243
244 if (!cmd->extended_cdb_sge.sge_len)
245 return;
246
247 SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword,
248 ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE,
249 cmd->extended_cdb_sge.sge_len);
250 val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo);
251 ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val;
252 val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi);
253 ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val;
254 val = cpu_to_le32(cmd->extended_cdb_sge.sge_len);
255 ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len = val;
256}
257
258static
259void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
260 struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
261 u32 remaining_recv_len, u32 expected_data_transfer_len,
262 u8 num_sges, bool tx_dif_conn_err_en)
263{
264 u32 val;
265
266 ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len);
267 ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
268 val = cpu_to_le32(expected_data_transfer_len);
269 ustorm_st_cxt->exp_data_transfer_len = val;
270 SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
271 SET_FIELD(ustorm_ag_cxt->flags2,
272 E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
273 tx_dif_conn_err_en ? 1 : 0);
274}
275
/* Program per-task-type expected-data accounting in the Ustorm contexts:
 * exp_data_acked for write/read task types, exp_cont_len for target
 * writes, plus the AHS-exists flag for initiator tasks carrying AHS.
 */
static
void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
					struct iscsi_conn_params *conn_params,
					enum iscsi_task_type task_type,
					u32 task_size,
					u32 exp_data_transfer_len,
					u8 total_ahs_length)
{
	u32 max_unsolicited_data = 0, val;

	if (total_ahs_length &&
	    (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
	     task_type == ISCSI_TASK_TYPE_INITIATOR_READ))
		SET_FIELD(context->ustorm_st_context.flags2,
			  USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1);

	switch (task_type) {
	case ISCSI_TASK_TYPE_INITIATOR_WRITE:
		/* Unsolicited data: whole first burst when InitialR2T is
		 * off; otherwise capped by the send-PDU size when
		 * ImmediateData is on; otherwise zero.
		 */
		if (!conn_params->initial_r2t)
			max_unsolicited_data = conn_params->first_burst_length;
		else if (conn_params->immediate_data)
			max_unsolicited_data =
				min(conn_params->first_burst_length,
				    conn_params->max_send_pdu_length);

		/* With AHS present, only the AHS (+ control size) counts
		 * as pre-acked data.
		 */
		context->ustorm_ag_context.exp_data_acked =
			cpu_to_le32(total_ahs_length == 0 ?
				    min(exp_data_transfer_len,
					max_unsolicited_data) :
				    ((u32)(total_ahs_length +
					   ISCSI_AHS_CNTL_SIZE)));
		break;
	case ISCSI_TASK_TYPE_TARGET_READ:
		val = cpu_to_le32(exp_data_transfer_len);
		context->ustorm_ag_context.exp_data_acked = val;
		break;
	case ISCSI_TASK_TYPE_INITIATOR_READ:
		context->ustorm_ag_context.exp_data_acked =
			cpu_to_le32((total_ahs_length == 0 ? 0 :
				     total_ahs_length +
				     ISCSI_AHS_CNTL_SIZE));
		break;
	case ISCSI_TASK_TYPE_TARGET_WRITE:
		val = cpu_to_le32(task_size);
		context->ustorm_ag_context.exp_cont_len = val;
		break;
	default:
		break;
	}
}
326
/* Program the RDIF (receive) and TDIF (transmit) DIF task contexts from
 * the DIF parameters. Only applies when DIF is enabled on BOTH the host
 * and network interfaces; otherwise the contexts are left untouched.
 * RDIF is used for the read direction (target write / initiator read),
 * TDIF for the write direction (target read / initiator write).
 */
static
void init_rtdif_task_context(struct rdif_task_context *rdif_context,
			     struct tdif_task_context *tdif_context,
			     struct scsi_dif_task_params *dif_task_params,
			     enum iscsi_task_type task_type)
{
	u32 val;

	if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host)
		return;

	if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE ||
	    task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
		rdif_context->app_tag_value =
			cpu_to_le16(dif_task_params->application_tag);
		rdif_context->partial_crc_value = cpu_to_le16(0xffff);
		val = cpu_to_le32(dif_task_params->initial_ref_tag);
		rdif_context->initial_ref_tag = val;
		rdif_context->app_tag_mask =
			cpu_to_le16(dif_task_params->application_tag_mask);
		SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
			  dif_task_params->crc_seed ? 1 : 0);
		SET_FIELD(rdif_context->flags0,
			  RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
			  dif_task_params->host_guard_type);
		SET_FIELD(rdif_context->flags0,
			  RDIF_TASK_CONTEXT_PROTECTION_TYPE,
			  dif_task_params->protection_type);
		/* NOTE(review): hard-coded valid=1 here, while the TDIF
		 * branch below uses initial_ref_tag_is_valid — confirm
		 * this asymmetry is intentional.
		 */
		SET_FIELD(rdif_context->flags0,
			  RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
		SET_FIELD(rdif_context->flags0,
			  RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
			  dif_task_params->keep_ref_tag_const ? 1 : 0);
		/* Validation is gated on the network interface for RDIF. */
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
			  (dif_task_params->validate_app_tag &&
			  dif_task_params->dif_on_network) ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_VALIDATE_GUARD,
			  (dif_task_params->validate_guard &&
			  dif_task_params->dif_on_network) ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
			  (dif_task_params->validate_ref_tag &&
			  dif_task_params->dif_on_network) ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_HOST_INTERFACE,
			  dif_task_params->dif_on_host ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
			  dif_task_params->dif_on_network ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_GUARD,
			  dif_task_params->forward_guard ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
			  dif_task_params->forward_app_tag ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
			  dif_task_params->forward_ref_tag ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
		/* Interval size is encoded in units of 512 bytes (log2 - 9). */
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_INTERVAL_SIZE,
			  dif_task_params->dif_block_size_log - 9);
		SET_FIELD(rdif_context->state,
			  RDIF_TASK_CONTEXT_REF_TAG_MASK,
			  dif_task_params->ref_tag_mask);
		SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
			  dif_task_params->ignore_app_tag);
	}

	if (task_type == ISCSI_TASK_TYPE_TARGET_READ ||
	    task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
		tdif_context->app_tag_value =
			cpu_to_le16(dif_task_params->application_tag);
		tdif_context->partial_crc_value_b =
			cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
		tdif_context->partial_crc_value_a =
			cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
		SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED,
			  dif_task_params->crc_seed ? 1 : 0);

		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
			  dif_task_params->tx_dif_conn_err_en ? 1 : 0);
		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
			  dif_task_params->forward_guard ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
			  dif_task_params->forward_app_tag ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
			  dif_task_params->forward_ref_tag ? 1 : 0);
		/* Same 512-byte-unit interval encoding as RDIF above. */
		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
			  dif_task_params->dif_block_size_log - 9);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_HOST_INTERFACE,
			  dif_task_params->dif_on_host ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
			  dif_task_params->dif_on_network ? 1 : 0);
		val = cpu_to_le32(dif_task_params->initial_ref_tag);
		tdif_context->initial_ref_tag = val;
		tdif_context->app_tag_mask =
			cpu_to_le16(dif_task_params->application_tag_mask);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
			  dif_task_params->host_guard_type);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_PROTECTION_TYPE,
			  dif_task_params->protection_type);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
			  dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
			  dif_task_params->keep_ref_tag_const ? 1 : 0);
		/* Validation is gated on the host interface for TDIF. */
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_VALIDATE_GUARD,
			  (dif_task_params->validate_guard &&
			  dif_task_params->dif_on_host) ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
			  (dif_task_params->validate_app_tag &&
			  dif_task_params->dif_on_host) ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
			  (dif_task_params->validate_ref_tag &&
			  dif_task_params->dif_on_host) ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_REF_TAG_MASK,
			  dif_task_params->ref_tag_mask);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
			  dif_task_params->ignore_app_tag ? 1 : 0);
	}
}
475
476static void set_local_completion_context(struct e4_iscsi_task_context *context)
477{
478 SET_FIELD(context->ystorm_st_context.state.flags,
479 YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
480 SET_FIELD(context->ustorm_st_context.flags,
481 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
482}
483
/* Initialize the full task context and SQE for a read/write task of any
 * direction (initiator read/write, target read/write). Always returns 0.
 */
static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
			      enum iscsi_task_type task_type,
			      struct iscsi_conn_params *conn_params,
			      struct iscsi_common_hdr *pdu_header,
			      struct scsi_sgl_task_params *sgl_task_params,
			      struct scsi_initiator_cmd_params *cmd_params,
			      struct scsi_dif_task_params *dif_task_params)
{
	u32 exp_data_transfer_len = conn_params->max_burst_length;
	struct e4_iscsi_task_context *cxt;
	bool slow_io = false;
	u32 task_size, val;
	u8 num_sges = 0;

	task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
				      dif_task_params);

	init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header,
				task_type);

	cxt = task_params->context;

	/* Task-type-specific Ystorm/Mstorm header fields. */
	if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
		set_local_completion_context(cxt);
	} else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
		/* R2T: desired transfer length includes the buffer offset. */
		val = cpu_to_le32(task_size +
				  ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
		cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
		cxt->mstorm_st_context.expected_itt =
			cpu_to_le32(pdu_header->itt);
	} else {
		/* Initiator read/write command. */
		val = cpu_to_le32(task_size);
		cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
			val;
		init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
						     cmd_params);
		val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
		cxt->mstorm_st_context.sense_db.lo = val;

		val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
		cxt->mstorm_st_context.sense_db.hi = val;
	}

	/* Program DIF flags + SGL on the transmit (Ystorm) or receive
	 * (Mstorm) side, depending on the data direction.
	 */
	if (task_params->tx_io_size) {
		init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
				       dif_task_params);
		init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
				       dif_task_params);
		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
				      &cxt->ystorm_st_context.state.data_desc,
				      sgl_task_params);

		slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges,
					   sgl_task_params->small_mid_sge);

		num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges,
					    (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
					    ISCSI_WQE_NUM_SGES_SLOWIO;

		if (slow_io) {
			SET_FIELD(cxt->ystorm_st_context.state.flags,
				  YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1);
		}
	} else if (task_params->rx_io_size) {
		init_dif_context_flags(&cxt->mstorm_st_context.dif_flags,
				       dif_task_params);
		init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
				      &cxt->mstorm_st_context.data_desc,
				      sgl_task_params);
		num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges,
				sgl_task_params->small_mid_sge) ?
				min_t(u16, sgl_task_params->num_sges,
				      (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
				ISCSI_WQE_NUM_SGES_SLOWIO;
		cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
	}

	/* Clamp the expected transfer length to the task size for all
	 * types except target write.
	 */
	if (exp_data_transfer_len > task_size ||
	    task_type != ISCSI_TASK_TYPE_TARGET_WRITE)
		exp_data_transfer_len = task_size;

	init_ustorm_task_contexts(&task_params->context->ustorm_st_context,
				  &task_params->context->ustorm_ag_context,
				  task_size, exp_data_transfer_len, num_sges,
				  dif_task_params ?
				  dif_task_params->tx_dif_conn_err_en : false);

	set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params,
					   task_type, task_size,
					   exp_data_transfer_len,
					   GET_FIELD(pdu_header->hdr_second_dword,
						     ISCSI_CMD_HDR_TOTAL_AHS_LEN));

	if (dif_task_params)
		init_rtdif_task_context(&task_params->context->rdif_context,
					&task_params->context->tdif_context,
					dif_task_params, task_type);

	init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header,
		 cmd_params, task_type, false);

	return 0;
}
588
589int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
590 struct iscsi_conn_params *conn_params,
591 struct scsi_initiator_cmd_params *cmd_params,
592 struct iscsi_cmd_hdr *cmd_header,
593 struct scsi_sgl_task_params *tx_sgl_params,
594 struct scsi_sgl_task_params *rx_sgl_params,
595 struct scsi_dif_task_params *dif_task_params)
596{
597 if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE))
598 return init_rw_iscsi_task(task_params,
599 ISCSI_TASK_TYPE_INITIATOR_WRITE,
600 conn_params,
601 (struct iscsi_common_hdr *)cmd_header,
602 tx_sgl_params, cmd_params,
603 dif_task_params);
604 else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ) ||
605 (task_params->rx_io_size == 0 && task_params->tx_io_size == 0))
606 return init_rw_iscsi_task(task_params,
607 ISCSI_TASK_TYPE_INITIATOR_READ,
608 conn_params,
609 (struct iscsi_common_hdr *)cmd_header,
610 rx_sgl_params, cmd_params,
611 dif_task_params);
612 else
613 return -1;
614}
615
616int init_initiator_login_request_task(struct iscsi_task_params *task_params,
617 struct iscsi_login_req_hdr *login_header,
618 struct scsi_sgl_task_params *tx_params,
619 struct scsi_sgl_task_params *rx_params)
620{
621 struct e4_iscsi_task_context *cxt;
622
623 cxt = task_params->context;
624
625 init_default_iscsi_task(task_params,
626 (struct data_hdr *)login_header,
627 ISCSI_TASK_TYPE_MIDPATH);
628
629 init_ustorm_task_contexts(&cxt->ustorm_st_context,
630 &cxt->ustorm_ag_context,
631 task_params->rx_io_size ?
632 rx_params->total_buffer_size : 0,
633 task_params->tx_io_size ?
634 tx_params->total_buffer_size : 0, 0,
635 0);
636
637 if (task_params->tx_io_size)
638 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
639 &cxt->ystorm_st_context.state.data_desc,
640 tx_params);
641
642 if (task_params->rx_io_size)
643 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
644 &cxt->mstorm_st_context.data_desc,
645 rx_params);
646
647 cxt->mstorm_st_context.rem_task_size =
648 cpu_to_le32(task_params->rx_io_size ?
649 rx_params->total_buffer_size : 0);
650
651 init_sqe(task_params, tx_params, NULL,
652 (struct iscsi_common_hdr *)login_header, NULL,
653 ISCSI_TASK_TYPE_MIDPATH, false);
654
655 return 0;
656}
657
658int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
659 struct iscsi_nop_out_hdr *nop_out_pdu_header,
660 struct scsi_sgl_task_params *tx_sgl_task_params,
661 struct scsi_sgl_task_params *rx_sgl_task_params)
662{
663 struct e4_iscsi_task_context *cxt;
664
665 cxt = task_params->context;
666
667 init_default_iscsi_task(task_params,
668 (struct data_hdr *)nop_out_pdu_header,
669 ISCSI_TASK_TYPE_MIDPATH);
670
671 if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES)
672 set_local_completion_context(task_params->context);
673
674 if (task_params->tx_io_size)
675 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
676 &cxt->ystorm_st_context.state.data_desc,
677 tx_sgl_task_params);
678
679 if (task_params->rx_io_size)
680 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
681 &cxt->mstorm_st_context.data_desc,
682 rx_sgl_task_params);
683
684 init_ustorm_task_contexts(&cxt->ustorm_st_context,
685 &cxt->ustorm_ag_context,
686 task_params->rx_io_size ?
687 rx_sgl_task_params->total_buffer_size : 0,
688 task_params->tx_io_size ?
689 tx_sgl_task_params->total_buffer_size : 0,
690 0, 0);
691
692 cxt->mstorm_st_context.rem_task_size =
693 cpu_to_le32(task_params->rx_io_size ?
694 rx_sgl_task_params->total_buffer_size :
695 0);
696
697 init_sqe(task_params, tx_sgl_task_params, NULL,
698 (struct iscsi_common_hdr *)nop_out_pdu_header, NULL,
699 ISCSI_TASK_TYPE_MIDPATH, false);
700
701 return 0;
702}
703
704int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
705 struct iscsi_logout_req_hdr *logout_hdr,
706 struct scsi_sgl_task_params *tx_params,
707 struct scsi_sgl_task_params *rx_params)
708{
709 struct e4_iscsi_task_context *cxt;
710
711 cxt = task_params->context;
712
713 init_default_iscsi_task(task_params,
714 (struct data_hdr *)logout_hdr,
715 ISCSI_TASK_TYPE_MIDPATH);
716
717 if (task_params->tx_io_size)
718 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
719 &cxt->ystorm_st_context.state.data_desc,
720 tx_params);
721
722 if (task_params->rx_io_size)
723 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
724 &cxt->mstorm_st_context.data_desc,
725 rx_params);
726
727 init_ustorm_task_contexts(&cxt->ustorm_st_context,
728 &cxt->ustorm_ag_context,
729 task_params->rx_io_size ?
730 rx_params->total_buffer_size : 0,
731 task_params->tx_io_size ?
732 tx_params->total_buffer_size : 0,
733 0, 0);
734
735 cxt->mstorm_st_context.rem_task_size =
736 cpu_to_le32(task_params->rx_io_size ?
737 rx_params->total_buffer_size : 0);
738
739 init_sqe(task_params, tx_params, NULL,
740 (struct iscsi_common_hdr *)logout_hdr, NULL,
741 ISCSI_TASK_TYPE_MIDPATH, false);
742
743 return 0;
744}
745
746int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
747 struct iscsi_tmf_request_hdr *tmf_header)
748{
749 init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header,
750 ISCSI_TASK_TYPE_MIDPATH);
751
752 init_sqe(task_params, NULL, NULL,
753 (struct iscsi_common_hdr *)tmf_header, NULL,
754 ISCSI_TASK_TYPE_MIDPATH, false);
755
756 return 0;
757}
758
759int init_initiator_text_request_task(struct iscsi_task_params *task_params,
760 struct iscsi_text_request_hdr *text_header,
761 struct scsi_sgl_task_params *tx_params,
762 struct scsi_sgl_task_params *rx_params)
763{
764 struct e4_iscsi_task_context *cxt;
765
766 cxt = task_params->context;
767
768 init_default_iscsi_task(task_params,
769 (struct data_hdr *)text_header,
770 ISCSI_TASK_TYPE_MIDPATH);
771
772 if (task_params->tx_io_size)
773 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
774 &cxt->ystorm_st_context.state.data_desc,
775 tx_params);
776
777 if (task_params->rx_io_size)
778 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
779 &cxt->mstorm_st_context.data_desc,
780 rx_params);
781
782 cxt->mstorm_st_context.rem_task_size =
783 cpu_to_le32(task_params->rx_io_size ?
784 rx_params->total_buffer_size : 0);
785
786 init_ustorm_task_contexts(&cxt->ustorm_st_context,
787 &cxt->ustorm_ag_context,
788 task_params->rx_io_size ?
789 rx_params->total_buffer_size : 0,
790 task_params->tx_io_size ?
791 tx_params->total_buffer_size : 0, 0, 0);
792
793 init_sqe(task_params, tx_params, NULL,
794 (struct iscsi_common_hdr *)text_header, NULL,
795 ISCSI_TASK_TYPE_MIDPATH, false);
796
797 return 0;
798}
799
800int init_cleanup_task(struct iscsi_task_params *task_params)
801{
802 init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
803 true);
804 return 0;
805}
806