/*******************************************************************************
 * This file contains the iSCSI Target specific utility functions.
 ******************************************************************************/

#include <linux/list.h>
#include <linux/percpu_ida.h>
#include <net/ipv6.h>
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"

#define PRINT_BUFF(buff, len)					\
{								\
	int zzz;						\
								\
	pr_debug("%d:\n", __LINE__);				\
	for (zzz = 0; zzz < len; zzz++) {			\
		if (zzz % 16 == 0) {				\
			if (zzz)				\
				pr_debug("\n");			\
			pr_debug("%4i: ", zzz);			\
		}						\
		pr_debug("%02x ", (unsigned char) (buff)[zzz]);	\
	}							\
	if ((len + 1) % 16)					\
		pr_debug("\n");					\
}

extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;

/*
 *	Called with cmd->r2t_lock held.
 */
62int iscsit_add_r2t_to_list(
63 struct iscsi_cmd *cmd,
64 u32 offset,
65 u32 xfer_len,
66 int recovery,
67 u32 r2t_sn)
68{
69 struct iscsi_r2t *r2t;
70
71 r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
72 if (!r2t) {
73 pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
74 return -1;
75 }
76 INIT_LIST_HEAD(&r2t->r2t_list);
77
78 r2t->recovery_r2t = recovery;
79 r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
80 r2t->offset = offset;
81 r2t->xfer_len = xfer_len;
82 list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
83 spin_unlock_bh(&cmd->r2t_lock);
84
85 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
86
87 spin_lock_bh(&cmd->r2t_lock);
88 return 0;
89}
90
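/*
 *	Locate the R2T whose transfer range covers [offset, offset + length).
 */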
91struct iscsi_r2t *iscsit_get_r2t_for_eos(
92 struct iscsi_cmd *cmd,
93 u32 offset,
94 u32 length)
95{
96 struct iscsi_r2t *r2t;
97
98 spin_lock_bh(&cmd->r2t_lock);
99 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
100 if ((r2t->offset <= offset) &&
101 (r2t->offset + r2t->xfer_len) >= (offset + length)) {
102 spin_unlock_bh(&cmd->r2t_lock);
103 return r2t;
104 }
105 }
106 spin_unlock_bh(&cmd->r2t_lock);
107
108 pr_err("Unable to locate R2T for Offset: %u, Length:"
109 " %u\n", offset, length);
110 return NULL;
111}
112
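/*
 *	Return the first R2T on the command's list that has not yet been sent.
 */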
113struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
114{
115 struct iscsi_r2t *r2t;
116
117 spin_lock_bh(&cmd->r2t_lock);
118 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
119 if (!r2t->sent_r2t) {
120 spin_unlock_bh(&cmd->r2t_lock);
121 return r2t;
122 }
123 }
124 spin_unlock_bh(&cmd->r2t_lock);
125
126 pr_err("Unable to locate next R2T to send for ITT:"
127 " 0x%08x.\n", cmd->init_task_tag);
128 return NULL;
129}

/*
 *	Called with cmd->r2t_lock held.
 */
134void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
135{
136 list_del(&r2t->r2t_list);
137 kmem_cache_free(lio_r2t_cache, r2t);
138}
139
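/*
 *	Release all R2Ts still linked to the command.
 */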
140void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
141{
142 struct iscsi_r2t *r2t, *r2t_tmp;
143
144 spin_lock_bh(&cmd->r2t_lock);
145 list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
146 iscsit_free_r2t(r2t, cmd);
147 spin_unlock_bh(&cmd->r2t_lock);
148}

/*
 * May be called from software interrupt (timer) context for allocating
 * iSCSI NopINs.
 */
154struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
155{
156 struct iscsi_cmd *cmd;
157 struct se_session *se_sess = conn->sess->se_sess;
158 int size, tag;
159
160 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
161 if (tag < 0)
162 return NULL;
163
164 size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
165 cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
166 memset(cmd, 0, size);
167
168 cmd->se_cmd.map_tag = tag;
169 cmd->conn = conn;
170 cmd->data_direction = DMA_NONE;
171 INIT_LIST_HEAD(&cmd->i_conn_node);
172 INIT_LIST_HEAD(&cmd->datain_list);
173 INIT_LIST_HEAD(&cmd->cmd_r2t_list);
174 spin_lock_init(&cmd->datain_lock);
175 spin_lock_init(&cmd->dataout_timeout_lock);
176 spin_lock_init(&cmd->istate_lock);
177 spin_lock_init(&cmd->error_lock);
178 spin_lock_init(&cmd->r2t_lock);
179
180 return cmd;
181}
182EXPORT_SYMBOL(iscsit_allocate_cmd);
183
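/*
 *	Return the DataIN sequence descriptor matching seq_send_order.
 */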
184struct iscsi_seq *iscsit_get_seq_holder_for_datain(
185 struct iscsi_cmd *cmd,
186 u32 seq_send_order)
187{
188 u32 i;
189
190 for (i = 0; i < cmd->seq_count; i++)
191 if (cmd->seq_list[i].seq_send_order == seq_send_order)
192 return &cmd->seq_list[i];
193
194 return NULL;
195}
196
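/*
 *	Return the next SEQTYPE_NORMAL sequence matching cmd->seq_send_order
 *	and advance the command's sequence send order.
 */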
197struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
198{
199 u32 i;
200
201 if (!cmd->seq_list) {
202 pr_err("struct iscsi_cmd->seq_list is NULL!\n");
203 return NULL;
204 }
205
206 for (i = 0; i < cmd->seq_count; i++) {
207 if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
208 continue;
209 if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
210 cmd->seq_send_order++;
211 return &cmd->seq_list[i];
212 }
213 }
214
215 return NULL;
216}
217
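/*
 *	Look up an outstanding R2T by its R2T sequence number (R2TSN).
 */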
218struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
219 struct iscsi_cmd *cmd,
220 u32 r2t_sn)
221{
222 struct iscsi_r2t *r2t;
223
224 spin_lock_bh(&cmd->r2t_lock);
225 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
226 if (r2t->r2t_sn == r2t_sn) {
227 spin_unlock_bh(&cmd->r2t_lock);
228 return r2t;
229 }
230 }
231 spin_unlock_bh(&cmd->r2t_lock);
232
233 return NULL;
234}
235
236static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
237{
238 u32 max_cmdsn;
239 int ret;

	/*
	 * This is the proper method of checking received CmdSN against
	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
	 * of order CmdSNs due to multiple connection sessions and/or
	 * CRC failures.
	 */
247 max_cmdsn = atomic_read(&sess->max_cmd_sn);
248 if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
249 pr_err("Received CmdSN: 0x%08x is greater than"
250 " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
251 ret = CMDSN_MAXCMDSN_OVERRUN;
252
253 } else if (cmdsn == sess->exp_cmd_sn) {
254 sess->exp_cmd_sn++;
255 pr_debug("Received CmdSN matches ExpCmdSN,"
256 " incremented ExpCmdSN to: 0x%08x\n",
257 sess->exp_cmd_sn);
258 ret = CMDSN_NORMAL_OPERATION;
259
260 } else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
261 pr_debug("Received CmdSN: 0x%08x is greater"
262 " than ExpCmdSN: 0x%08x, not acknowledging.\n",
263 cmdsn, sess->exp_cmd_sn);
264 ret = CMDSN_HIGHER_THAN_EXP;
265
266 } else {
267 pr_err("Received CmdSN: 0x%08x is less than"
268 " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
269 sess->exp_cmd_sn);
270 ret = CMDSN_LOWER_THAN_EXP;
271 }
272
273 return ret;
274}

/*
 * Commands may be received out of order if MC/S is in use.
 * Ensure they are executed in CmdSN order.
 */
280int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
281 unsigned char *buf, __be32 cmdsn)
282{
283 int ret, cmdsn_ret;
284 bool reject = false;
285 u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;
286
287 mutex_lock(&conn->sess->cmdsn_mutex);
288
289 cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
290 switch (cmdsn_ret) {
291 case CMDSN_NORMAL_OPERATION:
292 ret = iscsit_execute_cmd(cmd, 0);
293 if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
294 iscsit_execute_ooo_cmdsns(conn->sess);
295 else if (ret < 0) {
296 reject = true;
297 ret = CMDSN_ERROR_CANNOT_RECOVER;
298 }
299 break;
300 case CMDSN_HIGHER_THAN_EXP:
301 ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
302 if (ret < 0) {
303 reject = true;
304 ret = CMDSN_ERROR_CANNOT_RECOVER;
305 break;
306 }
307 ret = CMDSN_HIGHER_THAN_EXP;
308 break;
309 case CMDSN_LOWER_THAN_EXP:
310 case CMDSN_MAXCMDSN_OVERRUN:
311 default:
312 cmd->i_state = ISTATE_REMOVE;
313 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		/*
		 * Existing callers of iscsit_sequence_cmd() silently ignore
		 * commands with CMDSN_LOWER_THAN_EXP, so force this return
		 * for CMDSN_MAXCMDSN_OVERRUN as well.
		 */
319 ret = CMDSN_LOWER_THAN_EXP;
320 break;
321 }
322 mutex_unlock(&conn->sess->cmdsn_mutex);
323
324 if (reject)
325 iscsit_reject_cmd(cmd, reason, buf);
326
327 return ret;
328}
329EXPORT_SYMBOL(iscsit_sequence_cmd);
330
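/*
 *	Validate an unsolicited DataOut PDU against the negotiated InitialR2T
 *	and FirstBurstLength session parameters, returning -1 after sending a
 *	CHECK_CONDITION on protocol violations.
 */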
331int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
332{
333 struct iscsi_conn *conn = cmd->conn;
334 struct se_cmd *se_cmd = &cmd->se_cmd;
335 struct iscsi_data *hdr = (struct iscsi_data *) buf;
336 u32 payload_length = ntoh24(hdr->dlength);
337
338 if (conn->sess->sess_ops->InitialR2T) {
339 pr_err("Received unexpected unsolicited data"
340 " while InitialR2T=Yes, protocol error.\n");
341 transport_send_check_condition_and_sense(se_cmd,
342 TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
343 return -1;
344 }
345
346 if ((cmd->first_burst_len + payload_length) >
347 conn->sess->sess_ops->FirstBurstLength) {
348 pr_err("Total %u bytes exceeds FirstBurstLength: %u"
349 " for this Unsolicited DataOut Burst.\n",
350 (cmd->first_burst_len + payload_length),
351 conn->sess->sess_ops->FirstBurstLength);
352 transport_send_check_condition_and_sense(se_cmd,
353 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
354 return -1;
355 }
356
357 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
358 return 0;
359
360 if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
361 ((cmd->first_burst_len + payload_length) !=
362 conn->sess->sess_ops->FirstBurstLength)) {
363 pr_err("Unsolicited non-immediate data received %u"
364 " does not equal FirstBurstLength: %u, and does"
365 " not equal ExpXferLen %u.\n",
366 (cmd->first_burst_len + payload_length),
367 conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
368 transport_send_check_condition_and_sense(se_cmd,
369 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
370 return -1;
371 }
372 return 0;
373}
374
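/*
 *	Look up a command on this connection by Initiator Task Tag (ITT).
 */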
375struct iscsi_cmd *iscsit_find_cmd_from_itt(
376 struct iscsi_conn *conn,
377 itt_t init_task_tag)
378{
379 struct iscsi_cmd *cmd;
380
381 spin_lock_bh(&conn->cmd_lock);
382 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
383 if (cmd->init_task_tag == init_task_tag) {
384 spin_unlock_bh(&conn->cmd_lock);
385 return cmd;
386 }
387 }
388 spin_unlock_bh(&conn->cmd_lock);
389
390 pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
391 init_task_tag, conn->cid);
392 return NULL;
393}
394EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
395
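/*
 *	Look up a command by ITT, skipping commands that have already received
 *	their final DataOut.  If no command is found, the remaining payload of
 *	the PDU is read from the socket and discarded.
 */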
396struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
397 struct iscsi_conn *conn,
398 itt_t init_task_tag,
399 u32 length)
400{
401 struct iscsi_cmd *cmd;
402
403 spin_lock_bh(&conn->cmd_lock);
404 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
405 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
406 continue;
407 if (cmd->init_task_tag == init_task_tag) {
408 spin_unlock_bh(&conn->cmd_lock);
409 return cmd;
410 }
411 }
412 spin_unlock_bh(&conn->cmd_lock);
413
414 pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
415 " dumping payload\n", init_task_tag, conn->cid);
416 if (length)
417 iscsit_dump_data_payload(conn, length, 1);
418
419 return NULL;
420}
421EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);
422
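/*
 *	Look up a command on this connection by Target Transfer Tag (TTT).
 */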
423struct iscsi_cmd *iscsit_find_cmd_from_ttt(
424 struct iscsi_conn *conn,
425 u32 targ_xfer_tag)
426{
427 struct iscsi_cmd *cmd = NULL;
428
429 spin_lock_bh(&conn->cmd_lock);
430 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
431 if (cmd->targ_xfer_tag == targ_xfer_tag) {
432 spin_unlock_bh(&conn->cmd_lock);
433 return cmd;
434 }
435 }
436 spin_unlock_bh(&conn->cmd_lock);
437
438 pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
439 targ_xfer_tag, conn->cid);
440 return NULL;
441}
442
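/*
 *	Search the session's inactive and active connection recovery lists for
 *	a command matching the given ITT.  Returns -2 when found on the
 *	inactive list, 0 when found on the active list, and -1 otherwise.
 */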
443int iscsit_find_cmd_for_recovery(
444 struct iscsi_session *sess,
445 struct iscsi_cmd **cmd_ptr,
446 struct iscsi_conn_recovery **cr_ptr,
447 itt_t init_task_tag)
448{
449 struct iscsi_cmd *cmd = NULL;
450 struct iscsi_conn_recovery *cr;
	/*
	 * Scan through the inactive connection recovery list's command list;
	 * a matching init_task_tag means the command is awaiting reassignment.
	 */
455 spin_lock(&sess->cr_i_lock);
456 list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
457 spin_lock(&cr->conn_recovery_cmd_lock);
458 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
459 if (cmd->init_task_tag == init_task_tag) {
460 spin_unlock(&cr->conn_recovery_cmd_lock);
461 spin_unlock(&sess->cr_i_lock);
462
463 *cr_ptr = cr;
464 *cmd_ptr = cmd;
465 return -2;
466 }
467 }
468 spin_unlock(&cr->conn_recovery_cmd_lock);
469 }
470 spin_unlock(&sess->cr_i_lock);
	/*
	 * Scan through the active connection recovery list's command list;
	 * a matching init_task_tag means the command is ready to be reassigned.
	 */
475 spin_lock(&sess->cr_a_lock);
476 list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
477 spin_lock(&cr->conn_recovery_cmd_lock);
478 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
479 if (cmd->init_task_tag == init_task_tag) {
480 spin_unlock(&cr->conn_recovery_cmd_lock);
481 spin_unlock(&sess->cr_a_lock);
482
483 *cr_ptr = cr;
484 *cmd_ptr = cmd;
485 return 0;
486 }
487 }
488 spin_unlock(&cr->conn_recovery_cmd_lock);
489 }
490 spin_unlock(&sess->cr_a_lock);
491
492 return -1;
493}
494
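/*
 *	Queue a command state on the connection's immediate queue and wake the
 *	TX thread.
 */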
495void iscsit_add_cmd_to_immediate_queue(
496 struct iscsi_cmd *cmd,
497 struct iscsi_conn *conn,
498 u8 state)
499{
500 struct iscsi_queue_req *qr;
501
502 qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
503 if (!qr) {
504 pr_err("Unable to allocate memory for"
505 " struct iscsi_queue_req\n");
506 return;
507 }
508 INIT_LIST_HEAD(&qr->qr_list);
509 qr->cmd = cmd;
510 qr->state = state;
511
512 spin_lock_bh(&conn->immed_queue_lock);
513 list_add_tail(&qr->qr_list, &conn->immed_queue_list);
514 atomic_inc(&cmd->immed_queue_count);
515 atomic_set(&conn->check_immediate_queue, 1);
516 spin_unlock_bh(&conn->immed_queue_lock);
517
518 wake_up(&conn->queues_wq);
519}
520EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);
521
522struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
523{
524 struct iscsi_queue_req *qr;
525
526 spin_lock_bh(&conn->immed_queue_lock);
527 if (list_empty(&conn->immed_queue_list)) {
528 spin_unlock_bh(&conn->immed_queue_lock);
529 return NULL;
530 }
531 qr = list_first_entry(&conn->immed_queue_list,
532 struct iscsi_queue_req, qr_list);
533
534 list_del(&qr->qr_list);
535 if (qr->cmd)
536 atomic_dec(&qr->cmd->immed_queue_count);
537 spin_unlock_bh(&conn->immed_queue_lock);
538
539 return qr;
540}
541
542static void iscsit_remove_cmd_from_immediate_queue(
543 struct iscsi_cmd *cmd,
544 struct iscsi_conn *conn)
545{
546 struct iscsi_queue_req *qr, *qr_tmp;
547
548 spin_lock_bh(&conn->immed_queue_lock);
549 if (!atomic_read(&cmd->immed_queue_count)) {
550 spin_unlock_bh(&conn->immed_queue_lock);
551 return;
552 }
553
554 list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
555 if (qr->cmd != cmd)
556 continue;
557
558 atomic_dec(&qr->cmd->immed_queue_count);
559 list_del(&qr->qr_list);
560 kmem_cache_free(lio_qr_cache, qr);
561 }
562 spin_unlock_bh(&conn->immed_queue_lock);
563
564 if (atomic_read(&cmd->immed_queue_count)) {
565 pr_err("ITT: 0x%08x immed_queue_count: %d\n",
566 cmd->init_task_tag,
567 atomic_read(&cmd->immed_queue_count));
568 }
569}
570
571int iscsit_add_cmd_to_response_queue(
572 struct iscsi_cmd *cmd,
573 struct iscsi_conn *conn,
574 u8 state)
575{
576 struct iscsi_queue_req *qr;
577
578 qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
579 if (!qr) {
580 pr_err("Unable to allocate memory for"
581 " struct iscsi_queue_req\n");
582 return -ENOMEM;
583 }
584 INIT_LIST_HEAD(&qr->qr_list);
585 qr->cmd = cmd;
586 qr->state = state;
587
588 spin_lock_bh(&conn->response_queue_lock);
589 list_add_tail(&qr->qr_list, &conn->response_queue_list);
590 atomic_inc(&cmd->response_queue_count);
591 spin_unlock_bh(&conn->response_queue_lock);
592
593 wake_up(&conn->queues_wq);
594 return 0;
595}
596
597struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
598{
599 struct iscsi_queue_req *qr;
600
601 spin_lock_bh(&conn->response_queue_lock);
602 if (list_empty(&conn->response_queue_list)) {
603 spin_unlock_bh(&conn->response_queue_lock);
604 return NULL;
605 }
606
607 qr = list_first_entry(&conn->response_queue_list,
608 struct iscsi_queue_req, qr_list);
609
610 list_del(&qr->qr_list);
611 if (qr->cmd)
612 atomic_dec(&qr->cmd->response_queue_count);
613 spin_unlock_bh(&conn->response_queue_lock);
614
615 return qr;
616}
617
618static void iscsit_remove_cmd_from_response_queue(
619 struct iscsi_cmd *cmd,
620 struct iscsi_conn *conn)
621{
622 struct iscsi_queue_req *qr, *qr_tmp;
623
624 spin_lock_bh(&conn->response_queue_lock);
625 if (!atomic_read(&cmd->response_queue_count)) {
626 spin_unlock_bh(&conn->response_queue_lock);
627 return;
628 }
629
630 list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
631 qr_list) {
632 if (qr->cmd != cmd)
633 continue;
634
635 atomic_dec(&qr->cmd->response_queue_count);
636 list_del(&qr->qr_list);
637 kmem_cache_free(lio_qr_cache, qr);
638 }
639 spin_unlock_bh(&conn->response_queue_lock);
640
641 if (atomic_read(&cmd->response_queue_count)) {
642 pr_err("ITT: 0x%08x response_queue_count: %d\n",
643 cmd->init_task_tag,
644 atomic_read(&cmd->response_queue_count));
645 }
646}
647
648bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
649{
650 bool empty;
651
652 spin_lock_bh(&conn->immed_queue_lock);
653 empty = list_empty(&conn->immed_queue_list);
654 spin_unlock_bh(&conn->immed_queue_lock);
655
656 if (!empty)
657 return empty;
658
659 spin_lock_bh(&conn->response_queue_lock);
660 empty = list_empty(&conn->response_queue_list);
661 spin_unlock_bh(&conn->response_queue_lock);
662
663 return empty;
664}
665
666void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
667{
668 struct iscsi_queue_req *qr, *qr_tmp;
669
670 spin_lock_bh(&conn->immed_queue_lock);
671 list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
672 list_del(&qr->qr_list);
673 if (qr->cmd)
674 atomic_dec(&qr->cmd->immed_queue_count);
675
676 kmem_cache_free(lio_qr_cache, qr);
677 }
678 spin_unlock_bh(&conn->immed_queue_lock);
679
680 spin_lock_bh(&conn->response_queue_lock);
681 list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
682 qr_list) {
683 list_del(&qr->qr_list);
684 if (qr->cmd)
685 atomic_dec(&qr->cmd->response_queue_count);
686
687 kmem_cache_free(lio_qr_cache, qr);
688 }
689 spin_unlock_bh(&conn->response_queue_lock);
690}
691
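/*
 *	Free per-command allocations and return the command's tag to the
 *	session's percpu_ida tag pool.
 */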
692void iscsit_release_cmd(struct iscsi_cmd *cmd)
693{
694 struct iscsi_session *sess;
695 struct se_cmd *se_cmd = &cmd->se_cmd;
696
697 if (cmd->conn)
698 sess = cmd->conn->sess;
699 else
700 sess = cmd->sess;
701
702 BUG_ON(!sess || !sess->se_sess);
703
704 kfree(cmd->buf_ptr);
705 kfree(cmd->pdu_list);
706 kfree(cmd->seq_list);
707 kfree(cmd->tmr_req);
708 kfree(cmd->iov_data);
709 kfree(cmd->text_in_ptr);
710
711 percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
712}
713EXPORT_SYMBOL(iscsit_release_cmd);
714
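/*
 *	Tear down any outstanding R2Ts, DataIN requests and queue entries
 *	associated with a command before it is released.
 */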
715void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
716{
717 struct iscsi_conn *conn = cmd->conn;
718
719 if (cmd->data_direction == DMA_TO_DEVICE) {
720 iscsit_stop_dataout_timer(cmd);
721 iscsit_free_r2ts_from_list(cmd);
722 }
723 if (cmd->data_direction == DMA_FROM_DEVICE)
724 iscsit_free_all_datain_reqs(cmd);
725
726 if (conn && check_queues) {
727 iscsit_remove_cmd_from_immediate_queue(cmd, conn);
728 iscsit_remove_cmd_from_response_queue(cmd, conn);
729 }
730
731 if (conn && conn->conn_transport->iscsit_release_cmd)
732 conn->conn_transport->iscsit_release_cmd(conn, cmd);
733}
734
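/*
 *	Release an iSCSI command, going through target core via
 *	transport_generic_free_cmd() when a se_cmd descriptor has been
 *	set up for it.
 */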
735void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
736{
737 struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
738 int rc;
739
740 __iscsit_free_cmd(cmd, shutdown);
741 if (se_cmd) {
742 rc = transport_generic_free_cmd(se_cmd, shutdown);
743 if (!rc && shutdown && se_cmd->se_sess) {
744 __iscsit_free_cmd(cmd, shutdown);
745 target_put_sess_cmd(se_cmd);
746 }
747 } else {
748 iscsit_release_cmd(cmd);
749 }
750}
751EXPORT_SYMBOL(iscsit_free_cmd);
752
753int iscsit_check_session_usage_count(struct iscsi_session *sess)
754{
755 spin_lock_bh(&sess->session_usage_lock);
756 if (sess->session_usage_count != 0) {
757 sess->session_waiting_on_uc = 1;
758 spin_unlock_bh(&sess->session_usage_lock);
759 if (in_interrupt())
760 return 2;
761
762 wait_for_completion(&sess->session_waiting_on_uc_comp);
763 return 1;
764 }
765 spin_unlock_bh(&sess->session_usage_lock);
766
767 return 0;
768}
769
770void iscsit_dec_session_usage_count(struct iscsi_session *sess)
771{
772 spin_lock_bh(&sess->session_usage_lock);
773 sess->session_usage_count--;
774
775 if (!sess->session_usage_count && sess->session_waiting_on_uc)
776 complete(&sess->session_waiting_on_uc_comp);
777
778 spin_unlock_bh(&sess->session_usage_lock);
779}
780
781void iscsit_inc_session_usage_count(struct iscsi_session *sess)
782{
783 spin_lock_bh(&sess->session_usage_lock);
784 sess->session_usage_count++;
785 spin_unlock_bh(&sess->session_usage_lock);
786}
787
788struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
789{
790 struct iscsi_conn *conn;
791
792 spin_lock_bh(&sess->conn_lock);
793 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
794 if ((conn->cid == cid) &&
795 (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
796 iscsit_inc_conn_usage_count(conn);
797 spin_unlock_bh(&sess->conn_lock);
798 return conn;
799 }
800 }
801 spin_unlock_bh(&sess->conn_lock);
802
803 return NULL;
804}
805
806struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
807{
808 struct iscsi_conn *conn;
809
810 spin_lock_bh(&sess->conn_lock);
811 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
812 if (conn->cid == cid) {
813 iscsit_inc_conn_usage_count(conn);
814 spin_lock(&conn->state_lock);
815 atomic_set(&conn->connection_wait_rcfr, 1);
816 spin_unlock(&conn->state_lock);
817 spin_unlock_bh(&sess->conn_lock);
818 return conn;
819 }
820 }
821 spin_unlock_bh(&sess->conn_lock);
822
823 return NULL;
824}
825
826void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
827{
828 spin_lock_bh(&conn->conn_usage_lock);
829 if (conn->conn_usage_count != 0) {
830 conn->conn_waiting_on_uc = 1;
831 spin_unlock_bh(&conn->conn_usage_lock);
832
833 wait_for_completion(&conn->conn_waiting_on_uc_comp);
834 return;
835 }
836 spin_unlock_bh(&conn->conn_usage_lock);
837}
838
839void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
840{
841 spin_lock_bh(&conn->conn_usage_lock);
842 conn->conn_usage_count--;
843
844 if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
845 complete(&conn->conn_waiting_on_uc_comp);
846
847 spin_unlock_bh(&conn->conn_usage_lock);
848}
849
850void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
851{
852 spin_lock_bh(&conn->conn_usage_lock);
853 conn->conn_usage_count++;
854 spin_unlock_bh(&conn->conn_usage_lock);
855}
856
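/*
 *	Allocate and queue a NopIN PDU, optionally requesting a NopOut
 *	response from the initiator and arming the response timer.
 */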
857static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
858{
859 u8 state;
860 struct iscsi_cmd *cmd;
861
862 cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
863 if (!cmd)
864 return -1;
865
866 cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
867 state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
868 ISTATE_SEND_NOPIN_NO_RESPONSE;
869 cmd->init_task_tag = RESERVED_ITT;
870 cmd->targ_xfer_tag = (want_response) ?
871 session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
872 spin_lock_bh(&conn->cmd_lock);
873 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
874 spin_unlock_bh(&conn->cmd_lock);
875
876 if (want_response)
877 iscsit_start_nopin_response_timer(conn);
878 iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
879
880 return 0;
881}
882
883static void iscsit_handle_nopin_response_timeout(unsigned long data)
884{
885 struct iscsi_conn *conn = (struct iscsi_conn *) data;
886
887 iscsit_inc_conn_usage_count(conn);
888
889 spin_lock_bh(&conn->nopin_timer_lock);
890 if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
891 spin_unlock_bh(&conn->nopin_timer_lock);
892 iscsit_dec_conn_usage_count(conn);
893 return;
894 }
895
896 pr_debug("Did not receive response to NOPIN on CID: %hu on"
897 " SID: %u, failing connection.\n", conn->cid,
898 conn->sess->sid);
899 conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
900 spin_unlock_bh(&conn->nopin_timer_lock);
901
902 {
903 struct iscsi_portal_group *tpg = conn->sess->tpg;
904 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
905
906 if (tiqn) {
907 spin_lock_bh(&tiqn->sess_err_stats.lock);
908 strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
909 conn->sess->sess_ops->InitiatorName);
910 tiqn->sess_err_stats.last_sess_failure_type =
911 ISCSI_SESS_ERR_CXN_TIMEOUT;
912 tiqn->sess_err_stats.cxn_timeout_errors++;
913 atomic_long_inc(&conn->sess->conn_timeout_errors);
914 spin_unlock_bh(&tiqn->sess_err_stats.lock);
915 }
916 }
917
918 iscsit_cause_connection_reinstatement(conn, 0);
919 iscsit_dec_conn_usage_count(conn);
920}
921
922void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
923{
924 struct iscsi_session *sess = conn->sess;
925 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
926
927 spin_lock_bh(&conn->nopin_timer_lock);
928 if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
929 spin_unlock_bh(&conn->nopin_timer_lock);
930 return;
931 }
932
933 mod_timer(&conn->nopin_response_timer,
934 (get_jiffies_64() + na->nopin_response_timeout * HZ));
935 spin_unlock_bh(&conn->nopin_timer_lock);
936}

/*
 *	Arm the NOPIN response timer when a NOPIN requesting a NopOut
 *	response is sent to the initiator.
 */
941void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
942{
943 struct iscsi_session *sess = conn->sess;
944 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
945
946 spin_lock_bh(&conn->nopin_timer_lock);
947 if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
948 spin_unlock_bh(&conn->nopin_timer_lock);
949 return;
950 }
951
952 init_timer(&conn->nopin_response_timer);
953 conn->nopin_response_timer.expires =
954 (get_jiffies_64() + na->nopin_response_timeout * HZ);
955 conn->nopin_response_timer.data = (unsigned long)conn;
956 conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
957 conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
958 conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
959 add_timer(&conn->nopin_response_timer);
960
961 pr_debug("Started NOPIN Response Timer on CID: %d to %u"
962 " seconds\n", conn->cid, na->nopin_response_timeout);
963 spin_unlock_bh(&conn->nopin_timer_lock);
964}
965
966void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
967{
968 spin_lock_bh(&conn->nopin_timer_lock);
969 if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
970 spin_unlock_bh(&conn->nopin_timer_lock);
971 return;
972 }
973 conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
974 spin_unlock_bh(&conn->nopin_timer_lock);
975
976 del_timer_sync(&conn->nopin_response_timer);
977
978 spin_lock_bh(&conn->nopin_timer_lock);
979 conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
980 spin_unlock_bh(&conn->nopin_timer_lock);
981}
982
983static void iscsit_handle_nopin_timeout(unsigned long data)
984{
985 struct iscsi_conn *conn = (struct iscsi_conn *) data;
986
987 iscsit_inc_conn_usage_count(conn);
988
989 spin_lock_bh(&conn->nopin_timer_lock);
990 if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
991 spin_unlock_bh(&conn->nopin_timer_lock);
992 iscsit_dec_conn_usage_count(conn);
993 return;
994 }
995 conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
996 spin_unlock_bh(&conn->nopin_timer_lock);
997
998 iscsit_add_nopin(conn, 1);
999 iscsit_dec_conn_usage_count(conn);
1000}

/*
 *	Called with conn->nopin_timer_lock held.
 */
1005void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
1006{
1007 struct iscsi_session *sess = conn->sess;
1008 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
1012 if (!na->nopin_timeout)
1013 return;
1014
1015 if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
1016 return;
1017
1018 init_timer(&conn->nopin_timer);
1019 conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
1020 conn->nopin_timer.data = (unsigned long)conn;
1021 conn->nopin_timer.function = iscsit_handle_nopin_timeout;
1022 conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1023 conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1024 add_timer(&conn->nopin_timer);
1025
1026 pr_debug("Started NOPIN Timer on CID: %d at %u second"
1027 " interval\n", conn->cid, na->nopin_timeout);
1028}
1029
1030void iscsit_start_nopin_timer(struct iscsi_conn *conn)
1031{
1032 struct iscsi_session *sess = conn->sess;
1033 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
1037 if (!na->nopin_timeout)
1038 return;
1039
1040 spin_lock_bh(&conn->nopin_timer_lock);
1041 if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
1042 spin_unlock_bh(&conn->nopin_timer_lock);
1043 return;
1044 }
1045
1046 init_timer(&conn->nopin_timer);
1047 conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
1048 conn->nopin_timer.data = (unsigned long)conn;
1049 conn->nopin_timer.function = iscsit_handle_nopin_timeout;
1050 conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1051 conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1052 add_timer(&conn->nopin_timer);
1053
1054 pr_debug("Started NOPIN Timer on CID: %d at %u second"
1055 " interval\n", conn->cid, na->nopin_timeout);
1056 spin_unlock_bh(&conn->nopin_timer_lock);
1057}
1058
1059void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
1060{
1061 spin_lock_bh(&conn->nopin_timer_lock);
1062 if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
1063 spin_unlock_bh(&conn->nopin_timer_lock);
1064 return;
1065 }
1066 conn->nopin_timer_flags |= ISCSI_TF_STOP;
1067 spin_unlock_bh(&conn->nopin_timer_lock);
1068
1069 del_timer_sync(&conn->nopin_timer);
1070
1071 spin_lock_bh(&conn->nopin_timer_lock);
1072 conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
1073 spin_unlock_bh(&conn->nopin_timer_lock);
1074}
1075
1076int iscsit_send_tx_data(
1077 struct iscsi_cmd *cmd,
1078 struct iscsi_conn *conn,
1079 int use_misc)
1080{
1081 int tx_sent, tx_size;
1082 u32 iov_count;
1083 struct kvec *iov;
1084
1085send_data:
1086 tx_size = cmd->tx_size;
1087
1088 if (!use_misc) {
1089 iov = &cmd->iov_data[0];
1090 iov_count = cmd->iov_data_count;
1091 } else {
1092 iov = &cmd->iov_misc[0];
1093 iov_count = cmd->iov_misc_count;
1094 }
1095
1096 tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
1097 if (tx_size != tx_sent) {
1098 if (tx_sent == -EAGAIN) {
1099 pr_err("tx_data() returned -EAGAIN\n");
1100 goto send_data;
1101 } else
1102 return -1;
1103 }
1104 cmd->tx_size = 0;
1105
1106 return 0;
1107}
1108
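/*
 *	Transmit the PDU header with tx_data(), then push the payload pages
 *	directly from the scatterlist via sendpage(), followed by any padding
 *	and data digest.
 */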
1109int iscsit_fe_sendpage_sg(
1110 struct iscsi_cmd *cmd,
1111 struct iscsi_conn *conn)
1112{
1113 struct scatterlist *sg = cmd->first_data_sg;
1114 struct kvec iov;
1115 u32 tx_hdr_size, data_len;
1116 u32 offset = cmd->first_data_sg_off;
1117 int tx_sent, iov_off;
1118
1119send_hdr:
1120 tx_hdr_size = ISCSI_HDR_LEN;
1121 if (conn->conn_ops->HeaderDigest)
1122 tx_hdr_size += ISCSI_CRC_LEN;
1123
1124 iov.iov_base = cmd->pdu;
1125 iov.iov_len = tx_hdr_size;
1126
1127 tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
1128 if (tx_hdr_size != tx_sent) {
1129 if (tx_sent == -EAGAIN) {
1130 pr_err("tx_data() returned -EAGAIN\n");
1131 goto send_hdr;
1132 }
1133 return -1;
1134 }
1135
1136 data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
	/*
	 * Set iov_off used by the padding and data digest tx_data() calls
	 * below to determine the proper offset into cmd->iov_data[].
	 */
1141 if (conn->conn_ops->DataDigest) {
1142 data_len -= ISCSI_CRC_LEN;
1143 if (cmd->padding)
1144 iov_off = (cmd->iov_data_count - 2);
1145 else
1146 iov_off = (cmd->iov_data_count - 1);
1147 } else {
1148 iov_off = (cmd->iov_data_count - 1);
1149 }
	/*
	 * Perform sendpage() for each page in the scatterlist.
	 */
1153 while (data_len) {
1154 u32 space = (sg->length - offset);
1155 u32 sub_len = min_t(u32, data_len, space);
1156send_pg:
1157 tx_sent = conn->sock->ops->sendpage(conn->sock,
1158 sg_page(sg), sg->offset + offset, sub_len, 0);
1159 if (tx_sent != sub_len) {
1160 if (tx_sent == -EAGAIN) {
1161 pr_err("tcp_sendpage() returned"
1162 " -EAGAIN\n");
1163 goto send_pg;
1164 }
1165
1166 pr_err("tcp_sendpage() failure: %d\n",
1167 tx_sent);
1168 return -1;
1169 }
1170
1171 data_len -= sub_len;
1172 offset = 0;
1173 sg = sg_next(sg);
1174 }
1175
1176send_padding:
1177 if (cmd->padding) {
1178 struct kvec *iov_p = &cmd->iov_data[iov_off++];
1179
1180 tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
1181 if (cmd->padding != tx_sent) {
1182 if (tx_sent == -EAGAIN) {
1183 pr_err("tx_data() returned -EAGAIN\n");
1184 goto send_padding;
1185 }
1186 return -1;
1187 }
1188 }
1189
1190send_datacrc:
1191 if (conn->conn_ops->DataDigest) {
1192 struct kvec *iov_d = &cmd->iov_data[iov_off];
1193
1194 tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
1195 if (ISCSI_CRC_LEN != tx_sent) {
1196 if (tx_sent == -EAGAIN) {
1197 pr_err("tx_data() returned -EAGAIN\n");
1198 goto send_datacrc;
1199 }
1200 return -1;
1201 }
1202 }
1203
1204 return 0;
1205}

/*
 *	Send an ISCSI_OP_LOGIN_RSP PDU back to the Initiator when an exception
 *	condition occurs, with the errors set in status_class and
 *	status_detail.
 *
 *	Parameters:	iSCSI Connection, Status Class, Status Detail.
 *	Returns:	0 on success, -1 on error.
 */
1215int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
1216{
1217 struct iscsi_login_rsp *hdr;
1218 struct iscsi_login *login = conn->conn_login;
1219
1220 login->login_failed = 1;
1221 iscsit_collect_login_stats(conn, status_class, status_detail);
1222
1223 memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
1224
1225 hdr = (struct iscsi_login_rsp *)&login->rsp[0];
1226 hdr->opcode = ISCSI_OP_LOGIN_RSP;
1227 hdr->status_class = status_class;
1228 hdr->status_detail = status_detail;
1229 hdr->itt = conn->login_itt;
1230
1231 return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
1232}
1233
1234void iscsit_print_session_params(struct iscsi_session *sess)
1235{
1236 struct iscsi_conn *conn;
1237
1238 pr_debug("-----------------------------[Session Params for"
1239 " SID: %u]-----------------------------\n", sess->sid);
1240 spin_lock_bh(&sess->conn_lock);
1241 list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
1242 iscsi_dump_conn_ops(conn->conn_ops);
1243 spin_unlock_bh(&sess->conn_lock);
1244
1245 iscsi_dump_sess_ops(sess->sess_ops);
1246}
1247
1248static int iscsit_do_rx_data(
1249 struct iscsi_conn *conn,
1250 struct iscsi_data_count *count)
1251{
1252 int data = count->data_length, rx_loop = 0, total_rx = 0;
1253 struct msghdr msg;
1254
1255 if (!conn || !conn->sock || !conn->conn_ops)
1256 return -1;
1257
1258 memset(&msg, 0, sizeof(struct msghdr));
1259 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
1260 count->iov, count->iov_count, data);
1261
1262 while (msg_data_left(&msg)) {
1263 rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
1264 if (rx_loop <= 0) {
1265 pr_debug("rx_loop: %d total_rx: %d\n",
1266 rx_loop, total_rx);
1267 return rx_loop;
1268 }
1269 total_rx += rx_loop;
1270 pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
1271 rx_loop, total_rx, data);
1272 }
1273
1274 return total_rx;
1275}
1276
1277int rx_data(
1278 struct iscsi_conn *conn,
1279 struct kvec *iov,
1280 int iov_count,
1281 int data)
1282{
1283 struct iscsi_data_count c;
1284
1285 if (!conn || !conn->sock || !conn->conn_ops)
1286 return -1;
1287
1288 memset(&c, 0, sizeof(struct iscsi_data_count));
1289 c.iov = iov;
1290 c.iov_count = iov_count;
1291 c.data_length = data;
1292 c.type = ISCSI_RX_DATA;
1293
1294 return iscsit_do_rx_data(conn, &c);
1295}
1296
1297int tx_data(
1298 struct iscsi_conn *conn,
1299 struct kvec *iov,
1300 int iov_count,
1301 int data)
1302{
1303 struct msghdr msg;
1304 int total_tx = 0;
1305
1306 if (!conn || !conn->sock || !conn->conn_ops)
1307 return -1;
1308
1309 if (data <= 0) {
1310 pr_err("Data length is: %d\n", data);
1311 return -1;
1312 }
1313
1314 memset(&msg, 0, sizeof(struct msghdr));
1315
1316 iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC,
1317 iov, iov_count, data);
1318
1319 while (msg_data_left(&msg)) {
1320 int tx_loop = sock_sendmsg(conn->sock, &msg);
1321 if (tx_loop <= 0) {
1322 pr_debug("tx_loop: %d total_tx %d\n",
1323 tx_loop, total_tx);
1324 return tx_loop;
1325 }
1326 total_tx += tx_loop;
1327 pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
1328 tx_loop, total_tx, data);
1329 }
1330
1331 return total_tx;
1332}
1333
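/*
 *	Update the target's login statistics to reflect a login attempt with
 *	the given status class and status detail.
 */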
1334void iscsit_collect_login_stats(
1335 struct iscsi_conn *conn,
1336 u8 status_class,
1337 u8 status_detail)
1338{
1339 struct iscsi_param *intrname = NULL;
1340 struct iscsi_tiqn *tiqn;
1341 struct iscsi_login_stats *ls;
1342
1343 tiqn = iscsit_snmp_get_tiqn(conn);
1344 if (!tiqn)
1345 return;
1346
1347 ls = &tiqn->login_stats;
1348
1349 spin_lock(&ls->lock);
1350 if (status_class == ISCSI_STATUS_CLS_SUCCESS)
1351 ls->accepts++;
1352 else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
1353 ls->redirects++;
1354 ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
1355 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1356 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
1357 ls->authenticate_fails++;
1358 ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
1359 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1360 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
1361 ls->authorize_fails++;
1362 ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
1363 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1364 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
1365 ls->negotiate_fails++;
1366 ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
1367 } else {
1368 ls->other_fails++;
1369 ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
1370 }

	/* Save initiator name detail for later checking */
1373 if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
1374 if (conn->param_list)
1375 intrname = iscsi_find_param_from_key(INITIATORNAME,
1376 conn->param_list);
1377 strlcpy(ls->last_intr_fail_name,
1378 (intrname ? intrname->value : "Unknown"),
1379 sizeof(ls->last_intr_fail_name));
1380
1381 ls->last_intr_fail_ip_family = conn->login_family;
1382
1383 ls->last_intr_fail_sockaddr = conn->login_sockaddr;
1384 ls->last_fail_time = get_jiffies_64();
1385 }
1386
1387 spin_unlock(&ls->lock);
1388}
1389
1390struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
1391{
1392 struct iscsi_portal_group *tpg;
1393
1394 if (!conn)
1395 return NULL;
1396
1397 tpg = conn->tpg;
1398 if (!tpg)
1399 return NULL;
1400
1401 if (!tpg->tpg_tiqn)
1402 return NULL;
1403
1404 return tpg->tpg_tiqn;
1405}
1406