/*
 * target_core_transport.c
 *
 * Core SCSI target transport engine: se_session lifecycle, command
 * submission and sequencing, SAM task-attribute emulation, and command
 * completion handling.
 */
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int err, bool write_pending);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

int transport_alloc_session_tags(struct se_session *se_sess,
		unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
			GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
	if (!se_sess->sess_cmd_map) {
		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
		if (!se_sess->sess_cmd_map) {
			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
			return -ENOMEM;
		}
	}

	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool, tag_num: %u\n",
		       tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

struct se_session *transport_init_session_tags(unsigned int tag_num,
		unsigned int tag_size,
		enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num: %u, but zero tag_size\n",
		       tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size: %u, but zero tag_num\n",
		       tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session_tags);

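/*
 * Register a session with a portal group and, when a node ACL is present,
 * record the session against that ACL.  Called with se_tpg->session_lock
 * held; see transport_register_session() below for the locked wrapper.
 */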
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;

	if (se_nacl) {
		/*
		 * If a previously saved protection type exists for this node
		 * ACL, re-instate it so that PI is not disabled below any
		 * previously registered LUNs.  Otherwise ask the fabric
		 * whether T10-PI is fabric-only and save that answer.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the latest
		 * active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

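/*
 * target_alloc_session - allocate a session, resolve the initiator node
 * ACL, run the optional fabric @callback, and register the session with
 * @tpg.  When @tag_num is non-zero, percpu-ida tag preallocation of
 * @tag_num tags of @tag_size bytes each is performed.  Returns an
 * ERR_PTR() on failure.
 */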
struct se_session *
target_alloc_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre allocation
	 * of I/O descriptor tags, go ahead and perform that setup now.
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_init_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);
		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_alloc_session);

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);
	struct se_portal_group *se_tpg = nacl->se_tpg;

	if (!nacl->dynamic_stop) {
		complete(&nacl->acl_free_comp);
		return;
	}

	mutex_lock(&se_tpg->acl_node_mutex);
	list_del_init(&nacl->acl_list);
	mutex_unlock(&se_tpg->acl_node_mutex);

	core_tpg_wait_for_nacl_pr_ref(nacl);
	core_free_device_list_for_node(nacl, se_tpg);
	kfree(nacl);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active
	 * struct se_session.
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

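/*
 * Drop the session's node ACL reference(s), release per-session tag and
 * command maps, and free the se_session itself.
 */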
void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;

	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		struct se_portal_group *se_tpg = se_nacl->se_tpg;
		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
		unsigned long flags;

		se_sess->se_node_acl = NULL;

		/*
		 * Also determine if we need to drop the extra ->cmd_kref if
		 * it had been previously dynamically generated, and
		 * the endpoint is not caching dynamic ACLs.
		 */
		mutex_lock(&se_tpg->acl_node_mutex);
		if (se_nacl->dynamic_node_acl &&
		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
			if (list_empty(&se_nacl->acl_sess_list))
				se_nacl->dynamic_stop = true;
			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

			if (se_nacl->dynamic_stop)
				list_del_init(&se_nacl->acl_list);
		}
		mutex_unlock(&se_tpg->acl_node_mutex);

		if (se_nacl->dynamic_stop)
			target_put_nacl(se_nacl);

		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		percpu_ida_destroy(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

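/*
 * Unlink the session from its portal group and free it.  Safe to call for
 * sessions that were never fully registered (se_tpg == NULL).
 */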
void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		 se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context from within transport_free_session() code.
	 *
	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
	 * to release all remaining generate_node_acl=1 created ACL resources.
	 */
	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	unsigned long flags;

	target_remove_from_state_list(cmd);

	/*
	 * Clear struct se_cmd->se_lun before the handoff to FE.
	 */
	cmd->se_lun = NULL;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			 __func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Some fabric modules like tcm_loop can release their internally
	 * allocated I/O reference and struct se_cmd now.
	 *
	 * Fabric modules are expected to return '1' here if the se_cmd being
	 * passed is released at this point, or zero if not being released.
	 */
	return cmd->se_tfo->check_stop_free(cmd);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
	int ret = 0;

	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
		transport_lun_remove_cmd(cmd);
	/*
	 * Allow the fabric driver to unmap any resources before
	 * releasing the descriptor via TFO->release_cmd().
	 */
	if (remove)
		cmd->se_tfo->aborted_task(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return 1;
	if (remove && ack_kref)
		ret = transport_put_cmd(cmd);

	return ret;
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
	unsigned char *cmd_sense_buf;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd_sense_buf = transport_get_sense_buffer(cmd);
	if (!cmd_sense_buf) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);

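/*
 * Completion entry point for backend drivers.  May be called from interrupt
 * context; the remaining completion work is deferred to target_completion_wq.
 */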
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	switch (cmd->scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
		else
			success = 0;
		break;
	default:
		success = 1;
		break;
	}

	/*
	 * Check for case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion..
	 */
	if (cmd->transport_state & CMD_T_ABORTED ||
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		/*
		 * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
		 * release se_device->caw_sem obtained by sbc_compare_and_write()
		 * since the normal completion callbacks won't run to drop it.
		 */
		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
			up(&dev->caw_sem);

		complete_all(&cmd->t_transport_stop_comp);
		return;
	} else if (!success) {
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->se_cmd_flags & SCF_USE_CPUID)
		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
	else
		queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					     qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue context: %s\n",
			 cmd->se_tfo->get_fabric_name(), cmd,
			 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			 : "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
		       dev->dev_attrib.block_size,
		       dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf + len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf + len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf + len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf + len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf + len, "SCSI Remote Direct Memory Access Protocol\n");
		break;
	case 0x50:
		sprintf(buf + len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf + len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf + len, "Automation/Drive Interface Transport Protocol\n");
		break;
	case 0x80:
		sprintf(buf + len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf + len, "Unknown 0x%02x\n",
			vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf + len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf + len, "target port\n");
		break;
	case 0x20:
		sprintf(buf + len, "SCSI target device\n");
		break;
	default:
		sprintf(buf + len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association is in bits 5-4 (0x30) of byte 1,
	 * from spc3r23.pdf section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf + len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf + len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf + len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf + len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf + len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf + len, "SCSI name string\n");
		break;
	default:
		sprintf(buf + len, "Unsupported: 0x%02x\n",
			vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf) + 1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type is in the low nibble (0x0f) of byte 1,
	 * from spc3r23.pdf section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			 "T10 VPD Binary Device Identifier: %s\n",
			 &vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			 "T10 VPD ASCII Device Identifier: %s\n",
			 &vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			 "T10 VPD UTF-8 Device Identifier: %s\n",
			 &vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported: 0x%02x",
			vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding) is in the low nibble (0x0f) of byte 0,
	 * from spc3r23.pdf section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if fabric enforces a maximum of SGL entries per I/O descriptor
	 * exceeding the amount of SGLs based on TCM_MAX_COMMAND_SIZE.  If so,
	 * set residual_count and reduce original cmd->data_length to maximum
	 * length based on single PAGE_SIZE entry scatter-lists.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;
			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);
			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

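/*
 * Compare the transfer length reported by the fabric with the length
 * implied by the CDB, and record overflow/underflow residuals per SAM.
 */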
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length: %u does not match SCSI CDB Length: %u for SAM Opcode: 0x%02x\n",
				    cmd->se_tfo->get_fabric_name(),
				    cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
				pr_err_ratelimited("Rejecting underflow/overflow for WRITE data CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
			/*
			 * For WRITE control CDBs, allow the smaller underflow
			 * transfer to proceed below, but keep rejecting
			 * overflow here.
			 */
			if (size > cmd->data_length) {
				pr_err_ratelimited("Rejecting overflow for WRITE control CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
		}
		/*
		 * Residual handling for LBA ops is only supported here for
		 * backends using a 512-byte block size.
		 */
		if (dev->dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op CDB on non 512-byte sector setup subsystem plugin: %s\n",
			       dev->transport->name);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length.  Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);
}

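/*
 * transport_init_se_cmd():
 *
 * Used by fabric modules containing a local struct se_session to
 * initialize a new se_cmd descriptor before submission into target core.
 */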
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	INIT_WORK(&cmd->work, NULL);
	kref_init(&cmd->cmd_kref);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
		       scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * If the received CDB is larger than sizeof(cmd->__t_task_cdb),
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
					  GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
			       scsi_command_size(cdb),
			       (unsigned long)sizeof(cmd->__t_task_cdb));
			return TCM_OUT_OF_RESOURCES;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->get_fabric_name(),
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

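/*
 * Used by fabric module frontends to queue tasks directly to
 * struct se_device processing within target core.  May only be called
 * from process context.
 */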
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks().
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

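/**
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *			 se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess that is locked
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */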
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      data_length, data_dir, task_attr, sense);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	else
		se_cmd->cpuid = WORK_CPU_UNBOUND;

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs.  Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		    se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
						      sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

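/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd + cdb
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess that is locked
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal target core SGL memory allocation.
 */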
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_lun_remove_cmd(se_cmd);
	transport_cmd_check_stop_to_fabric(se_cmd);
}

static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
				       u64 *unpacked_lun)
{
	struct se_cmd *se_cmd;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
			continue;

		if (se_cmd->tag == tag) {
			*unpacked_lun = se_cmd->orig_fe_lun;
			ret = true;
			break;
		}
	}
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	return ret;
}

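/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess that is locked
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 */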
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, TCM_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}
	/*
	 * If this is ABORT_TASK with no explicit fabric provided LUN,
	 * go ahead and search active session tags for a match to figure
	 * out unpacked_lun for the original se_cmd.
	 */
	if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
		if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun))
			goto failure;
	}

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret)
		goto failure;

	transport_generic_handle_tmr(se_cmd);
	return 0;

	/*
	 * For callback during failure handling, push this work off
	 * to process context with TMR_LUN_DOES_NOT_EXIST status.
	 */
failure:
	INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
	schedule_work(&se_cmd->work);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);

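/*
 * Handle SAM-esque emulation for generic transport request failures.
 */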
void transport_generic_request_failure(struct se_cmd *cmd,
		sense_reason_t sense_reason)
{
	int ret = 0, post_ret = 0;

	if (transport_check_aborted_status(cmd, 1))
		return;

	pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
		 sense_reason);
	target_show_cmd("-----[ ", cmd);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	transport_complete_task_attr(cmd);
	/*
	 * Handle special case for COMPARE_AND_WRITE failure, where the
	 * callback is expected to drop the per device ->caw_sem.
	 */
	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
	    cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd, false, &post_ret);

	switch (sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_ADDRESS_OUT_OF_RANGE:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
	case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
	case TCM_TOO_MANY_TARGET_DESCS:
	case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
	case TCM_TOO_MANY_SEGMENT_DESCS:
	case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
		break;
	case TCM_OUT_OF_RESOURCES:
		sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x2C,
					       ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
		}
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
		       cmd->t_task_cdb[0], sense_reason);
		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
	if (ret)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
EXPORT_SYMBOL(transport_generic_request_failure);

void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
{
	sense_reason_t ret;

	if (!cmd->execute_cmd) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto err;
	}
	if (do_checks) {
		/*
		 * Check for an existing UNIT ATTENTION condition after
		 * target_handle_task_attr() has done SAM task attr
		 * checking, and possibly have already defered this cmd
		 * for execution at a later point.
		 */
		ret = target_scsi3_ua_check(cmd);
		if (ret)
			goto err;

		ret = target_alua_state_check(cmd);
		if (ret)
			goto err;

		ret = target_check_reservation(cmd);
		if (ret) {
			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
			goto err;
		}
	}

	ret = cmd->execute_cmd(cmd);
	if (!ret)
		return;
err:
	spin_lock_irq(&cmd->t_state_lock);
	cmd->transport_state &= ~CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	transport_generic_request_failure(cmd, ret);
}

static int target_write_prot_action(struct se_cmd *cmd)
{
	u32 sectors;
	/*
	 * Perform WRITE_INSERT of PI using software emulation when backend
	 * device has PI enabled, if the transport has not already generated
	 * PI using hardware WRITE_INSERT offload.
	 */
	switch (cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
			sbc_dif_generate(cmd);
		break;
	case TARGET_PROT_DOUT_STRIP:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
			break;

		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
					     sectors, 0, cmd->t_prot_sg, 0);
		if (unlikely(cmd->pi_err)) {
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~CMD_T_SENT;
			spin_unlock_irq(&cmd->t_state_lock);
			transport_generic_request_failure(cmd, cmd->pi_err);
			return -1;
		}
		break;
	default:
		break;
	}

	return 0;
}

static bool target_handle_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return false;

	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;

	/*
	 * Check for the existence of HEAD_OF_QUEUE plus ordered delivery
	 * to allow the passed struct se_cmd list of tasks to the front of
	 * the list.
	 */
	switch (cmd->sam_task_attr) {
	case TCM_HEAD_TAG:
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
			 cmd->t_task_cdb[0]);
		return false;
	case TCM_ORDERED_TAG:
		atomic_inc_mb(&dev->dev_ordered_sync);

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
			 cmd->t_task_cdb[0]);

		/*
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
		 */
		if (!atomic_read(&dev->simple_cmds))
			return false;
		break;
	default:
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc_mb(&dev->simple_cmds);
		break;
	}

	if (atomic_read(&dev->dev_ordered_sync) == 0)
		return false;

	spin_lock(&dev->delayed_cmd_lock);
	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
	spin_unlock(&dev->delayed_cmd_lock);

	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
		 cmd->t_task_cdb[0], cmd->sam_task_attr);
	return true;
}

static int __transport_check_aborted_status(struct se_cmd *, int);

void target_execute_cmd(struct se_cmd *cmd)
{
	/*
	 * If the received CDB has already been aborted, stop processing it
	 * here.  Also determine if the frontend context caller is requesting
	 * the stopping of this command for frontend exceptions.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (__transport_check_aborted_status(cmd, 1)) {
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			 __func__, __LINE__, cmd->tag);

		spin_unlock_irq(&cmd->t_state_lock);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	}

	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	if (target_write_prot_action(cmd))
		return;

	if (target_handle_task_attr(cmd)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~CMD_T_SENT;
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}

	__target_execute_cmd(cmd, true);
}
EXPORT_SYMBOL(target_execute_cmd);

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		__target_execute_cmd(cmd, true);

		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
			break;
	}
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return;

	if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
		goto restart;

	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
		atomic_dec_mb(&dev->simple_cmds);
		dev->dev_cur_ordered_id++;
	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
			 dev->dev_cur_ordered_id);
	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
		atomic_dec_mb(&dev->dev_ordered_sync);

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
			 dev->dev_cur_ordered_id);
	}
restart:
	target_restart_delayed_cmds(dev);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	transport_complete_task_attr(cmd);
	/*
	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
	 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and
	 * the same callbacks should not be retried.  Return CHECK_CONDITION
	 * if a scsi_status is not already set.
	 *
	 * If a fabric driver ->queue_status() has returned non zero, always
	 * keep retrying queue_status until it succeeds.
	 */
	if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
		if (cmd->scsi_status)
			goto queue_status;

		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
		cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
		translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
		goto queue_status;
	}

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
		goto queue_status;

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		if (cmd->scsi_status)
			goto queue_status;

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->se_cmd_flags & SCF_BIDI) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			break;
		}
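		/* fall through */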
	case DMA_NONE:
queue_status:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
					int err, bool write_pending)
{
	/*
	 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
	 * ->queue_data_in() callbacks from new process context.
	 *
	 * Otherwise for other errors, transport_complete_qf() will send
	 * CHECK_CONDITION via ->queue_status() instead of attempting to
	 * retry associated fabric driver data-transfer callbacks.
	 */
	if (err == -EAGAIN || err == -ENOMEM) {
		cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
						 TRANSPORT_COMPLETE_QF_OK;
	} else {
		pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
		cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
	}

	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc_mb(&dev->dev_qf_count);
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}

static bool target_read_prot_action(struct se_cmd *cmd)
{
	switch (cmd->prot_op) {
	case TARGET_PROT_DIN_STRIP:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
			u32 sectors = cmd->data_length >>
				      ilog2(cmd->se_dev->dev_attrib.block_size);

			cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
						     sectors, 0, cmd->t_prot_sg,
						     0);
			if (cmd->pi_err)
				return true;
		}
		break;
	case TARGET_PROT_DIN_INSERT:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
			break;

		sbc_dif_generate(cmd);
		break;
	default:
		break;
	}

	return false;
}

static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	transport_complete_task_attr(cmd);

	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to send a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		WARN_ON(!cmd->scsi_status);
		ret = transport_send_check_condition_and_sense(
					cmd, 0, 1);
		if (ret)
			goto queue_full;

		transport_lun_remove_cmd(cmd);
		transport_cmd_check_stop_to_fabric(cmd);
		return;
	}
	/*
	 * Check for a callback, used by amongst other things
	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
	 */
	if (cmd->transport_complete_callback) {
		sense_reason_t rc;
		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
		bool zero_dl = !(cmd->data_length);
		int post_ret = 0;

		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
		if (!rc && !post_ret) {
			if (caw && zero_dl)
				goto queue_rsp;

			return;
		} else if (rc) {
			ret = transport_send_check_condition_and_sense(cmd,
						rc, 0);
			if (ret)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}

queue_rsp:
	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		if (cmd->scsi_status)
			goto queue_status;

		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.tx_data_octets);
		/*
		 * Perform READ_STRIP of PI using software emulation when
		 * backend had PI enabled, if the transport will not be
		 * performing hardware READ_STRIP offload.
		 */
		if (target_read_prot_action(cmd)) {
			ret = transport_send_check_condition_and_sense(cmd,
						cmd->pi_err, 0);
			if (ret)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.rx_data_octets);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->se_cmd_flags & SCF_BIDI) {
			atomic_long_add(cmd->data_length,
					&cmd->se_lun->lun_stats.tx_data_octets);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret)
				goto queue_full;
			break;
		}
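		/* fall through */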
	case DMA_NONE:
queue_status:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p, data_direction: %d\n",
		 cmd, cmd->data_direction);

	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}

void target_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}
EXPORT_SYMBOL(target_free_sgl);

static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
	/*
	 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
	 * emulation, and free + reset pointers if necessary..
	 */
	if (!cmd->t_data_sg_orig)
		return;

	kfree(cmd->t_data_sg);
	cmd->t_data_sg = cmd->t_data_sg_orig;
	cmd->t_data_sg_orig = NULL;
	cmd->t_data_nents = cmd->t_data_nents_orig;
	cmd->t_data_nents_orig = 0;
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
		cmd->t_prot_sg = NULL;
		cmd->t_prot_nents = 0;
	}

	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		/*
		 * Release special case READ buffer payload required for
		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
		 */
		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
			target_free_sgl(cmd->t_bidi_data_sg,
					cmd->t_bidi_data_nents);
			cmd->t_bidi_data_sg = NULL;
			cmd->t_bidi_data_nents = 0;
		}
		transport_reset_sgl_orig(cmd);
		return;
	}
	transport_reset_sgl_orig(cmd);

	target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

/*
 * transport_put_cmd - release a reference to a command
 * @cmd: command to release
 *
 * This routine releases our reference to the command and frees it
 * if possible.
 */
static int transport_put_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	return target_put_sess_cmd(cmd);
}

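/*
 * Map the command's data scatterlist into a linear kernel virtual address.
 * Single-entry lists use kmap(); longer lists are stitched together with
 * vmap().  Pair with transport_kunmap_data_sg().
 */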
2371void *transport_kmap_data_sg(struct se_cmd *cmd)
2372{
2373 struct scatterlist *sg = cmd->t_data_sg;
2374 struct page **pages;
2375 int i;
2376
2377
2378
2379
2380
2381
2382 if (!cmd->t_data_nents)
2383 return NULL;
2384
2385 BUG_ON(!sg);
2386 if (cmd->t_data_nents == 1)
2387 return kmap(sg_page(sg)) + sg->offset;
2388
2389
2390 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
2391 if (!pages)
2392 return NULL;
2393
2394
2395 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2396 pages[i] = sg_page(sg);
2397 }
2398
2399 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
2400 kfree(pages);
2401 if (!cmd->t_data_vmap)
2402 return NULL;
2403
2404 return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2405}
2406EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
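
/*
 * Example (illustrative sketch, not part of the mainline code): CDB
 * emulation handlers bracket access to the data buffer with the two
 * helpers above. Error handling specific to the emulated command is
 * elided here:
 *
 *	unsigned char *buf = transport_kmap_data_sg(cmd);
 *
 *	if (!buf)
 *		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 *	buf[0] = 0x0;				// fill in the payload
 *	transport_kunmap_data_sg(cmd);
 *
 * Every successful transport_kmap_data_sg() call must be paired with
 * exactly one transport_kunmap_data_sg() on the same command.
 */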

int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
		 bool zero_page, bool chainable)
{
	struct scatterlist *sg;
	struct page *page;
	gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
	unsigned int nalloc, nent;
	int i = 0;

	nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
	if (chainable)
		nalloc++;
	sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nalloc);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
	return -ENOMEM;
}
EXPORT_SYMBOL(target_alloc_sgl);
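
/*
 * Example (illustrative sketch): callers pair target_alloc_sgl() with
 * target_free_sgl(), e.g. when building a private scatterlist for
 * internal I/O. The variable names below are hypothetical:
 *
 *	struct scatterlist *sgl;
 *	unsigned int nents;
 *
 *	if (target_alloc_sgl(&sgl, &nents, len, true, false) < 0)
 *		return -ENOMEM;
 *	// ... perform I/O described by sgl/nents ...
 *	target_free_sgl(sgl, nents);
 *
 * zero_page=true adds __GFP_ZERO so the pages come back zeroed, and
 * chainable=true reserves one extra entry so the resulting table can
 * be chained onto another scatterlist.
 */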

/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead. Otherwise place it on the execution queue.
 */
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret = 0;
	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

	if (cmd->prot_op != TARGET_PROT_NORMAL &&
	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
				       cmd->prot_length, true, false);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	/*
	 * Determine whether the TCM fabric module has already allocated
	 * physical memory, and is directly calling
	 * transport_generic_map_mem_to_cmd() beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {

		if ((cmd->se_cmd_flags & SCF_BIDI) ||
		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
			u32 bidi_length;

			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
				bidi_length = cmd->t_task_nolb *
					      cmd->se_dev->dev_attrib.block_size;
			else
				bidi_length = cmd->data_length;

			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
					       &cmd->t_bidi_data_nents,
					       bidi_length, zero_flag, false);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				       cmd->data_length, zero_flag, false);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
		    cmd->data_length) {
		/*
		 * Special case for COMPARE_AND_WRITE with fabrics
		 * using SG_TO_MEM_NOALLOC to obtain the READ payload.
		 */
		u32 caw_length = cmd->t_task_nolb *
				 cmd->se_dev->dev_attrib.block_size;

		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
				       &cmd->t_bidi_data_nents,
				       caw_length, zero_flag, false);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * If this command is not a write we can execute it right here,
	 * for write buffers we need to notify the fabric driver first
	 * and let it call back once the write buffers are ready.
	 */
	target_add_to_state_list(cmd);
	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
		target_execute_cmd(cmd);
		return 0;
	}

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			 __func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 0;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret)
		goto queue_full;

	return 0;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
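
/*
 * Flow note, as a minimal sketch (the my_fabric_* name is hypothetical):
 * for a WRITE, transport_generic_new_cmd() returns after invoking
 * ->write_pending(), and the fabric driver calls target_execute_cmd()
 * once the payload has arrived in cmd->t_data_sg:
 *
 *	static void my_fabric_write_data_done(struct se_cmd *se_cmd)
 *	{
 *		target_execute_cmd(se_cmd);
 *	}
 *
 * READs and zero-length commands skip ->write_pending() and are handed
 * to target_execute_cmd() directly, as seen above.
 */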

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
	}
}

static bool
__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
			   unsigned long *flags);

static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	int ret = 0;
	bool aborted = false, tas = false;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			target_wait_free_cmd(cmd, &aborted, &tas);

		if (!aborted || tas)
			ret = transport_put_cmd(cmd);
	} else {
		if (wait_for_tasks)
			target_wait_free_cmd(cmd, &aborted, &tas);
		/*
		 * Handle WRITE failure case where transport_generic_new_cmd()
		 * has already added se_cmd to state_list, but fabric has
		 * failed command before I/O submission.
		 */
		if (cmd->state_active)
			target_remove_from_state_list(cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		if (!aborted || tas)
			ret = transport_put_cmd(cmd);
	}
	/*
	 * If the task has been internally aborted due to TMR ABORT_TASK
	 * or LUN_RESET, target_core_tmr.c is responsible for performing
	 * the remaining calls to target_put_sess_cmd(), and not the
	 * callers of this function.
	 */
	if (aborted) {
		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
		wait_for_completion(&cmd->cmd_wait_comp);
		cmd->se_tfo->release_cmd(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);
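
/*
 * Usage note (illustrative): fabric drivers normally invoke this from
 * their command teardown path once the response has gone out on the
 * wire, e.g.:
 *
 *	transport_generic_free_cmd(se_cmd, 0);
 *
 * A non-zero wait_for_tasks additionally blocks until the backend has
 * stopped processing the command; a return value of 1 indicates an
 * aborted command was released here instead of by the normal kref path.
 */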

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;
	int ret = 0;

	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref) {
		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
			return -EINVAL;

		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		ret = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	if (ret && ack_kref)
		target_put_sess_cmd(se_cmd);

	return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);
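
/*
 * Example (illustrative sketch; the response-side call site is fabric
 * specific): with ack_kref=true a fabric holds two references, one for
 * target-core processing and one for its own acknowledgement handling:
 *
 *	ret = target_get_sess_cmd(se_cmd, true);
 *	if (ret)
 *		return ret;		// session is tearing down
 *	...
 *	target_put_sess_cmd(se_cmd);	// from the fabric ack path
 *
 * The descriptor is only released once both references have been
 * dropped via target_put_sess_cmd().
 */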

static void target_free_cmd_mem(struct se_cmd *cmd)
{
	transport_free_pages(cmd);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
}

static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;
	bool fabric_stop;

	if (se_sess) {
		spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);

		spin_lock(&se_cmd->t_state_lock);
		fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
			      (se_cmd->transport_state & CMD_T_ABORTED);
		spin_unlock(&se_cmd->t_state_lock);

		if (se_cmd->cmd_wait_set || fabric_stop) {
			list_del_init(&se_cmd->se_cmd_list);
			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
			target_free_cmd_mem(se_cmd);
			complete(&se_cmd->cmd_wait_comp);
			return;
		}
		list_del_init(&se_cmd->se_cmd_list);
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
	}

	target_free_cmd_mem(se_cmd);
	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_cmd *se_cmd)
{
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);

static const char *data_dir_name(enum dma_data_direction d)
{
	switch (d) {
	case DMA_BIDIRECTIONAL:	return "BIDI";
	case DMA_TO_DEVICE:	return "WRITE";
	case DMA_FROM_DEVICE:	return "READ";
	case DMA_NONE:		return "NONE";
	}

	return "(?)";
}

static const char *cmd_state_name(enum transport_state_table t)
{
	switch (t) {
	case TRANSPORT_NO_STATE:	return "NO_STATE";
	case TRANSPORT_NEW_CMD:		return "NEW_CMD";
	case TRANSPORT_WRITE_PENDING:	return "WRITE_PENDING";
	case TRANSPORT_PROCESSING:	return "PROCESSING";
	case TRANSPORT_COMPLETE:	return "COMPLETE";
	case TRANSPORT_ISTATE_PROCESSING:
					return "ISTATE_PROCESSING";
	case TRANSPORT_COMPLETE_QF_WP:	return "COMPLETE_QF_WP";
	case TRANSPORT_COMPLETE_QF_OK:	return "COMPLETE_QF_OK";
	case TRANSPORT_COMPLETE_QF_ERR:	return "COMPLETE_QF_ERR";
	}

	return "(?)";
}

static void target_append_str(char **str, const char *txt)
{
	char *prev = *str;

	*str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
		kstrdup(txt, GFP_ATOMIC);
	kfree(prev);
}

/*
 * Convert the transport_state bitmask of a command into a
 * comma-separated string. The caller is responsible for kfree()ing
 * the returned pointer.
 */
static char *target_ts_to_str(u32 ts)
{
	char *str = NULL;

	if (ts & CMD_T_ABORTED)
		target_append_str(&str, "aborted");
	if (ts & CMD_T_ACTIVE)
		target_append_str(&str, "active");
	if (ts & CMD_T_COMPLETE)
		target_append_str(&str, "complete");
	if (ts & CMD_T_SENT)
		target_append_str(&str, "sent");
	if (ts & CMD_T_STOP)
		target_append_str(&str, "stop");
	if (ts & CMD_T_FABRIC_STOP)
		target_append_str(&str, "fabric_stop");

	return str;
}

static const char *target_tmf_name(enum tcm_tmreq_table tmf)
{
	switch (tmf) {
	case TMR_ABORT_TASK:		return "ABORT_TASK";
	case TMR_ABORT_TASK_SET:	return "ABORT_TASK_SET";
	case TMR_CLEAR_ACA:		return "CLEAR_ACA";
	case TMR_CLEAR_TASK_SET:	return "CLEAR_TASK_SET";
	case TMR_LUN_RESET:		return "LUN_RESET";
	case TMR_TARGET_WARM_RESET:	return "TARGET_WARM_RESET";
	case TMR_TARGET_COLD_RESET:	return "TARGET_COLD_RESET";
	case TMR_UNKNOWN:		break;
	}
	return "(?)";
}

void target_show_cmd(const char *pfx, struct se_cmd *cmd)
{
	char *ts_str = target_ts_to_str(cmd->transport_state);
	const u8 *cdb = cmd->t_task_cdb;
	struct se_tmr_req *tmf = cmd->se_tmr_req;

	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
			 pfx, cdb[0], cdb[1], cmd->tag,
			 data_dir_name(cmd->data_direction),
			 cmd->se_tfo->get_cmd_state(cmd),
			 cmd_state_name(cmd->t_state), cmd->data_length,
			 kref_read(&cmd->cmd_kref), ts_str);
	} else {
		pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
			 pfx, target_tmf_name(tmf->function), cmd->tag,
			 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
			 cmd_state_name(cmd->t_state),
			 kref_read(&cmd->cmd_kref), ts_str);
	}
	kfree(ts_str);
}
EXPORT_SYMBOL(target_show_cmd);
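
/*
 * Example (illustrative): callers pass a prefix so the origin of the
 * dump is identifiable in the log, as __transport_wait_for_tasks()
 * does below:
 *
 *	target_show_cmd("my_fabric: ", se_cmd);	// prefix is arbitrary
 *
 * The output is emitted at pr_debug() level, so it is only visible
 * with dynamic debug (or a DEBUG build) enabled for this file.
 */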

/* target_sess_cmd_list_set_waiting - Flag all commands in
 *	sess_cmd_list to complete cmd_wait_comp.  Set
 *	sess_tearing_down so no more commands are queued.
 * @se_sess:	session to flag
 */
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		return;
	}
	se_sess->sess_tearing_down = 1;
	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				 &se_sess->sess_wait_list, se_cmd_list) {
		rc = kref_get_unless_zero(&se_cmd->cmd_kref);
		if (rc) {
			se_cmd->cmd_wait_set = 1;
			spin_lock(&se_cmd->t_state_lock);
			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
			spin_unlock(&se_cmd->t_state_lock);
		} else {
			list_del_init(&se_cmd->se_cmd_list);
		}
	}

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);

/* target_wait_for_sess_cmds - Wait for outstanding commands
 * @se_sess:	session to wait for active I/O
 */
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	unsigned long flags;
	bool tas;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				 &se_sess->sess_wait_list, se_cmd_list) {
		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			 " %d\n", se_cmd, se_cmd->t_state,
			 se_cmd->se_tfo->get_cmd_state(se_cmd));

		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
		tas = (se_cmd->transport_state & CMD_T_TAS);
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

		if (!target_put_sess_cmd(se_cmd)) {
			if (tas)
				target_put_sess_cmd(se_cmd);
		}

		wait_for_completion(&se_cmd->cmd_wait_comp);
		pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
			 " fabric state: %d\n", se_cmd, se_cmd->t_state,
			 se_cmd->se_tfo->get_cmd_state(se_cmd));

		se_cmd->se_tfo->release_cmd(se_cmd);
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	WARN_ON(!list_empty(&se_sess->sess_cmd_list));
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
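
/*
 * Teardown ordering, as a minimal sketch of a fabric session shutdown
 * path (surrounding fabric code omitted):
 *
 *	target_sess_cmd_list_set_waiting(se_sess);	// block new commands
 *	target_wait_for_sess_cmds(se_sess);		// drain outstanding I/O
 *	transport_deregister_session(se_sess);
 *
 * The set_waiting step must come first: it sets sess_tearing_down so
 * that target_get_sess_cmd() rejects further submissions with
 * -ESHUTDOWN before the drain begins.
 */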

static void target_lun_confirm(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_ref_comp);
}

void transport_clear_lun_ref(struct se_lun *lun)
{
	/*
	 * Mark the per-cpu reference as dying so that no new references
	 * can be taken, and arrange for target_lun_confirm() to fire
	 * once the switch to atomic mode has completed.
	 */
	percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
	/*
	 * Wait for the confirmation callback: from this point on
	 * percpu_ref_tryget_live() on lun->lun_ref fails, so no new
	 * commands can enter this LUN.
	 */
	wait_for_completion(&lun->lun_ref_comp);
	/*
	 * Wait for the final reference drop. lun_shutdown_comp is
	 * completed once all outstanding commands have released their
	 * lun->lun_ref via transport_lun_remove_cmd(), so the remaining
	 * LUN shutdown can safely proceed afterwards.
	 */
	wait_for_completion(&lun->lun_shutdown_comp);
}

static bool
__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
			   bool *aborted, bool *tas, unsigned long *flags)
	__releases(&cmd->t_state_lock)
	__acquires(&cmd->t_state_lock)
{
	assert_spin_locked(&cmd->t_state_lock);
	WARN_ON_ONCE(!irqs_disabled());

	if (fabric_stop)
		cmd->transport_state |= CMD_T_FABRIC_STOP;

	if (cmd->transport_state & CMD_T_ABORTED)
		*aborted = true;

	if (cmd->transport_state & CMD_T_TAS)
		*tas = true;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		return false;

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		return false;

	if (!(cmd->transport_state & CMD_T_ACTIVE))
		return false;

	if (fabric_stop && *aborted)
		return false;

	cmd->transport_state |= CMD_T_STOP;

	target_show_cmd("wait_for_tasks: Stopping ", cmd);

	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

	while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
					    180 * HZ))
		target_show_cmd("wait for tasks: ", cmd);

	spin_lock_irqsave(&cmd->t_state_lock, *flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
		 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);

	return true;
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait on
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;
	bool ret, aborted = false, tas = false;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

struct sense_info {
	u8 key;
	u8 asc;
	u8 ascq;
	bool add_sector_info;
};

static const struct sense_info sense_info_table[] = {
	[TCM_NO_SENSE] = {
		.key = NOT_READY
	},
	[TCM_NON_EXISTENT_LUN] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
	},
	[TCM_UNSUPPORTED_SCSI_OPCODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
	},
	[TCM_SECTOR_COUNT_TOO_MANY] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
	},
	[TCM_UNKNOWN_MODE_PAGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24, /* INVALID FIELD IN CDB */
	},
	[TCM_CHECK_CONDITION_ABORT_CMD] = {
		.key = ABORTED_COMMAND,
		.asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
		.ascq = 0x03,
	},
	[TCM_INCORRECT_AMOUNT_OF_DATA] = {
		.key = ABORTED_COMMAND,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
	},
	[TCM_INVALID_CDB_FIELD] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24, /* INVALID FIELD IN CDB */
	},
	[TCM_INVALID_PARAMETER_LIST] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
	},
	[TCM_TOO_MANY_TARGET_DESCS] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
	},
	[TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
	},
	[TCM_TOO_MANY_SEGMENT_DESCS] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
	},
	[TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
	},
	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
	},
	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0c, /* UNEXPECTED UNSOLICITED DATA */
	},
	[TCM_SERVICE_CRC_ERROR] = {
		.key = ABORTED_COMMAND,
		.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
		.ascq = 0x05,
	},
	[TCM_SNACK_REJECTED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x11, /* READ ERROR */
		.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
	},
	[TCM_WRITE_PROTECTED] = {
		.key = DATA_PROTECT,
		.asc = 0x27, /* WRITE PROTECTED */
	},
	[TCM_ADDRESS_OUT_OF_RANGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
	},
	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
		.key = UNIT_ATTENTION,
	},
	[TCM_CHECK_CONDITION_NOT_READY] = {
		.key = NOT_READY,
	},
	[TCM_MISCOMPARE_VERIFY] = {
		.key = MISCOMPARE,
		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
		.ascq = 0x00,
	},
	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
		.key = COPY_ABORTED,
		.asc = 0x0d,
		.ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
	},
	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators.  Returning NOT READY instead means the
		 * io will be retried.
		 */
		.key = NOT_READY,
		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
	},
};

static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
	const struct sense_info *si;
	u8 *buffer = cmd->sense_buffer;
	int r = (__force int)reason;
	u8 asc, ascq;
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
		si = &sense_info_table[r];
	else
		si = &sense_info_table[(__force int)
				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];

	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		WARN_ON_ONCE(asc == 0);
	} else if (si->asc == 0) {
		WARN_ON_ONCE(cmd->scsi_asc == 0);
		asc = cmd->scsi_asc;
		ascq = cmd->scsi_ascq;
	} else {
		asc = si->asc;
		ascq = si->ascq;
	}

	scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
	if (si->add_sector_info)
		return scsi_set_sense_information(buffer,
						  cmd->scsi_sense_length,
						  cmd->bad_sector);

	return 0;
}

int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!from_transport) {
		int rc;

		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
		cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
		rc = translate_sense_reason(cmd, reason);
		if (rc)
			return rc;
	}

	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
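
/*
 * Example (illustrative): completion paths in this file report a
 * failure by passing the sense_reason_t straight through and falling
 * back to the queue-full handling on fabric pushback:
 *
 *	ret = transport_send_check_condition_and_sense(cmd,
 *			TCM_UNSUPPORTED_SCSI_OPCODE, 0);
 *	if (ret)
 *		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 *
 * from_transport=1 skips translate_sense_reason() for sense payloads
 * the backend transport has already populated.
 */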

static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
	__releases(&cmd->t_state_lock)
	__acquires(&cmd->t_state_lock)
{
	int ret;

	assert_spin_locked(&cmd->t_state_lock);
	WARN_ON_ONCE(!irqs_disabled());

	if (!(cmd->transport_state & CMD_T_ABORTED))
		return 0;
	/*
	 * If cmd has been aborted but either no status is to be sent or it
	 * has already been sent, just return
	 */
	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
		if (send_status)
			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
		return 1;
	}

	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
		 " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);

	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
	trace_target_cmd_complete(cmd);

	spin_unlock_irq(&cmd->t_state_lock);
	ret = cmd->se_tfo->queue_status(cmd);
	if (ret)
		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
	spin_lock_irq(&cmd->t_state_lock);

	return 1;
}

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret;

	spin_lock_irq(&cmd->t_state_lock);
	ret = __transport_check_aborted_status(cmd, send_status);
	spin_unlock_irq(&cmd->t_state_lock);

	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to the fabric module by
	 * transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
				spin_unlock_irqrestore(&cmd->t_state_lock, flags);
				goto send_abort;
			}
			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return;
		}
	}
send_abort:
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	transport_lun_remove_cmd(cmd);

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
		 cmd->t_task_cdb[0], cmd->tag);

	trace_target_cmd_complete(cmd);
	ret = cmd->se_tfo->queue_status(cmd);
	if (ret)
		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}

static void target_tmr_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_ABORTED) {
		tmr->response = TMR_FUNCTION_REJECTED;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		goto check_stop;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		if (tmr->response == TMR_FUNCTION_COMPLETE) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x29,
					       ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
		}
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
		       tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_ABORTED) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		goto check_stop;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	cmd->se_tfo->queue_tm_rsp(cmd);

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	unsigned long flags;
	bool aborted = false;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_ABORTED) {
		aborted = true;
	} else {
		cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
		cmd->transport_state |= CMD_T_ACTIVE;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (aborted) {
		pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
				    " ref_tag: %llu tag: %llu\n",
				    cmd->se_tmr_req->function,
				    cmd->se_tmr_req->ref_task_tag, cmd->tag);
		transport_lun_remove_cmd(cmd);
		transport_cmd_check_stop_to_fabric(cmd);
		return 0;
	}

	INIT_WORK(&cmd->work, target_tmr_work);
	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
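
/*
 * Usage note (illustrative): fabric drivers allocate the TMR descriptor
 * (e.g. via target_submit_tmr()) and hand it to the workqueue path
 * above with:
 *
 *	transport_generic_handle_tmr(se_cmd);
 *
 * The response is always delivered asynchronously through
 * ->queue_tm_rsp() from target_tmr_work(), never from this call itself.
 */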

bool
target_check_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}

bool
target_check_fua(struct se_device *dev)
{
	return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
}
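
/*
 * Usage sketch (illustrative, assuming a WRITE(10) CDB where the FUA
 * bit is byte 1, bit 3): backend completion logic typically combines
 * these helpers to decide whether a write must be forced to stable
 * media:
 *
 *	if (target_check_fua(dev) && (cdb[1] & 0x08))
 *		// force unit access: write through the volatile cache
 *
 * target_check_fua() only reports true while a volatile write cache is
 * active (target_check_wce()), since FUA is meaningless without one.
 */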