// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nick Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_"
				"cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
				"cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

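/*
 * Allocate a new row index for the entry type specified
 */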
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
{
	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);

	wake_up(&sess->cmd_list_wq);
}

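/**
 * transport_init_session - initialize a session object
 * @se_sess: Session object pointer.
 *
 * The caller must have zero-initialized @se_sess before calling this function.
 */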
int transport_init_session(struct se_session *se_sess)
{
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	init_waitqueue_head(&se_sess->cmd_list_wq);
	return percpu_ref_init(&se_sess->cmd_count,
			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
EXPORT_SYMBOL(transport_init_session);

void transport_uninit_session(struct se_session *se_sess)
{
	percpu_ref_exit(&se_sess->cmd_count);
}

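/**
 * transport_alloc_session - allocate a session object and initialize it
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */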
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int ret;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	ret = transport_init_session(se_sess);
	if (ret < 0) {
		kmem_cache_free(se_sess_cache, se_sess);
		return ERR_PTR(ret);
	}
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_alloc_session);

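/**
 * transport_alloc_session_tags - allocate target driver private data
 * @se_sess:  Session pointer.
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 */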
int transport_alloc_session_tags(struct se_session *se_sess,
				 unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!se_sess->sess_cmd_map) {
		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
		return -ENOMEM;
	}

	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
			false, GFP_KERNEL, NUMA_NO_NODE);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

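/**
 * transport_init_session_tags - allocate a session and target driver private data
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */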
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
			    enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num:"
		       " %u, but zero tag_size\n", tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size:"
		       " %u, but zero tag_num\n", tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_alloc_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}

void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];
	unsigned long flags;

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_setup_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre allocation
	 * of I/O descriptor tags, go ahead and perform that setup now..
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_alloc_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);
		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_setup_session);

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);
	struct se_portal_group *se_tpg = nacl->se_tpg;

	if (!nacl->dynamic_stop) {
		complete(&nacl->acl_free_comp);
		return;
	}

	mutex_lock(&se_tpg->acl_node_mutex);
	list_del_init(&nacl->acl_list);
	mutex_unlock(&se_tpg->acl_node_mutex);

	core_tpg_wait_for_nacl_pr_ref(nacl);
	core_free_device_list_for_node(nacl, se_tpg);
	kfree(nacl);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;

	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		struct se_portal_group *se_tpg = se_nacl->se_tpg;
		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
		unsigned long flags;

		se_sess->se_node_acl = NULL;

		/*
		 * Also determine if we need to drop the extra ->cmd_kref if
		 * it had been previously dynamically generated, and
		 * the endpoint is not caching dynamic ACLs.
		 */
		mutex_lock(&se_tpg->acl_node_mutex);
		if (se_nacl->dynamic_node_acl &&
		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
			if (list_empty(&se_nacl->acl_sess_list))
				se_nacl->dynamic_stop = true;
			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

			if (se_nacl->dynamic_stop)
				list_del_init(&se_nacl->acl_list);
		}
		mutex_unlock(&se_tpg->acl_node_mutex);

		if (se_nacl->dynamic_stop)
			target_put_nacl(se_nacl);

		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		sbitmap_queue_free(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	transport_uninit_session(se_sess);
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

static int target_release_res(struct se_device *dev, void *data)
{
	struct se_session *sess = data;

	if (dev->reservation_holder == sess)
		target_release_reservation(dev);
	return 0;
}

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Since the session is being removed, release SPC-2
	 * reservations held by the session that is disappearing.
	 */
	target_for_each_device(target_release_res, se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->fabric_name);
	/*
	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context from within transport_free_session() code.
	 *
	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
	 * to release all remaining generate_node_acl=1 created ACL resources.
	 */

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

void target_remove_session(struct se_session *se_sess)
{
	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
}
EXPORT_SYMBOL(target_remove_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

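/*
 * This function is called by the target core after the target core has
 * finished processing a SCSI command or SCSI TMF. Both the regular command
 * processing code and the code for aborting commands can call this
 * function. CMD_T_STOP is set if and only if another thread is waiting
 * inside transport_wait_for_tasks() for t_transport_stop_comp.
 */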
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	unsigned long flags;

	target_remove_from_state_list(cmd);

	/*
	 * Clear struct se_cmd->se_lun before the handoff to FE.
	 */
	cmd->se_lun = NULL;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Some fabric modules like tcm_loop can release their internally
	 * allocated I/O reference and struct se_cmd now.
	 *
	 * Fabric modules are expected to return '1' here if the se_cmd being
	 * passed is released at this point, or zero if not being released.
	 */
	return cmd->se_tfo->check_stop_free(cmd);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
	unsigned char *cmd_sense_buf;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd_sense_buf = transport_get_sense_buffer(cmd);
	if (!cmd_sense_buf) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);

static void target_handle_abort(struct se_cmd *cmd)
{
	bool tas = cmd->transport_state & CMD_T_TAS;
	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
	int ret;

	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);

	if (tas) {
		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
				 cmd->t_task_cdb[0], cmd->tag);
			trace_target_cmd_complete(cmd);
			ret = cmd->se_tfo->queue_status(cmd);
			if (ret) {
				transport_handle_queue_full(cmd, cmd->se_dev,
							    ret, false);
				return;
			}
		} else {
			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
			cmd->se_tfo->queue_tm_rsp(cmd);
		}
	} else {
		/*
		 * Allow the fabric driver to unmap any resources before
		 * releasing the descriptor via TFO->release_cmd().
		 */
		cmd->se_tfo->aborted_task(cmd);
		if (ack_kref)
			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
		/*
		 * To do: establish a unit attention condition on the I_T
		 * nexus associated with cmd. See also the paragraph "Aborting
		 * commands" in SAM.
		 */
	}

	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);

	transport_lun_remove_cmd(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

static void target_abort_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	target_handle_abort(cmd);
}

static bool target_cmd_interrupted(struct se_cmd *cmd)
{
	int post_ret;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return true;
	} else if (cmd->transport_state & CMD_T_STOP) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		complete_all(&cmd->t_transport_stop_comp);
		return true;
	}

	return false;
}

/* May be called from interrupt context so must not sleep. */
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	int success;
	unsigned long flags;

	if (target_cmd_interrupted(cmd))
		return;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	switch (cmd->scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
		else
			success = 0;
		break;
	default:
		success = 1;
		break;
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
		  target_complete_failure_work);
	if (cmd->se_cmd_flags & SCF_USE_CPUID)
		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
	else
		queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if ((scsi_status == SAM_STAT_GOOD ||
	     cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
	    length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->fabric_name, cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
		dev->dev_attrib.block_size,
		dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
		unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if fabric enforced maximum SGL entries per I/O descriptor
	 * exceeds se_cmd->data_length.  If true, set SCF_UNDERFLOW_BIT +
	 * residual_count and reduce original cmd->data_length to maximum
	 * length based on single PAGE_SIZE entry worst-case payload.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;
			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);
			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

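/**
 * target_cmd_size_check - Check whether there will be a residual.
 * @cmd: SCSI command.
 * @size: Data buffer size derived from CDB. The data buffer size provided by
 *   the SCSI transport driver is available in @cmd->data_length.
 *
 * Compare the data buffer size from the CDB with the data buffer limit from
 * the transport header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or
 * SCF_UNDERFLOW_BIT if necessary.
 *
 * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
 *
 * Return: TCM_NO_SENSE
 */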
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->fabric_name,
				cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
				pr_err_ratelimited("Rejecting underflow/overflow"
						   " for WRITE data CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
			/*
			 * Some fabric drivers like iscsi-target still expect
			 * to always reject overflow writes, so reject the
			 * overflow case for WRITE control CDBs here as well.
			 */
			if (size > cmd->data_length) {
				pr_err_ratelimited("Rejecting overflow for"
						   " WRITE control CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->dev_attrib.block_size != 512)  {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
			       " CDB on non 512-byte sector setup subsystem"
			       " plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length.  Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);
}

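/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */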
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer, u64 unpacked_lun)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	cmd->free_compl = NULL;
	cmd->abrt_compl = NULL;
	spin_lock_init(&cmd->t_state_lock);
	INIT_WORK(&cmd->work, NULL);
	kref_init(&cmd->cmd_kref);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
	cmd->orig_fe_lun = unpacked_lun;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	sense_reason_t ret;

	cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		ret = TCM_INVALID_CDB_FIELD;
		goto err;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			ret = TCM_OUT_OF_RESOURCES;
			goto err;
		}
	}
	/*
	 * Copy the original CDB into cmd->
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);
	return 0;

err:
	/*
	 * Copy the CDB here to allow trace_target_cmd_complete() to
	 * print the cdb to the trace buffers.
	 */
	memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
					 (unsigned int)TCM_MAX_COMMAND_SIZE));
	return ret;
}
EXPORT_SYMBOL(target_cmd_init_cdb);

sense_reason_t
target_cmd_parse_cdb(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->fabric_name,
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_cmd_parse_cdb);

/*
 * Used by fabric module frontends to queue a se_cmd directly to the
 * target core for immediate processing.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling the possible
	 * dispatch of the delayed commands from TRANSPORT_NEW_CMD state,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

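/**
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *			 se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */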
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation. From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense,
				unpacked_lun);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	else
		se_cmd->cpuid = WORK_CPU_UNBOUND;

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;

	rc = target_cmd_init_cdb(se_cmd, cdb);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_cmd_parse_cdb(se_cmd);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}
	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs.  Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

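/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal target core SGL memory allocation.
 */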
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_lun_remove_cmd(se_cmd);
	transport_cmd_check_stop_to_fabric(se_cmd);
}

static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
				       u64 *unpacked_lun)
{
	struct se_cmd *se_cmd;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
			continue;

		if (se_cmd->tag == tag) {
			*unpacked_lun = se_cmd->orig_fe_lun;
			ret = true;
			break;
		}
	}
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	return ret;
}

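/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 */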
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}
	/*
	 * If this is ABORT_TASK with no explicit fabric provided LUN,
	 * go ahead and search active session tags for a match to figure
	 * out unpacked_lun for the original se_cmd.
	 */
	if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
		if (!target_lookup_lun_from_tag(se_sess, tag,
						&se_cmd->orig_fe_lun))
			goto failure;
	}

	ret = transport_lookup_tmr_lun(se_cmd);
	if (ret)
		goto failure;

	transport_generic_handle_tmr(se_cmd);
	return 0;

	/*
	 * For callback during failure handling, push this work off
	 * to process context with TMR_LUN_DOES_NOT_EXIST status.
	 */
failure:
	INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
	schedule_work(&se_cmd->work);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);

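/*
 * Handle SAM-esque emulation for generic transport request failures.
 */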
void transport_generic_request_failure(struct se_cmd *cmd,
		sense_reason_t sense_reason)
{
	int ret = 0, post_ret;

	pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
		 sense_reason);
	target_show_cmd("-----[ ", cmd);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	transport_complete_task_attr(cmd);

	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd, false, &post_ret);

	if (cmd->transport_state & CMD_T_ABORTED) {
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return;
	}

	switch (sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_ADDRESS_OUT_OF_RANGE:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
	case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
	case TCM_TOO_MANY_TARGET_DESCS:
	case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
	case TCM_TOO_MANY_SEGMENT_DESCS:
	case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
		break;
	case TCM_OUT_OF_RESOURCES:
		cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		goto queue_status;
	case TCM_LUN_BUSY:
		cmd->scsi_status = SAM_STAT_BUSY;
		goto queue_status;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defined
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
					== TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x2C,
					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
		}

		goto queue_status;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], sense_reason);
		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
	if (ret)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_status:
	trace_target_cmd_complete(cmd);
	ret = cmd->se_tfo->queue_status(cmd);
	if (!ret)
		goto check_stop;
queue_full:
	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
EXPORT_SYMBOL(transport_generic_request_failure);

void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
{
	sense_reason_t ret;

	if (!cmd->execute_cmd) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto err;
	}
	if (do_checks) {
		/*
		 * Check for an existing UNIT ATTENTION condition after
		 * target_handle_task_attr() has done SAM task attr
		 * checking, and possibly have already defered execution
		 * out to target_restart_delayed_cmds() context.
		 */
		ret = target_scsi3_ua_check(cmd);
		if (ret)
			goto err;

		ret = target_alua_state_check(cmd);
		if (ret)
			goto err;

		ret = target_check_reservation(cmd);
		if (ret) {
			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
			goto err;
		}
	}

	ret = cmd->execute_cmd(cmd);
	if (!ret)
		return;
err:
	spin_lock_irq(&cmd->t_state_lock);
	cmd->transport_state &= ~CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	transport_generic_request_failure(cmd, ret);
}

static int target_write_prot_action(struct se_cmd *cmd)
{
	u32 sectors;
	/*
	 * Perform WRITE_INSERT of PI using software emulation when backend
	 * device has PI enabled, if the transport has not already generated
	 * PI using hardware WRITE_INSERT offload.
	 */
	switch (cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
			sbc_dif_generate(cmd);
		break;
	case TARGET_PROT_DOUT_STRIP:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
			break;

		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
					     sectors, 0, cmd->t_prot_sg, 0);
		if (unlikely(cmd->pi_err)) {
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~CMD_T_SENT;
			spin_unlock_irq(&cmd->t_state_lock);
			transport_generic_request_failure(cmd, cmd->pi_err);
			return -1;
		}
		break;
	default:
		break;
	}

	return 0;
}

static bool target_handle_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return false;

	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;

	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to allow the passed struct se_cmd list of tasks to the front of the list.
	 */
	switch (cmd->sam_task_attr) {
	case TCM_HEAD_TAG:
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
			 cmd->t_task_cdb[0]);
		return false;
	case TCM_ORDERED_TAG:
		atomic_inc_mb(&dev->dev_ordered_sync);

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
			 cmd->t_task_cdb[0]);

		/*
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
		 */
		if (!atomic_read(&dev->simple_cmds))
			return false;
		break;
	default:
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc_mb(&dev->simple_cmds);
		break;
	}

	if (atomic_read(&dev->dev_ordered_sync) == 0)
		return false;

	spin_lock(&dev->delayed_cmd_lock);
	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
	spin_unlock(&dev->delayed_cmd_lock);

	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
		cmd->t_task_cdb[0], cmd->sam_task_attr);
	return true;
}

void target_execute_cmd(struct se_cmd *cmd)
{
	/*
	 * If the received CDB has already been aborted stop processing it here.
	 */
	if (target_cmd_interrupted(cmd))
		return;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	if (target_write_prot_action(cmd))
		return;

	if (target_handle_task_attr(cmd)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~CMD_T_SENT;
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}

	__target_execute_cmd(cmd, true);
}
EXPORT_SYMBOL(target_execute_cmd);

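/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */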
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		cmd->transport_state |= CMD_T_SENT;

		__target_execute_cmd(cmd, true);

		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
			break;
	}
}

/*
 * Complete SAM task attribute accounting for @cmd, and restart any commands
 * that were delayed behind an ORDERED boundary.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return;

	if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
		goto restart;

	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
		atomic_dec_mb(&dev->simple_cmds);
		dev->dev_cur_ordered_id++;
	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
			 dev->dev_cur_ordered_id);
	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
		atomic_dec_mb(&dev->dev_ordered_sync);

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
			 dev->dev_cur_ordered_id);
	}
	cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;

restart:
	target_restart_delayed_cmds(dev);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	transport_complete_task_attr(cmd);
	/*
	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
	 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and
	 * the same callbacks should not be retried.  Return CHECK_CONDITION
	 * if a scsi_status is not already set.
	 *
	 * If a fabric driver ->queue_status() has returned non zero, always
	 * keep retrying until they succeed.
	 */
	if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
		if (cmd->scsi_status)
			goto queue_status;

		translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
		goto queue_status;
	}

	/*
	 * Check if we need to send a sense buffer from
	 * the struct se_cmd in question. We do NOT want
	 * to take this path if the IO has been marked as
	 * needing to be treated like a "normal read". This
	 * is the case if it's a tape read, and either the
	 * FM, EOM, or ILI bits are set, but there is no
	 * sense data.
	 */
	if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
	    cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
		goto queue_status;

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		/* queue status if not treating this as a normal read */
		if (cmd->scsi_status &&
		    !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
			goto queue_status;

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->se_cmd_flags & SCF_BIDI) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			break;
		}
		/* fall through */
	case DMA_NONE:
queue_status:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
					int err, bool write_pending)
{
	/*
	 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
	 * ->queue_data_in() callbacks from new process context.
	 *
	 * Otherwise for other errors, transport_complete_qf() will send
	 * CHECK_CONDITION via ->queue_status() instead of attempting to
	 * retry associated fabric driver data-transfer callbacks.
	 */
	if (err == -EAGAIN || err == -ENOMEM) {
		cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
						 TRANSPORT_COMPLETE_QF_OK;
	} else {
		pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
		cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
	}

	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc_mb(&dev->dev_qf_count);
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}

static bool target_read_prot_action(struct se_cmd *cmd)
{
	switch (cmd->prot_op) {
	case TARGET_PROT_DIN_STRIP:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
			u32 sectors = cmd->data_length >>
				  ilog2(cmd->se_dev->dev_attrib.block_size);

			cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
						     sectors, 0, cmd->t_prot_sg,
						     0);
			if (cmd->pi_err)
				return true;
		}
		break;
	case TARGET_PROT_DIN_INSERT:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
			break;

		sbc_dif_generate(cmd);
		break;
	default:
		break;
	}

	return false;
}

2326static void target_complete_ok_work(struct work_struct *work)
2327{
2328 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2329 int ret;
2330
2331
2332
2333
2334
2335
2336 transport_complete_task_attr(cmd);
2337
2338
2339
2340
2341
2342 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2343 schedule_work(&cmd->se_dev->qf_work_queue);
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2355 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2356 WARN_ON(!cmd->scsi_status);
2357 ret = transport_send_check_condition_and_sense(
2358 cmd, 0, 1);
2359 if (ret)
2360 goto queue_full;
2361
2362 transport_lun_remove_cmd(cmd);
2363 transport_cmd_check_stop_to_fabric(cmd);
2364 return;
2365 }
2366
2367
2368
2369
2370 if (cmd->transport_complete_callback) {
2371 sense_reason_t rc;
2372 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2373 bool zero_dl = !(cmd->data_length);
2374 int post_ret = 0;
2375
2376 rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2377 if (!rc && !post_ret) {
2378 if (caw && zero_dl)
2379 goto queue_rsp;
2380
2381 return;
2382 } else if (rc) {
2383 ret = transport_send_check_condition_and_sense(cmd,
2384 rc, 0);
2385 if (ret)
2386 goto queue_full;
2387
2388 transport_lun_remove_cmd(cmd);
2389 transport_cmd_check_stop_to_fabric(cmd);
2390 return;
2391 }
2392 }
2393
2394queue_rsp:
2395 switch (cmd->data_direction) {
2396 case DMA_FROM_DEVICE:
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
		if (cmd->scsi_status &&
		    !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
			goto queue_status;

		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.tx_data_octets);

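		/*
		 * Perform READ_STRIP of PI using software emulation when
		 * backend had PI enabled, if the transport will not be
		 * performing hardware READ_STRIP offload.
		 */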
		if (target_read_prot_action(cmd)) {
			ret = transport_send_check_condition_and_sense(cmd,
						cmd->pi_err, 0);
			if (ret)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.rx_data_octets);
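		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */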
		if (cmd->se_cmd_flags & SCF_BIDI) {
			atomic_long_add(cmd->data_length,
					&cmd->se_lun->lun_stats.tx_data_octets);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret)
				goto queue_full;
			break;
		}
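		/* fall through */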
	case DMA_NONE:
queue_status:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		 " data_direction: %d\n", cmd, cmd->data_direction);

	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}

void target_free_sgl(struct scatterlist *sgl, int nents)
{
	sgl_free_n_order(sgl, nents, 0);
}
EXPORT_SYMBOL(target_free_sgl);

static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
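	/*
	 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
	 */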
	if (!cmd->t_data_sg_orig)
		return;

	kfree(cmd->t_data_sg);
	cmd->t_data_sg = cmd->t_data_sg_orig;
	cmd->t_data_sg_orig = NULL;
	cmd->t_data_nents = cmd->t_data_nents_orig;
	cmd->t_data_nents_orig = 0;
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
		cmd->t_prot_sg = NULL;
		cmd->t_prot_nents = 0;
	}

	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
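		/*
		 * Release special case READ buffer payload required for
		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
		 */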
		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
			target_free_sgl(cmd->t_bidi_data_sg,
					cmd->t_bidi_data_nents);
			cmd->t_bidi_data_sg = NULL;
			cmd->t_bidi_data_nents = 0;
		}
		transport_reset_sgl_orig(cmd);
		return;
	}
	transport_reset_sgl_orig(cmd);

	target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

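	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */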
	if (!cmd->t_data_nents)
		return NULL;

	BUG_ON(!sg);
	if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

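	/* >1 page. use vmap */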
	pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

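	/* convert sgl to vmap-able pages */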
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);

int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
		 bool zero_page, bool chainable)
{
	gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);

	*sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
	return *sgl ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(target_alloc_sgl);

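/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead. Otherwise place it on the execution queue.
 */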
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret = 0;
	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

	if (cmd->prot_op != TARGET_PROT_NORMAL &&
	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
				       cmd->prot_length, true, false);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

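	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * for the fabric.
	 */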
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {

		if ((cmd->se_cmd_flags & SCF_BIDI) ||
		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
			u32 bidi_length;

			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
				bidi_length = cmd->t_task_nolb *
					      cmd->se_dev->dev_attrib.block_size;
			else
				bidi_length = cmd->data_length;

			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
					       &cmd->t_bidi_data_nents,
					       bidi_length, zero_flag, false);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				       cmd->data_length, zero_flag, false);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
		    cmd->data_length) {
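		/*
		 * Special case for COMPARE_AND_WRITE with fabrics
		 * using SG_TO_MEM_NOALLOC to obtain WRITE payloads.
		 */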
		u32 caw_length = cmd->t_task_nolb *
				 cmd->se_dev->dev_attrib.block_size;

		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
				       &cmd->t_bidi_data_nents,
				       caw_length, zero_flag, false);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

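	/*
	 * If this command is not a write we can execute it right here,
	 * for write buffers we need to notify the fabric driver first
	 * and let it call back once the write buffers are ready.
	 */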
	target_add_to_state_list(cmd);
	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
		target_execute_cmd(cmd);
		return 0;
	}

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;

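	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */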
	if (cmd->transport_state & CMD_T_STOP &&
	    !cmd->se_tfo->write_pending_must_be_called) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			 __func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 0;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret)
		goto queue_full;

	return 0;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;
	bool stop;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (stop) {
		pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
			 __func__, __LINE__, cmd->tag);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	}

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
	}
}

static bool
__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
			   unsigned long *flags);

static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

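/*
 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
 * finished.
 */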
void target_put_cmd_and_wait(struct se_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(compl);

	WARN_ON_ONCE(cmd->abrt_compl);
	cmd->abrt_compl = &compl;
	target_put_sess_cmd(cmd);
	wait_for_completion(&compl);
}

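/**
 * transport_generic_free_cmd - free a command
 * @cmd: command to free
 * @wait_for_tasks: if true, wait for the command to complete before freeing it.
 *
 * Called by fabric drivers once processing of a command has finished. If
 * @wait_for_tasks is set, this function first waits until the storage engine
 * has stopped processing @cmd. If CMD_T_ABORTED has been set, it also waits
 * until the abort handler has dropped its reference before returning.
 *
 * Returns 1 if the command was found to be aborted, 0 otherwise.
 */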
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	int ret = 0;
	bool aborted = false, tas = false;

	if (wait_for_tasks)
		target_wait_free_cmd(cmd, &aborted, &tas);

	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
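		/*
		 * Handle WRITE failure case where transport_generic_new_cmd()
		 * has already added se_cmd to state_list, but fabric has
		 * failed command before I/O submission.
		 */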
		if (cmd->state_active)
			target_remove_from_state_list(cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);
	}
	if (aborted)
		cmd->free_compl = &compl;
	ret = target_put_sess_cmd(cmd);
	if (aborted) {
		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
		wait_for_completion(&compl);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);

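/**
 * target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 *
 * Returns 0 on success, -EINVAL if the initial reference could not be taken,
 * or -ESHUTDOWN if the session is being torn down.
 */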
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;
	int ret = 0;

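	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */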
	if (ack_kref) {
		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
			return -EINVAL;

		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		ret = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
	percpu_ref_get(&se_sess->cmd_count);
out:
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	if (ret && ack_kref)
		target_put_sess_cmd(se_cmd);

	return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);

static void target_free_cmd_mem(struct se_cmd *cmd)
{
	transport_free_pages(cmd);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
}

static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	struct completion *free_compl = se_cmd->free_compl;
	struct completion *abrt_compl = se_cmd->abrt_compl;
	unsigned long flags;

	if (se_sess) {
		spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
		list_del_init(&se_cmd->se_cmd_list);
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
	}

	target_free_cmd_mem(se_cmd);
	se_cmd->se_tfo->release_cmd(se_cmd);
	if (free_compl)
		complete(free_compl);
	if (abrt_compl)
		complete(abrt_compl);

	/* Commands without a session never took a cmd_count reference. */
	if (se_sess)
		percpu_ref_put(&se_sess->cmd_count);
}

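/**
 * target_put_sess_cmd - decrease the command reference count
 * @se_cmd:	command to drop a reference from
 *
 * Returns 1 if and only if this target_put_sess_cmd() call caused the
 * refcount to drop to zero and the se_cmd has been released.
 */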
int target_put_sess_cmd(struct se_cmd *se_cmd)
{
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);

static const char *data_dir_name(enum dma_data_direction d)
{
	switch (d) {
	case DMA_BIDIRECTIONAL:	return "BIDI";
	case DMA_TO_DEVICE:	return "WRITE";
	case DMA_FROM_DEVICE:	return "READ";
	case DMA_NONE:		return "NONE";
	}

	return "(?)";
}

static const char *cmd_state_name(enum transport_state_table t)
{
	switch (t) {
	case TRANSPORT_NO_STATE:	return "NO_STATE";
	case TRANSPORT_NEW_CMD:		return "NEW_CMD";
	case TRANSPORT_WRITE_PENDING:	return "WRITE_PENDING";
	case TRANSPORT_PROCESSING:	return "PROCESSING";
	case TRANSPORT_COMPLETE:	return "COMPLETE";
	case TRANSPORT_ISTATE_PROCESSING:
					return "ISTATE_PROCESSING";
	case TRANSPORT_COMPLETE_QF_WP:	return "COMPLETE_QF_WP";
	case TRANSPORT_COMPLETE_QF_OK:	return "COMPLETE_QF_OK";
	case TRANSPORT_COMPLETE_QF_ERR:	return "COMPLETE_QF_ERR";
	}

	return "(?)";
}

static void target_append_str(char **str, const char *txt)
{
	char *prev = *str;

	*str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
		kstrdup(txt, GFP_ATOMIC);
	kfree(prev);
}

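/*
 * Convert a transport state bitmask into a string. The caller is
 * responsible for freeing the returned string.
 */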
static char *target_ts_to_str(u32 ts)
{
	char *str = NULL;

	if (ts & CMD_T_ABORTED)
		target_append_str(&str, "aborted");
	if (ts & CMD_T_ACTIVE)
		target_append_str(&str, "active");
	if (ts & CMD_T_COMPLETE)
		target_append_str(&str, "complete");
	if (ts & CMD_T_SENT)
		target_append_str(&str, "sent");
	if (ts & CMD_T_STOP)
		target_append_str(&str, "stop");
	if (ts & CMD_T_FABRIC_STOP)
		target_append_str(&str, "fabric_stop");

	return str;
}

static const char *target_tmf_name(enum tcm_tmreq_table tmf)
{
	switch (tmf) {
	case TMR_ABORT_TASK:		return "ABORT_TASK";
	case TMR_ABORT_TASK_SET:	return "ABORT_TASK_SET";
	case TMR_CLEAR_ACA:		return "CLEAR_ACA";
	case TMR_CLEAR_TASK_SET:	return "CLEAR_TASK_SET";
	case TMR_LUN_RESET:		return "LUN_RESET";
	case TMR_TARGET_WARM_RESET:	return "TARGET_WARM_RESET";
	case TMR_TARGET_COLD_RESET:	return "TARGET_COLD_RESET";
	case TMR_LUN_RESET_PRO:		return "LUN_RESET_PRO";
	case TMR_UNKNOWN:		break;
	}
	return "(?)";
}

void target_show_cmd(const char *pfx, struct se_cmd *cmd)
{
	char *ts_str = target_ts_to_str(cmd->transport_state);
	const u8 *cdb = cmd->t_task_cdb;
	struct se_tmr_req *tmf = cmd->se_tmr_req;

	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
			 pfx, cdb[0], cdb[1], cmd->tag,
			 data_dir_name(cmd->data_direction),
			 cmd->se_tfo->get_cmd_state(cmd),
			 cmd_state_name(cmd->t_state), cmd->data_length,
			 kref_read(&cmd->cmd_kref), ts_str);
	} else {
		pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
			 pfx, target_tmf_name(tmf->function), cmd->tag,
			 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
			 cmd_state_name(cmd->t_state),
			 kref_read(&cmd->cmd_kref), ts_str);
	}
	kfree(ts_str);
}
EXPORT_SYMBOL(target_show_cmd);

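/**
 * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued.
 * @se_sess:	session to flag
 */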
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	se_sess->sess_tearing_down = 1;
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	percpu_ref_kill(&se_sess->cmd_count);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);

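/**
 * target_wait_for_sess_cmds - Wait for outstanding commands
 * @se_sess:	session to wait for active I/O
 */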
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
	struct se_cmd *cmd;
	int ret;

	WARN_ON_ONCE(!se_sess->sess_tearing_down);

	do {
		ret = wait_event_timeout(se_sess->cmd_list_wq,
				percpu_ref_is_zero(&se_sess->cmd_count),
				180 * HZ);
		list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
			target_show_cmd("session shutdown: still waiting for ",
					cmd);
	} while (ret <= 0);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

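/*
 * Prevent new percpu_ref_tryget_live() calls from succeeding and wait until
 * all references to the LUN have been released. Called during LUN shutdown.
 */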
void transport_clear_lun_ref(struct se_lun *lun)
{
	percpu_ref_kill(&lun->lun_ref);
	wait_for_completion(&lun->lun_shutdown_comp);
}

static bool
__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
			   bool *aborted, bool *tas, unsigned long *flags)
	__releases(&cmd->t_state_lock)
	__acquires(&cmd->t_state_lock)
{
	assert_spin_locked(&cmd->t_state_lock);
	WARN_ON_ONCE(!irqs_disabled());

	if (fabric_stop)
		cmd->transport_state |= CMD_T_FABRIC_STOP;

	if (cmd->transport_state & CMD_T_ABORTED)
		*aborted = true;

	if (cmd->transport_state & CMD_T_TAS)
		*tas = true;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		return false;

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		return false;

	if (!(cmd->transport_state & CMD_T_ACTIVE))
		return false;

	if (fabric_stop && *aborted)
		return false;

	cmd->transport_state |= CMD_T_STOP;

	target_show_cmd("wait_for_tasks: Stopping ", cmd);

	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

	while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
					    180 * HZ))
		target_show_cmd("wait for tasks: ", cmd);

	spin_lock_irqsave(&cmd->t_state_lock, *flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
		 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);

	return true;
}

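/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait on
 *
 * Called from frontend fabric context to wait for the storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */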
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;
	bool ret, aborted = false, tas = false;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

struct sense_info {
	u8 key;
	u8 asc;
	u8 ascq;
	bool add_sector_info;
};

static const struct sense_info sense_info_table[] = {
	[TCM_NO_SENSE] = {
		.key = NOT_READY
	},
	[TCM_NON_EXISTENT_LUN] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x25
	},
	[TCM_UNSUPPORTED_SCSI_OPCODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20,
	},
	[TCM_SECTOR_COUNT_TOO_MANY] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20,
	},
	[TCM_UNKNOWN_MODE_PAGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24,
	},
	[TCM_CHECK_CONDITION_ABORT_CMD] = {
		.key = ABORTED_COMMAND,
		.asc = 0x29,
		.ascq = 0x03,
	},
	[TCM_INCORRECT_AMOUNT_OF_DATA] = {
		.key = ABORTED_COMMAND,
		.asc = 0x0c,
		.ascq = 0x0d,
	},
	[TCM_INVALID_CDB_FIELD] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24,
	},
	[TCM_INVALID_PARAMETER_LIST] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
	},
	[TCM_TOO_MANY_TARGET_DESCS] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x06,
	},
	[TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x07,
	},
	[TCM_TOO_MANY_SEGMENT_DESCS] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x08,
	},
	[TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x09,
	},
	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x1a,
	},
	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x0c,
		.ascq = 0x0c,
	},
	[TCM_SERVICE_CRC_ERROR] = {
		.key = ABORTED_COMMAND,
		.asc = 0x47,
		.ascq = 0x05,
	},
	[TCM_SNACK_REJECTED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x11,
		.ascq = 0x13,
	},
	[TCM_WRITE_PROTECTED] = {
		.key = DATA_PROTECT,
		.asc = 0x27,
	},
	[TCM_ADDRESS_OUT_OF_RANGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x21,
	},
	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
		.key = UNIT_ATTENTION,
	},
	[TCM_CHECK_CONDITION_NOT_READY] = {
		.key = NOT_READY,
	},
	[TCM_MISCOMPARE_VERIFY] = {
		.key = MISCOMPARE,
		.asc = 0x1d,
		.ascq = 0x00,
	},
	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x01,
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x02,
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x03,
		.add_sector_info = true,
	},
	[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
		.key = COPY_ABORTED,
		.asc = 0x0d,
		.ascq = 0x02,
	},
	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
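		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators.  Returning NOT READY instead means the
		 * operations will be retried a finite number of times and if
		 * the TUR is bypassed with ALUA, the IO will fail.
		 */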
		.key = NOT_READY,
		.asc = 0x08,
	},
	[TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
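		/*
		 * From spc4r22 sections 5.7.7 and 5.7.8:
		 * If a PERSISTENT RESERVE OUT command with a REGISTER service
		 * action, a REGISTER AND IGNORE EXISTING KEY service action,
		 * or a REGISTER AND MOVE service action is attempted, but
		 * there are insufficient device server resources to complete
		 * the operation, then the command shall be terminated with
		 * CHECK CONDITION status, with the sense key set to ILLEGAL
		 * REQUEST, and the additional sense code set to INSUFFICIENT
		 * REGISTRATION RESOURCES.
		 */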
		.key = ILLEGAL_REQUEST,
		.asc = 0x55,
		.ascq = 0x04,
	},
};

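/**
 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
 * @cmd: SCSI command in which the resulting sense buffer will be shoved
 * @reason: LIO sense reason code. If this argument has the value
 * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
 * dequeuing a unit attention fails due to multiple commands being processed
 * concurrently, set the command status to SAM_STAT_BUSY.
 */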
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
	const struct sense_info *si;
	u8 *buffer = cmd->sense_buffer;
	int r = (__force int)reason;
	u8 key, asc, ascq;
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
		si = &sense_info_table[r];
	else
		si = &sense_info_table[(__force int)
				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];

	key = si->key;
	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
		if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
						       &ascq)) {
			cmd->scsi_status = SAM_STAT_BUSY;
			return;
		}
	} else if (si->asc == 0) {
		WARN_ON_ONCE(cmd->scsi_asc == 0);
		asc = cmd->scsi_asc;
		ascq = cmd->scsi_ascq;
	} else {
		asc = si->asc;
		ascq = si->ascq;
	}

	cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
	scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
	if (si->add_sector_info)
		WARN_ON_ONCE(scsi_set_sense_information(buffer,
							cmd->scsi_sense_length,
							cmd->bad_sector) < 0);
}

int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{
	unsigned long flags;

	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!from_transport)
		translate_sense_reason(cmd, reason);

	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

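/**
 * target_send_busy - Send SCSI BUSY status back to the initiator
 * @cmd: SCSI command for which to send a BUSY reply
 *
 * Note: Only call this function if ACA has been disabled, otherwise
 * SAM_STAT_TASK_SET_FULL should be sent instead.
 */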
int target_send_busy(struct se_cmd *cmd)
{
	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);

	cmd->scsi_status = SAM_STAT_BUSY;
	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(target_send_busy);

static void target_tmr_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	if (cmd->transport_state & CMD_T_ABORTED)
		goto aborted;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		if (tmr->response == TMR_FUNCTION_COMPLETE) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x29,
					       ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
		}
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
		       tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	if (cmd->transport_state & CMD_T_ABORTED)
		goto aborted;

	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

aborted:
	target_handle_abort(cmd);
}

int transport_generic_handle_tmr(struct se_cmd *cmd)
{
	unsigned long flags;
	bool aborted = false;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_ABORTED) {
		aborted = true;
	} else {
		cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
		cmd->transport_state |= CMD_T_ACTIVE;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (aborted) {
		pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
				    cmd->se_tmr_req->function,
				    cmd->se_tmr_req->ref_task_tag, cmd->tag);
		target_handle_abort(cmd);
		return 0;
	}

	INIT_WORK(&cmd->work, target_tmr_work);
	schedule_work(&cmd->work);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

bool
target_check_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}

bool
target_check_fua(struct se_device *dev)
{
	return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
}