#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create("t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create("t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
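
/*
 * Example (sketch only, not part of this file): a fabric driver that
 * manages its own per-command allocations can take a bare session
 * without a tag pool:
 *
 *	struct se_session *sess = transport_init_session(TARGET_PROT_NORMAL);
 *
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */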

int transport_alloc_session_tags(struct se_session *se_sess,
				 unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
					GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!se_sess->sess_cmd_map) {
		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
		if (!se_sess->sess_cmd_map) {
			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
			return -ENOMEM;
		}
	}

	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool, tag_num: %u\n",
		       tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

struct se_session *transport_init_session_tags(unsigned int tag_num,
					       unsigned int tag_size,
					       enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num: %u, but zero tag_size\n",
		       tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size: %u, but zero tag_num\n",
		       tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session_tags);
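
/*
 * Example (sketch; struct my_fabric_cmd is a hypothetical name): fabrics
 * that pre-allocate per-command state size the percpu_ida tag pool to
 * their queue depth, with tag_size bytes reserved per tag in sess_cmd_map:
 *
 *	sess = transport_init_session_tags(128, sizeof(struct my_fabric_cmd),
 *					   TARGET_PROT_NORMAL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */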

void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;

	if (se_nacl) {
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);

		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_alloc_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_init_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}

	if (callback != NULL) {
		int rc = callback(tpg, sess, private);

		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_alloc_session);
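
/*
 * Example (sketch; my_fabric_session_cb, my_conn and my_cmd are
 * hypothetical names): a fabric login path can allocate, run a callback
 * to publish the session, and register it in one call:
 *
 *	static int my_fabric_session_cb(struct se_portal_group *se_tpg,
 *					struct se_session *se_sess, void *p)
 *	{
 *		struct my_conn *conn = p;
 *
 *		conn->sess = se_sess;
 *		return 0;
 *	}
 *
 *	sess = target_alloc_session(&tpg->se_tpg, 128, sizeof(struct my_cmd),
 *				    TARGET_PROT_NORMAL, initiator_name,
 *				    conn, my_fabric_session_cb);
 */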

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1;
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;

	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);

		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;

	if (se_nacl) {
		se_sess->se_node_acl = NULL;
		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		percpu_ida_destroy(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	const struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool drop_nacl = false;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	se_nacl = se_sess->se_node_acl;

	mutex_lock(&se_tpg->acl_node_mutex);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			drop_nacl = true;
		}
	}
	mutex_unlock(&se_tpg->acl_node_mutex);

	if (drop_nacl) {
		core_tpg_wait_for_nacl_pr_ref(se_nacl);
		core_free_device_list_for_node(se_nacl, se_tpg);
		se_sess->se_node_acl = NULL;
		kfree(se_nacl);
	}
	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		 se_tpg->se_tpg_tfo->get_fabric_name());

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	if (cmd->transport_state & CMD_T_BUSY)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
				    bool write_pending)
{
	unsigned long flags;

	if (remove_from_lists) {
		target_remove_from_state_list(cmd);

		cmd->se_lun = NULL;
	}

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (write_pending)
		cmd->t_state = TRANSPORT_WRITE_PENDING;

	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			 __func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}

	cmd->transport_state &= ~CMD_T_ACTIVE;
	if (remove_from_lists) {
		if (cmd->se_tfo->check_stop_free != NULL) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return cmd->se_tfo->check_stop_free(cmd);
		}
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, true, false);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);

	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
		transport_lun_remove_cmd(cmd);

	if (remove)
		cmd->se_tfo->aborted_task(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove && ack_kref)
		transport_put_cmd(cmd);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	if (dev && dev->transport->transport_complete) {
		dev->transport->transport_complete(cmd,
				cmd->t_data_sg,
				transport_get_sense_buffer(cmd));
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
	}

	if (cmd->transport_state & CMD_T_ABORTED ||
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	} else if (!success) {
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->se_cmd_flags & SCF_USE_CPUID)
		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
	else
		queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
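
/*
 * Example (sketch only; my_backend_end_io and its bio usage are
 * hypothetical, not part of this file): a backend typically calls
 * target_complete_cmd() from its asynchronous completion path, mapping
 * driver status onto a SAM status code:
 *
 *	static void my_backend_end_io(struct bio *bio)
 *	{
 *		struct se_cmd *cmd = bio->bi_private;
 *
 *		target_complete_cmd(cmd, bio->bi_error ?
 *				SAM_STAT_CHECK_CONDITION : SAM_STAT_GOOD);
 *		bio_put(bio);
 *	}
 */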

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					     qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue context: %s\n",
			 cmd->se_tfo->get_fabric_name(), cmd,
			 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			 : "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
		       dev->dev_attrib.block_size,
		       dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
			vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
			vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01:
		snprintf(buf, sizeof(buf),
			 "T10 VPD Binary Device Identifier: %s\n",
			 &vpd->device_identifier[0]);
		break;
	case 0x02:
		snprintf(buf, sizeof(buf),
			 "T10 VPD ASCII Device Identifier: %s\n",
			 &vpd->device_identifier[0]);
		break;
	case 0x03:
		snprintf(buf, sizeof(buf),
			 "T10 VPD UTF-8 Device Identifier: %s\n",
			 &vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4;

	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01:
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02:
	case 0x03:
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
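
/*
 * For reference, the SPC INQUIRY EVPD 0x83 designation descriptor bytes
 * that the helpers above decode:
 *
 *	page_83[0]: PROTOCOL IDENTIFIER (bits 7:4) | CODE SET (bits 3:0)
 *	page_83[1]: PIV (bit 7) | ASSOCIATION (bits 5:4) |
 *		    DESIGNATOR TYPE (bits 3:0)
 *	page_83[3]: DESIGNATOR LENGTH
 *	page_83[4..]: designator bytes consumed by transport_set_vpd_ident()
 */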

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;

	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;

			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;

		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);

			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length: %u does not match SCSI CDB Length: %u for SAM Opcode: 0x%02x\n",
			cmd->se_tfo->get_fabric_name(),
			cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE &&
		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
			pr_err("Rejecting underflow/overflow WRITE data\n");
			return TCM_INVALID_CDB_FIELD;
		}

		if (dev->dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op CDB on non 512-byte sector setup subsystem plugin: %s\n",
			       dev->transport->name);
			return TCM_INVALID_CDB_FIELD;
		}

		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);
}

void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	kref_init(&cmd->cmd_kref);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);
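
/*
 * Example (sketch): fabrics that cannot use target_submit_cmd() fill in
 * the descriptor by hand before submission; sense_buf here is a
 * fabric-owned buffer (hypothetical name):
 *
 *	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
 *			      data_length, DMA_FROM_DEVICE, TCM_SIMPLE_TAG,
 *			      &my_cmd->sense_buf[0]);
 */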

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
		       scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return TCM_INVALID_CDB_FIELD;
	}

	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
					  GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
			       scsi_command_size(cdb),
			       (unsigned long)sizeof(cmd->__t_task_cdb));
			return TCM_OUT_OF_RESOURCES;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];

	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->get_fabric_name(),
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called from interrupt context\n");
		return -EINVAL;
	}

	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      data_length, data_dir, task_attr, sense);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	else
		se_cmd->cpuid = WORK_CPU_UNBOUND;

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;

	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;

	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;

	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	if (sgl_count != 0) {
		BUG_ON(!sgl);

		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		    se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
						      sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);
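
/*
 * Example (sketch; hdr, conn and my_cmd are hypothetical fabric types):
 * dispatching a READ CDB received off the wire. se_cmd->tag must be set
 * by the caller, and a non-zero return means the session is being torn
 * down and the request must be failed locally:
 *
 *	my_cmd->se_cmd.tag = hdr->tag;
 *	rc = target_submit_cmd(&my_cmd->se_cmd, conn->sess, hdr->cdb,
 *			       my_cmd->sense_buf, scsilun_to_int(&hdr->lun),
 *			       be32_to_cpu(hdr->data_length), TCM_SIMPLE_TAG,
 *			       DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF);
 */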

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_cmd_check_stop_to_fabric(se_cmd);
}

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, TCM_SIMPLE_TAG, sense);

	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
		return 0;
	}
	transport_generic_handle_tmr(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
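
/*
 * Example (sketch, hypothetical fabric types): queueing an ABORT TASK
 * received from the initiator, using GFP_KERNEL from process context:
 *
 *	rc = target_submit_tmr(&tmr->se_cmd, conn->sess, NULL,
 *			       scsilun_to_int(&hdr->lun), tmr,
 *			       TMR_ABORT_TASK, GFP_KERNEL,
 *			       be64_to_cpu(hdr->referenced_tag),
 *			       TARGET_SCF_ACK_KREF);
 */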

void transport_generic_request_failure(struct se_cmd *cmd,
		sense_reason_t sense_reason)
{
	int ret = 0, post_ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx CDB: 0x%02x\n",
		 cmd, cmd->tag, cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
		 cmd->se_tfo->get_cmd_state(cmd),
		 cmd->t_state, sense_reason);
	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		 (cmd->transport_state & CMD_T_ACTIVE) != 0,
		 (cmd->transport_state & CMD_T_STOP) != 0,
		 (cmd->transport_state & CMD_T_SENT) != 0);

	transport_complete_task_attr(cmd);

	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
	    cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd, false, &post_ret);

	switch (sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_ADDRESS_OUT_OF_RANGE:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
	case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
		break;
	case TCM_OUT_OF_RESOURCES:
		sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		break;
	case TCM_RESERVATION_CONFLICT:
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;

		if (cmd->se_sess &&
		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x2C,
					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
		}
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
		       cmd->t_task_cdb[0], sense_reason);
		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
{
	sense_reason_t ret;

	if (!cmd->execute_cmd) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto err;
	}
	if (do_checks) {
		ret = target_scsi3_ua_check(cmd);
		if (ret)
			goto err;

		ret = target_alua_state_check(cmd);
		if (ret)
			goto err;

		ret = target_check_reservation(cmd);
		if (ret) {
			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
			goto err;
		}
	}

	ret = cmd->execute_cmd(cmd);
	if (!ret)
		return;
err:
	spin_lock_irq(&cmd->t_state_lock);
	cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
	spin_unlock_irq(&cmd->t_state_lock);

	transport_generic_request_failure(cmd, ret);
}

static int target_write_prot_action(struct se_cmd *cmd)
{
	u32 sectors;

	switch (cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
			sbc_dif_generate(cmd);
		break;
	case TARGET_PROT_DOUT_STRIP:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
			break;

		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
					     sectors, 0, cmd->t_prot_sg, 0);
		if (unlikely(cmd->pi_err)) {
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
			spin_unlock_irq(&cmd->t_state_lock);
			transport_generic_request_failure(cmd, cmd->pi_err);
			return -1;
		}
		break;
	default:
		break;
	}

	return 0;
}

static bool target_handle_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return false;

	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;

	switch (cmd->sam_task_attr) {
	case TCM_HEAD_TAG:
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
			 cmd->t_task_cdb[0]);
		return false;
	case TCM_ORDERED_TAG:
		atomic_inc_mb(&dev->dev_ordered_sync);

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
			 cmd->t_task_cdb[0]);

		if (!atomic_read(&dev->simple_cmds))
			return false;
		break;
	default:
		atomic_inc_mb(&dev->simple_cmds);
		break;
	}

	if (atomic_read(&dev->dev_ordered_sync) == 0)
		return false;

	spin_lock(&dev->delayed_cmd_lock);
	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
	spin_unlock(&dev->delayed_cmd_lock);

	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
		 cmd->t_task_cdb[0], cmd->sam_task_attr);
	return true;
}

static int __transport_check_aborted_status(struct se_cmd *, int);

void target_execute_cmd(struct se_cmd *cmd)
{
	spin_lock_irq(&cmd->t_state_lock);
	if (__transport_check_aborted_status(cmd, 1)) {
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			 __func__, __LINE__, cmd->tag);

		spin_unlock_irq(&cmd->t_state_lock);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	}

	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	if (target_write_prot_action(cmd))
		return;

	if (target_handle_task_attr(cmd)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}

	__target_execute_cmd(cmd, true);
}
EXPORT_SYMBOL(target_execute_cmd);
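
/*
 * Example (sketch, hypothetical fabric names): a fabric's WRITE data-in
 * completion hands the command back to the core once the full payload
 * has been received into cmd->t_data_sg:
 *
 *	static void my_fabric_write_data_received(struct my_cmd *cmd)
 *	{
 *		target_execute_cmd(&cmd->se_cmd);
 *	}
 */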

static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		__target_execute_cmd(cmd, true);

		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
			break;
	}
}

static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return;

	if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
		goto restart;

	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
		atomic_dec_mb(&dev->simple_cmds);
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
			 dev->dev_cur_ordered_id);
	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
			 dev->dev_cur_ordered_id);
	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
		atomic_dec_mb(&dev->dev_ordered_sync);

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
			 dev->dev_cur_ordered_id);
	}
restart:
	target_restart_delayed_cmds(dev);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		if (cmd->scsi_status)
			goto queue_status;

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->se_cmd_flags & SCF_BIDI) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			break;
		}
		/* fall through */
	case DMA_NONE:
queue_status:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc_mb(&dev->dev_qf_count);
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}

static bool target_read_prot_action(struct se_cmd *cmd)
{
	switch (cmd->prot_op) {
	case TARGET_PROT_DIN_STRIP:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
			u32 sectors = cmd->data_length >>
				      ilog2(cmd->se_dev->dev_attrib.block_size);

			cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
						     sectors, 0, cmd->t_prot_sg,
						     0);
			if (cmd->pi_err)
				return true;
		}
		break;
	case TARGET_PROT_DIN_INSERT:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
			break;

		sbc_dif_generate(cmd);
		break;
	default:
		break;
	}

	return false;
}

static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int ret;

	transport_complete_task_attr(cmd);

	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		WARN_ON(!cmd->scsi_status);
		ret = transport_send_check_condition_and_sense(
					cmd, 0, 1);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;

		transport_lun_remove_cmd(cmd);
		transport_cmd_check_stop_to_fabric(cmd);
		return;
	}

	if (cmd->transport_complete_callback) {
		sense_reason_t rc;
		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
		bool zero_dl = !(cmd->data_length);
		int post_ret = 0;

		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
		if (!rc && !post_ret) {
			if (caw && zero_dl)
				goto queue_rsp;

			return;
		} else if (rc) {
			ret = transport_send_check_condition_and_sense(cmd,
								       rc, 0);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}

queue_rsp:
	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		if (cmd->scsi_status)
			goto queue_status;

		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.tx_data_octets);

		if (target_read_prot_action(cmd)) {
			ret = transport_send_check_condition_and_sense(cmd,
							cmd->pi_err, 0);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.rx_data_octets);

		if (cmd->se_cmd_flags & SCF_BIDI) {
			atomic_long_add(cmd->data_length,
					&cmd->se_lun->lun_stats.tx_data_octets);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* fall through */
	case DMA_NONE:
queue_status:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p, data_direction: %d\n",
		 cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

void target_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}
EXPORT_SYMBOL(target_free_sgl);

static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
	if (!cmd->t_data_sg_orig)
		return;

	kfree(cmd->t_data_sg);
	cmd->t_data_sg = cmd->t_data_sg_orig;
	cmd->t_data_sg_orig = NULL;
	cmd->t_data_nents = cmd->t_data_nents_orig;
	cmd->t_data_nents_orig = 0;
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
		cmd->t_prot_sg = NULL;
		cmd->t_prot_nents = 0;
	}

	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
			target_free_sgl(cmd->t_bidi_data_sg,
					cmd->t_bidi_data_nents);
			cmd->t_bidi_data_sg = NULL;
			cmd->t_bidi_data_nents = 0;
		}
		transport_reset_sgl_orig(cmd);
		return;
	}
	transport_reset_sgl_orig(cmd);

	target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

static int transport_put_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	return target_put_sess_cmd(cmd);
}

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	if (!cmd->t_data_nents)
		return NULL;

	BUG_ON(!sg);
	if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
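
/*
 * Example (sketch): emulation code that needs a contiguous view of the
 * payload brackets its access with the kmap/kunmap pair above:
 *
 *	unsigned char *buf = transport_kmap_data_sg(cmd);
 *
 *	if (!buf)
 *		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 *	buf[0] = 0x70;
 *	transport_kunmap_data_sg(cmd);
 */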

int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
		 bool zero_page, bool chainable)
{
	struct scatterlist *sg;
	struct page *page;
	gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
	unsigned int nalloc, nent;
	int i = 0;

	nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
	if (chainable)
		nalloc++;
	sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nalloc);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
	return -ENOMEM;
}
EXPORT_SYMBOL(target_alloc_sgl);
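
/*
 * Example (sketch): allocating a private zeroed, non-chainable
 * scatterlist and releasing it with target_free_sgl(), e.g. for a
 * temporary buffer:
 *
 *	struct scatterlist *sgl;
 *	unsigned int nents;
 *
 *	if (target_alloc_sgl(&sgl, &nents, len, true, false) < 0)
 *		return TCM_OUT_OF_RESOURCES;
 *	...
 *	target_free_sgl(sgl, nents);
 */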
2360
2361
2362
2363
2364
2365
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
	int ret = 0;
	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

	if (cmd->prot_op != TARGET_PROT_NORMAL &&
	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
				       cmd->prot_length, true, false);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

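	/*
	 * Determine whether the fabric module has already allocated physical
	 * memory and is passing it in directly via
	 * transport_generic_map_mem_to_cmd() beforehand.
	 */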
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {

		if ((cmd->se_cmd_flags & SCF_BIDI) ||
		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
			u32 bidi_length;

			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
				bidi_length = cmd->t_task_nolb *
					cmd->se_dev->dev_attrib.block_size;
			else
				bidi_length = cmd->data_length;

			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
					       &cmd->t_bidi_data_nents,
					       bidi_length, zero_flag, false);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				       cmd->data_length, zero_flag, false);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
		    cmd->data_length) {
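		/*
		 * Special case for COMPARE_AND_WRITE with fabrics
		 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
		 */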
		u32 caw_length = cmd->t_task_nolb *
				 cmd->se_dev->dev_attrib.block_size;

		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
				       &cmd->t_bidi_data_nents,
				       caw_length, zero_flag, false);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

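	/*
	 * If this command is not a write we can execute it right here,
	 * for write buffers we need to notify the fabric driver first
	 * and let it call back once the write buffers are ready.
	 */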
	target_add_to_state_list(cmd);
	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
		target_execute_cmd(cmd);
		return 0;
	}
	transport_cmd_check_stop(cmd, false, true);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

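	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */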
	WARN_ON(ret);

	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

static bool
__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
			   unsigned long *flags);

static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	int ret = 0;
	bool aborted = false, tas = false;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			target_wait_free_cmd(cmd, &aborted, &tas);

		if (!aborted || tas)
			ret = transport_put_cmd(cmd);
	} else {
		if (wait_for_tasks)
			target_wait_free_cmd(cmd, &aborted, &tas);

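		/*
		 * Handle WRITE failure case where transport_generic_new_cmd()
		 * has already added se_cmd to state_list, but fabric has
		 * failed command before I/O submission.
		 */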
		if (cmd->state_active)
			target_remove_from_state_list(cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		if (!aborted || tas)
			ret = transport_put_cmd(cmd);
	}

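	/*
	 * If the command was aborted via TMR ABORT_TASK or LUN_RESET,
	 * target_core_tmr.c performs the remaining target_put_sess_cmd()
	 * calls; wait for that final reference drop before handing the
	 * descriptor back to the fabric.
	 */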
	if (aborted) {
		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
		wait_for_completion(&cmd->cmd_wait_comp);
		cmd->se_tfo->release_cmd(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);

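/*
 * target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */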
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;
	int ret = 0;

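	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */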
	if (ack_kref) {
		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
			return -EINVAL;

		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		ret = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	if (ret && ack_kref)
		target_put_sess_cmd(se_cmd);

	return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);

static void target_free_cmd_mem(struct se_cmd *cmd)
{
	transport_free_pages(cmd);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
}

static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;
	bool fabric_stop;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);

	spin_lock(&se_cmd->t_state_lock);
	fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
		      (se_cmd->transport_state & CMD_T_ABORTED);
	spin_unlock(&se_cmd->t_state_lock);

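	/*
	 * If a waiter is flagged (session shutdown, or fabric stop together
	 * with an abort), signal completion through cmd_wait_comp and let
	 * the waiter release the descriptor instead of doing so here.
	 */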
	if (se_cmd->cmd_wait_set || fabric_stop) {
		list_del_init(&se_cmd->se_cmd_list);
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		target_free_cmd_mem(se_cmd);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del_init(&se_cmd->se_cmd_list);
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	target_free_cmd_mem(se_cmd);
	se_cmd->se_tfo->release_cmd(se_cmd);
}

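/*
 * target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_cmd:	command descriptor to drop
 */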
int target_put_sess_cmd(struct se_cmd *se_cmd)
{
	struct se_session *se_sess = se_cmd->se_sess;

	if (!se_sess) {
		target_free_cmd_mem(se_cmd);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return 1;
	}
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);

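/*
 * target_sess_cmd_list_set_waiting - Flag all commands in
 *	sess_cmd_list to complete cmd_wait_comp.  Set
 *	sess_tearing_down so no more commands are queued.
 * @se_sess:	session to flag
 */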
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		return;
	}
	se_sess->sess_tearing_down = 1;
	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				 &se_sess->sess_wait_list, se_cmd_list) {
		rc = kref_get_unless_zero(&se_cmd->cmd_kref);
		if (rc) {
			se_cmd->cmd_wait_set = 1;
			spin_lock(&se_cmd->t_state_lock);
			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
			spin_unlock(&se_cmd->t_state_lock);
		} else {
			list_del_init(&se_cmd->se_cmd_list);
		}
	}

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);

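/*
 * target_wait_for_sess_cmds - Wait for outstanding commands
 * @se_sess:	session to wait for active I/O
 */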
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	unsigned long flags;
	bool tas;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				 &se_sess->sess_wait_list, se_cmd_list) {
		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state: %d\n",
			 se_cmd, se_cmd->t_state,
			 se_cmd->se_tfo->get_cmd_state(se_cmd));

		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
		tas = (se_cmd->transport_state & CMD_T_TAS);
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

		if (!target_put_sess_cmd(se_cmd)) {
			if (tas)
				target_put_sess_cmd(se_cmd);
		}

		wait_for_completion(&se_cmd->cmd_wait_comp);
		pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d fabric state: %d\n",
			 se_cmd, se_cmd->t_state,
			 se_cmd->se_tfo->get_cmd_state(se_cmd));

		se_cmd->se_tfo->release_cmd(se_cmd);
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	WARN_ON(!list_empty(&se_sess->sess_cmd_list));
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

void transport_clear_lun_ref(struct se_lun *lun)
{
	percpu_ref_kill(&lun->lun_ref);
	wait_for_completion(&lun->lun_ref_comp);
}

static bool
__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
			   bool *aborted, bool *tas, unsigned long *flags)
	__releases(&cmd->t_state_lock)
	__acquires(&cmd->t_state_lock)
{
	assert_spin_locked(&cmd->t_state_lock);
	WARN_ON_ONCE(!irqs_disabled());

	if (fabric_stop)
		cmd->transport_state |= CMD_T_FABRIC_STOP;

	if (cmd->transport_state & CMD_T_ABORTED)
		*aborted = true;

	if (cmd->transport_state & CMD_T_TAS)
		*tas = true;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		return false;

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		return false;

	if (!(cmd->transport_state & CMD_T_ACTIVE))
		return false;

	if (fabric_stop && *aborted)
		return false;

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
		 cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, *flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
		 cmd->tag);

	return true;
}

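/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait on
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */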
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;
	bool ret, aborted = false, tas = false;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

struct sense_info {
	u8 key;
	u8 asc;
	u8 ascq;
	bool add_sector_info;
};

static const struct sense_info sense_info_table[] = {
	[TCM_NO_SENSE] = {
		.key = NOT_READY
	},
	[TCM_NON_EXISTENT_LUN] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x25
	},
	[TCM_UNSUPPORTED_SCSI_OPCODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20,
	},
	[TCM_SECTOR_COUNT_TOO_MANY] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20,
	},
	[TCM_UNKNOWN_MODE_PAGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24,
	},
	[TCM_CHECK_CONDITION_ABORT_CMD] = {
		.key = ABORTED_COMMAND,
		.asc = 0x29,
		.ascq = 0x03,
	},
	[TCM_INCORRECT_AMOUNT_OF_DATA] = {
		.key = ABORTED_COMMAND,
		.asc = 0x0c,
		.ascq = 0x0d,
	},
	[TCM_INVALID_CDB_FIELD] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24,
	},
	[TCM_INVALID_PARAMETER_LIST] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
	},
	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x1a,
	},
	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x0c,
		.ascq = 0x0c,
	},
	[TCM_SERVICE_CRC_ERROR] = {
		.key = ABORTED_COMMAND,
		.asc = 0x47,
		.ascq = 0x05,
	},
	[TCM_SNACK_REJECTED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x11,
		.ascq = 0x13,
	},
	[TCM_WRITE_PROTECTED] = {
		.key = DATA_PROTECT,
		.asc = 0x27,
	},
	[TCM_ADDRESS_OUT_OF_RANGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x21,
	},
	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
		.key = UNIT_ATTENTION,
	},
	[TCM_CHECK_CONDITION_NOT_READY] = {
		.key = NOT_READY,
	},
	[TCM_MISCOMPARE_VERIFY] = {
		.key = MISCOMPARE,
		.asc = 0x1d,
		.ascq = 0x00,
	},
	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x01,
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x02,
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x03,
		.add_sector_info = true,
	},
	[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
		.key = COPY_ABORTED,
		.asc = 0x0d,
		.ascq = 0x02,
	},
	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
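		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators.  Returning NOT READY instead means the
		 * drive is momentarily unavailable, making the initiator retry
		 * the command.
		 */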
		.key = NOT_READY,
		.asc = 0x08,
	},
};

static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
	const struct sense_info *si;
	u8 *buffer = cmd->sense_buffer;
	int r = (__force int)reason;
	u8 asc, ascq;
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
		si = &sense_info_table[r];
	else
		si = &sense_info_table[(__force int)
				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];

	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		WARN_ON_ONCE(asc == 0);
	} else if (si->asc == 0) {
		WARN_ON_ONCE(cmd->scsi_asc == 0);
		asc = cmd->scsi_asc;
		ascq = cmd->scsi_ascq;
	} else {
		asc = si->asc;
		ascq = si->ascq;
	}

	scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
	if (si->add_sector_info)
		return scsi_set_sense_information(buffer,
						  cmd->scsi_sense_length,
						  cmd->bad_sector);

	return 0;
}

int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!from_transport) {
		int rc;

		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
		cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
		rc = translate_sense_reason(cmd, reason);
		if (rc)
			return rc;
	}

	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
	__releases(&cmd->t_state_lock)
	__acquires(&cmd->t_state_lock)
{
	assert_spin_locked(&cmd->t_state_lock);
	WARN_ON_ONCE(!irqs_disabled());

	if (!(cmd->transport_state & CMD_T_ABORTED))
		return 0;

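	/*
	 * If cmd has been aborted but either no status is to be sent or it
	 * has already been sent, just return
	 */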
	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
		if (send_status)
			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
		return 1;
	}

	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
		 cmd->t_task_cdb[0], cmd->tag);

	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
	trace_target_cmd_complete(cmd);

	spin_unlock_irq(&cmd->t_state_lock);
	cmd->se_tfo->queue_status(cmd);
	spin_lock_irq(&cmd->t_state_lock);

	return 1;
}

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret;

	spin_lock_irq(&cmd->t_state_lock);
	ret = __transport_check_aborted_status(cmd, send_status);
	spin_unlock_irq(&cmd->t_state_lock);

	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

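	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to the fabric module by
	 * transport_check_aborted_status().
	 */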
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
				spin_unlock_irqrestore(&cmd->t_state_lock, flags);
				goto send_abort;
			}
			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return;
		}
	}
send_abort:
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	transport_lun_remove_cmd(cmd);

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
		 cmd->t_task_cdb[0], cmd->tag);

	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);
}

static void target_tmr_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_ABORTED) {
		tmr->response = TMR_FUNCTION_REJECTED;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		goto check_stop;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		if (tmr->response == TMR_FUNCTION_COMPLETE) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x29,
					       ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
		}
		break;
	case TMR_TARGET_WARM_RESET:
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n", tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_ABORTED) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		goto check_stop;
	}
	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	cmd->se_tfo->queue_tm_rsp(cmd);

check_stop:
	transport_cmd_check_stop_to_fabric(cmd);
}

int transport_generic_handle_tmr(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state |= CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, target_tmr_work);
	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

bool
target_check_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}

bool
target_check_fua(struct se_device *dev)
{
	return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
}