/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	__core_tpg_get_initiator_node_acl():
 *
 *	Must be called with tpg->acl_node_mutex held.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *	Look up a node ACL by initiator name and take an acl_kref reference.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
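
/*
 * Example (hypothetical fabric driver code, not part of this file): a
 * session login path would pair the lookup with a final target_put_nacl()
 * once the session that pinned the ACL is released:
 *
 *	struct se_node_acl *acl;
 *
 *	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
 *	if (!acl)
 *		return -ENOENT;
 *	...
 *	target_put_nacl(acl);
 */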

void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
				       ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);
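
/*
 * Example (hypothetical fabric usage): a fabric driver that detects a
 * dropped I_T nexus might queue the NEXUS LOSS OCCURRED unit attention
 * before deregistering the session:
 *
 *	core_allocate_nexus_loss_ua(sess->se_node_acl);
 *	transport_deregister_session(sess);
 */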

/*	core_tpg_add_node_to_devs():
 *
 *	Create demo-mode MappedLUNs for a dynamically generated node ACL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamically created se_node_acl.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(),
		acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);
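
/*
 * Example (hypothetical fabric usage): reject a login early when explicit
 * ACLs are required and no matching entry has been configured:
 *
 *	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg) &&
 *	    !target_tpg_has_node_acl(tpg, initiatorname))
 *		return -EACCES;
 */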

struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
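
/*
 * Example (hypothetical fabric login path): demo-mode capable fabrics
 * typically resolve the session ACL with this helper and fail the login
 * when neither an explicit ACL nor demo mode is available:
 *
 *	se_sess->se_node_acl =
 *		core_tpg_check_initiator_node_acl(tpg, initiatorname);
 *	if (!se_sess->se_node_acl) {
 *		transport_free_session(se_sess);
 *		return -EINVAL;
 *	}
 */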

/*
 * Busy-wait for any outstanding persistent reservation references on the
 * node ACL to drop before it is freed.
 */
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}
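
/*
 * Example (hypothetical configfs flow): fabric ->fabric_make_nodeacl()
 * callbacks are the usual callers, propagating the ERR_PTR() convention:
 *
 *	acl = core_tpg_add_initiator_node_acl(tpg, name);
 *	if (IS_ERR(acl))
 *		return ERR_CAST(acl);
 */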

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		if (!target_get_session(sess))
			continue;
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		/*
		 * Drop the reference taken by target_get_session() above; if
		 * ->shutdown_session() actually initiated shutdown, also drop
		 * the session's login reference so it can be released.
		 */
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Update an ACL's queue depth and force session reinstatement so the
 *	new depth takes effect.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	LIST_HEAD(sess_list);
	struct se_portal_group *tpg = acl->se_tpg;
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * target_set_nacl_queue_depth() to set the new queue depth.
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;
		if (!target_get_session(sess))
			continue;
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		/*
		 * Finally call tpg->se_tpg_tfo->shutdown_session() to force
		 * session reinstatement to occur if there is an active session
		 * for the $FABRIC_MOD Initiator Node in question.
		 */
		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc) {
			spin_lock_irqsave(&acl->nacl_sess_lock, flags);
			continue;
		}
		target_put_session(sess);
		spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
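
/*
 * Example (hypothetical configfs store handler): a per-ACL queue depth
 * attribute would typically funnel into this helper:
 *
 *	ret = core_tpg_set_initiator_node_queue_depth(acl, new_depth);
 *	if (ret < 0)
 *		return ret;
 */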

/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate MC/S like behavior across multiple sessions.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
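
/*
 * Example (hypothetical usage): writing the literal string "NULL" clears
 * the tag, anything else (e.g. a hypothetical "group-a") replaces it:
 *
 *	core_tpg_set_initiator_node_tag(tpg, acl, "group-a");
 *	core_tpg_set_initiator_node_tag(tpg, acl, "NULL");
 */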

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_ref_comp);
}

int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called from a
	 * fabric driver's make_tpg() callback during configfs group creation,
	 * the fabric ops are inherited from the parent se_wwn.
	 *
	 * For special cases like iscsi-target discovery TPGs that have no
	 * parent se_wwn, the caller is responsible for setting
	 * se_tpg->se_tpg_tfo ahead of calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				       true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
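
/*
 * Example (hypothetical fabric make_tpg() callback): register a TPG with
 * the fabric's SCSI protocol identifier, and tear it down again from the
 * matching drop_tpg() callback:
 *
 *	ret = core_tpg_register(wwn, &my_tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 *	...
 *	core_tpg_deregister(&my_tpg->se_tpg);
 */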

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	lun->lun_link_magic = SE_LUN_LINK_MAGIC;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_ref_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}
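
/*
 * Example (mirroring the virtual LUN0 setup in core_tpg_register() above):
 * core_tpg_alloc_lun() and core_tpg_add_lun() are used as a pair, with the
 * se_lun freed if the add step fails:
 *
 *	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *
 *	ret = core_tpg_add_lun(tpg, lun, true, dev);
 *	if (ret < 0) {
 *		kfree(lun);
 *		return ret;
 *	}
 */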

void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TG_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}