// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 */
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	__core_tpg_get_initiator_node_acl():
 *
 *	The caller must hold tpg->acl_node_mutex.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*
 * Look up a node ACL by initiator name and take a reference on its
 * acl_kref for the caller.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	/*
	 * Obtain the acl_kref here, while holding acl_node_mutex, so the
	 * reference is held for the caller (typically a new se_session
	 * login) until session shutdown releases it via the fabric driver's
	 * transport_deregister_session() or transport_free_session() call.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

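/*
 * Queue a NEXUS LOSS OCCURRED unit attention (ASC 0x29) on every LUN
 * mapped to this initiator node, so the initiator learns on its next
 * command that the I_T nexus was lost.
 */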
void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/*	core_tpg_add_node_to_devs():
 *
 *	Map one (@lun_orig) or all active TPG LUNs into a dynamically
 *	generated node ACL, using demo-mode access rules.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default demo_mode_write_protect is ON, i.e. the LUN
		 * is exported READ-ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only non-TYPE_DISK (e.g. optical) devices
			 * to issue R/W in the default RO demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent
		 * reservation APTPL pre-registrations that need to be
		 * enabled for this dynamic LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

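/*
 * Allocate and initialize a new se_node_acl. The allocation is sized to
 * max(sizeof(*acl), tfo->node_acl_size) so a fabric driver can embed its
 * own per-ACL state after the generic structure.
 */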
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(),
		acl->initiatorname);
}

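/*
 * Return true if an ACL (dynamic or explicit) already exists for
 * @initiatorname in @tpg. No reference is taken.
 */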
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead and
	 * take the extra kref now before returning to the fabric driver
	 * caller.
	 *
	 * Note this reference will be released at session shutdown time
	 * within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

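/*
 * Busy-wait until all outstanding persistent reservation references on
 * this node ACL have been dropped before it is torn down.
 */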
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

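/*
 * Create an explicit (configfs-driven) node ACL, or convert an existing
 * dynamic ACL into an explicit one in place so active sessions survive.
 */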
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

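/*
 * Force session reinstatement: ask the fabric driver to close every
 * active session on this ACL. The lock must be dropped around the
 * ->close_session() callback, so the list walk restarts from the top
 * after each call.
 */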
static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (sess->sess_tearing_down)
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del_init(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the queue depth for an initiator node ACL, forcing session
 *	reinstatement when the value actually changes.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

	/*
	 * Allow the setting of se_node_acl queue_depth to be idempotent,
	 * and not force a session shutdown event if the value does not
	 * change.
	 */
	if (acl->queue_depth == queue_depth)
		return 0;

	/*
	 * The user has requested a new queue depth for this Initiator Node.
	 * Record the value in the node's struct se_node_acl via
	 * target_set_nacl_queue_depth().
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	/*
	 * Shut down all pending sessions to force session reinstatement.
	 */
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate MC/S like behavior for an iSCSI Initiator Node
 *	with multiple, matching iqn's.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

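/*
 * percpu_ref release callback, invoked once the last se_lun->lun_ref
 * I/O reference has been dropped during LUN shutdown.
 */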
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_shutdown_comp);
}

int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;

	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_configfs.
	 *
	 * When se_wwn is NULL, the caller must have set se_tpg->se_tpg_tfo
	 * beforehand; this is checked below.
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				       true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
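/*
 * A minimal usage sketch, assuming a hypothetical fabric driver whose
 * my_tpg structure embeds an se_portal_group (my_tpg and its se_tpg
 * member are illustrative names, not part of this file):
 *
 *	ret = core_tpg_register(wwn, &my_tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 */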

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del_init(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_ref_comp);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

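/*
 * Make a LUN visible in a TPG: initialize its percpu I/O reference,
 * allocate a relative target port identifier, attach the device's
 * default ALUA target port group (unless the backend implements ALUA
 * itself), and link the LUN into both the device's port list and the
 * TPG's LUN hash.
 */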
int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport->transport_flags &
	     TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}

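/*
 * Undo core_tpg_add_lun(): drain active I/O references, detach the ALUA
 * target port group, and unlink the LUN from the device and the TPG.
 */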
void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lun->lun_shutdown = true;

	core_clear_lun_from_tpg(lun, tpg);

	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released. Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remoteport matching TargetName.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);

	lun->lun_shutdown = false;
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}