// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

/*	__core_tpg_get_initiator_node_acl():
 *
 *	Must be called with tpg->acl_node_mutex held.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname))
                        return acl;
        }

        return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *	Look up a node ACL by initiator name and take an acl_kref reference.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        /*
         * Look up the ACL under acl_node_mutex and take an acl_kref
         * reference before returning it, so the ACL cannot be freed
         * while the caller is still using it.  If the kref has already
         * dropped to zero the ACL is going away and NULL is returned
         * instead.
         */
        mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (!kref_get_unless_zero(&acl->acl_kref))
                        acl = NULL;
        }
        mutex_unlock(&tpg->acl_node_mutex);

        return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

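/*
 * Queue a NEXUS LOSS OCCURRED unit attention (ASC 0x29) on every
 * se_dev_entry currently mapped to this node ACL.
 */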
void core_allocate_nexus_loss_ua(
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        if (!nacl)
                return;

        rcu_read_lock();
        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
                core_scsi3_ua_allocate(deve, 0x29,
                        ASCQ_29H_NEXUS_LOSS_OCCURRED);
        rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/*	core_tpg_add_node_to_devs():
 *
 *	Set up demo-mode MappedLUNs for a node ACL, either for every LUN
 *	in the TPG or only for lun_orig when it is non-NULL.
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg,
        struct se_lun *lun_orig)
{
        bool lun_access_ro = true;
        struct se_lun *lun;
        struct se_device *dev;

        mutex_lock(&tpg->tpg_lun_mutex);
        hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
                if (lun_orig && lun != lun_orig)
                        continue;

                dev = rcu_dereference_check(lun->lun_se_dev,
                                lockdep_is_held(&tpg->tpg_lun_mutex));
                /*
                 * By default in LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is ON, or READ_ONLY;
                 */
                if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
                        lun_access_ro = false;
                } else {
                        /*
                         * Allow only optical drives to issue R/W in default RO
                         * demo mode.
                         */
                        if (dev->transport->get_device_type(dev) == TYPE_DISK)
                                lun_access_ro = true;
                        else
                                lun_access_ro = false;
                }

                pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        tpg->se_tpg_tfo->fabric_name,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                        lun_access_ro ? "READ-ONLY" : "READ-WRITE");

                core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access_ro, acl, tpg);
                /*
                 * Check to see if there are any existing persistent reservation
                 * APTPL pre-registrations that need to be enabled for this
                 * dynamic LUN ACL now.
                 */
                core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
                                lun->unpacked_lun);
        }
        mutex_unlock(&tpg->tpg_lun_mutex);
}

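/*
 * Apply the requested queue depth to the node ACL, falling back to a
 * depth of 1 (with a warning) when the fabric reports 0.
 */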
static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
                struct se_node_acl *acl, u32 queue_depth)
{
        acl->queue_depth = queue_depth;

        if (!acl->queue_depth) {
                pr_warn("Queue depth for %s Initiator Node: %s is 0, "
                        "defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
                        acl->initiatorname);
                acl->queue_depth = 1;
        }
}

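/*
 * Allocate and initialize a new se_node_acl, sized to include any
 * fabric-specific private data (node_acl_size), then apply the fabric's
 * default queue depth and default node attributes.
 */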
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
                const unsigned char *initiatorname)
{
        struct se_node_acl *acl;
        u32 queue_depth;

        acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
                        GFP_KERNEL);
        if (!acl)
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        INIT_HLIST_HEAD(&acl->lun_entry_hlist);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->nacl_sess_lock);
        mutex_init(&acl->lun_entry_mutex);
        atomic_set(&acl->acl_pr_ref_count, 0);

        if (tpg->se_tpg_tfo->tpg_get_default_depth)
                queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
        else
                queue_depth = 1;
        target_set_nacl_queue_depth(tpg, acl, queue_depth);

        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        return acl;
}

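/*
 * Link a newly allocated ACL into the TPG's acl_node_list under
 * acl_node_mutex and log the addition.
 */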
static void target_add_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;

        mutex_lock(&tpg->acl_node_mutex);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        mutex_unlock(&tpg->acl_node_mutex);

        pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n",
                tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                acl->dynamic_node_acl ? "DYNAMIC" : "",
                acl->queue_depth,
                tpg->se_tpg_tfo->fabric_name,
                acl->initiatorname);
}

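/*
 * Return true if an explicit or dynamic ACL already exists for
 * @initiatorname in this TPG.
 */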
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
                const char *initiatorname)
{
        struct se_node_acl *acl;
        bool found = false;

        mutex_lock(&tpg->acl_node_mutex);
        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname)) {
                        found = true;
                        break;
                }
        }
        mutex_unlock(&tpg->acl_node_mutex);

        return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

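/*
 * Called at session login: return an existing node ACL for the
 * initiator, or, when the fabric allows demo mode, create a dynamic
 * ACL and map it to the TPG's LUNs.
 */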
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl)
                return acl;

        if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
                return NULL;

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return NULL;
        /*
         * When allocating a dynamically generated node_acl, go ahead
         * and take the extra kref now before returning to the fabric
         * driver caller.
         *
         * Note this reference will be released at session shutdown
         * time within transport_free_session() code.
         */
        kref_get(&acl->acl_kref);
        acl->dynamic_node_acl = 1;
        /*
         * Here we only create demo-mode MappedLUNs from the active
         * TPG LUNs if the fabric is not explicitly asking for
         * tpg_check_demo_mode_login_only() == 1.
         */
        if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
            (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
                core_tpg_add_node_to_devs(acl, tpg, NULL);

        target_add_node_acl(acl);
        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

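/*
 * Busy-wait until all outstanding SPC-3 persistent reservation
 * references on the node ACL have been dropped.
 */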
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

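/*
 * Create an explicit node ACL from configfs.  An existing dynamic ACL
 * for the same initiator is converted in place; an existing explicit
 * ACL is an error.
 */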
struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", tpg->se_tpg_tfo->fabric_name,
                                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
                        mutex_unlock(&tpg->acl_node_mutex);
                        return acl;
                }

                pr_err("ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", tpg->se_tpg_tfo->fabric_name,
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                mutex_unlock(&tpg->acl_node_mutex);
                return ERR_PTR(-EEXIST);
        }
        mutex_unlock(&tpg->acl_node_mutex);

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return ERR_PTR(-ENOMEM);

        target_add_node_acl(acl);
        return acl;
}

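/*
 * Force-close every session still linked to this ACL.  The list lock is
 * dropped around the fabric's close_session() callback, so the scan is
 * restarted from the head after each call.
 */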
static void target_shutdown_sessions(struct se_node_acl *acl)
{
        struct se_session *sess;
        unsigned long flags;

restart:
        spin_lock_irqsave(&acl->nacl_sess_lock, flags);
        list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
                if (sess->sess_tearing_down)
                        continue;

                list_del_init(&sess->sess_acl_list);
                spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

                if (acl->se_tpg->se_tpg_tfo->close_session)
                        acl->se_tpg->se_tpg_tfo->close_session(sess);
                goto restart;
        }
        spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

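/*
 * Remove an explicit node ACL: unlink it from the TPG, shut down its
 * sessions, wait for the final acl_kref put and any PR references, then
 * free the mapped LUN list and the ACL itself.
 */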
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;

        mutex_lock(&tpg->acl_node_mutex);
        if (acl->dynamic_node_acl)
                acl->dynamic_node_acl = 0;
        list_del_init(&acl->acl_list);
        mutex_unlock(&tpg->acl_node_mutex);

        target_shutdown_sessions(acl);

        target_put_nacl(acl);
        /*
         * Wait for the last target_put_nacl() to complete in
         * target_complete_nacl() before releasing the ACL's resources.
         */
        wait_for_completion(&acl->acl_free_comp);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_free_device_list_for_node(acl, tpg);

        pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->fabric_name, acl->initiatorname);

        kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the queue depth for an existing se_node_acl.
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_node_acl *acl,
        u32 queue_depth)
{
        struct se_portal_group *tpg = acl->se_tpg;

        /*
         * Allow the setting of se_node_acl queue_depth to be idempotent,
         * and not force a session shutdown event if the value does not
         * change.
         */
        if (acl->queue_depth == queue_depth)
                return 0;
        /*
         * The user has requested a new queue depth for this Initiator Node.
         * Record the value in the node ACL via target_set_nacl_queue_depth().
         */
        target_set_nacl_queue_depth(tpg, acl, queue_depth);
        /*
         * Shut down all pending sessions to force session reinstatement
         * with the new queue depth.
         */
        target_shutdown_sessions(acl);

        pr_debug("Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
                acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        const char *new_tag)
{
        if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
                return -EINVAL;

        if (!strncmp("NULL", new_tag, 4)) {
                acl->acl_tag[0] = '\0';
                return 0;
        }

        return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

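/*
 * percpu_ref release callback for se_lun->lun_ref: wake up the LUN
 * shutdown path once the last I/O reference is gone.
 */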
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
        struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

        complete(&lun->lun_shutdown_comp);
}

int core_tpg_register(
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        int proto_id)
{
        int ret;

        if (!se_tpg)
                return -EINVAL;

        /*
         * For the typical case where core_tpg_register() is called by a
         * fabric driver from target_core_fabric_ops->fabric_make_tpg()
         * configfs context, use the original tf_ops pointer already saved
         * by target-core in target_fabric_make_wwn().
         *
         * Otherwise, for special cases like iscsi-target discovery TPGs
         * the caller is responsible for setting ->se_tpg_tfo ahead of
         * calling core_tpg_register().
         */
        if (se_wwn)
                se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

        if (!se_tpg->se_tpg_tfo) {
                pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
                return -EINVAL;
        }

        INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
        se_tpg->proto_id = proto_id;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->session_lock);
        mutex_init(&se_tpg->tpg_lun_mutex);
        mutex_init(&se_tpg->acl_node_mutex);

        if (se_tpg->proto_id >= 0) {
                se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
                if (IS_ERR(se_tpg->tpg_virt_lun0))
                        return PTR_ERR(se_tpg->tpg_virt_lun0);

                ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
                                true, g_lun0_dev);
                if (ret < 0) {
                        kfree(se_tpg->tpg_virt_lun0);
                        return ret;
                }
        }

        pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
                "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
                se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);

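/*
 * Tear down a portal group: wait for PR references, free any remaining
 * node ACLs, and remove the internal virtual LUN 0.
 */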
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
        struct se_node_acl *nacl, *nacl_tmp;
        LIST_HEAD(node_list);

        pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
                "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
                tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
                se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();

        mutex_lock(&se_tpg->acl_node_mutex);
        list_splice_init(&se_tpg->acl_node_list, &node_list);
        mutex_unlock(&se_tpg->acl_node_mutex);

        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
                list_del_init(&nacl->acl_list);

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                kfree(nacl);
        }

        if (se_tpg->proto_id >= 0) {
                core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
                kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
        }

        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

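/*
 * Allocate and initialize a struct se_lun for @unpacked_lun within @tpg.
 * The LUN is not visible until core_tpg_add_lun() links it to a device.
 */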
struct se_lun *core_tpg_alloc_lun(
        struct se_portal_group *tpg,
        u64 unpacked_lun)
{
        struct se_lun *lun;

        lun = kzalloc(sizeof(*lun), GFP_KERNEL);
        if (!lun) {
                pr_err("Unable to allocate se_lun memory\n");
                return ERR_PTR(-ENOMEM);
        }
        lun->unpacked_lun = unpacked_lun;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_deve_list);
        INIT_LIST_HEAD(&lun->lun_dev_link);
        atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
        spin_lock_init(&lun->lun_deve_lock);
        mutex_init(&lun->lun_tg_pt_md_mutex);
        INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
        spin_lock_init(&lun->lun_tg_pt_gp_lock);
        lun->lun_tpg = tpg;

        return lun;
}

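/*
 * Export @dev through @lun on @tpg: set up the percpu lun_ref, allocate
 * a relative target port identifier, attach the default ALUA target port
 * group, and make the LUN visible in the TPG's LUN hash.
 */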
int core_tpg_add_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        bool lun_access_ro,
        struct se_device *dev)
{
        int ret;

        ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
                        GFP_KERNEL);
        if (ret < 0)
                goto out;

        ret = core_alloc_rtpi(lun, dev);
        if (ret)
                goto out_kill_ref;

        if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

        mutex_lock(&tpg->tpg_lun_mutex);

        spin_lock(&dev->se_port_lock);
        lun->lun_index = dev->dev_index;
        rcu_assign_pointer(lun->lun_se_dev, dev);
        dev->export_count++;
        list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);

        if (dev->dev_flags & DF_READ_ONLY)
                lun->lun_access_ro = true;
        else
                lun->lun_access_ro = lun_access_ro;
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
        mutex_unlock(&tpg->tpg_lun_mutex);

        return 0;

out_kill_ref:
        percpu_ref_exit(&lun->lun_ref);
out:
        return ret;
}

void core_tpg_remove_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        /*
         * rcu_dereference_raw protected by se_lun->lun_group symlink
         * reference to se_device->dev_group.
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

        lun->lun_shutdown = true;

        core_clear_lun_from_tpg(lun, tpg);
        /*
         * Wait for any outstanding I/O references to the percpu
         * se_lun->lun_ref to be dropped before detaching the LUN
         * from the device below.
         */
        transport_clear_lun_ref(lun);

        mutex_lock(&tpg->tpg_lun_mutex);
        if (lun->lun_se_dev) {
                target_detach_tg_pt_gp(lun);

                spin_lock(&dev->se_port_lock);
                list_del(&lun->lun_dev_link);
                dev->export_count--;
                rcu_assign_pointer(lun->lun_se_dev, NULL);
                spin_unlock(&dev->se_port_lock);
        }
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_del_rcu(&lun->link);

        lun->lun_shutdown = false;
        mutex_unlock(&tpg->tpg_lun_mutex);

        percpu_ref_exit(&lun->lun_ref);
}