/*
 * Filename:  target_core_alua.c
 *
 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
 * emulation for the generic target core: REPORT/SET TARGET PORT GROUPS,
 * REPORT REFERRALS, primary and secondary access state handling, and the
 * configfs attribute helpers that drive them.
 */

13#include <linux/slab.h>
14#include <linux/spinlock.h>
15#include <linux/configfs.h>
16#include <linux/delay.h>
17#include <linux/export.h>
18#include <linux/fcntl.h>
19#include <linux/file.h>
20#include <linux/fs.h>
21#include <scsi/scsi_proto.h>
22#include <asm/unaligned.h>
23
24#include <target/target_core_base.h>
25#include <target/target_core_backend.h>
26#include <target/target_core_fabric.h>
27
28#include "target_core_internal.h"
29#include "target_core_alua.h"
30#include "target_core_ua.h"
31
32static sense_reason_t core_alua_check_transition(int state, int valid,
33 int *primary, int explicit);
34static int core_alua_set_tg_pt_secondary_state(
35 struct se_lun *lun, int explicit, int offline);
36
37static char *core_alua_dump_state(int state);
38
39static void __target_attach_tg_pt_gp(struct se_lun *lun,
40 struct t10_alua_tg_pt_gp *tg_pt_gp);
41
42static u16 alua_lu_gps_counter;
43static u32 alua_lu_gps_count;
44
45static DEFINE_SPINLOCK(lu_gps_lock);
46static LIST_HEAD(lu_gps_list);
47
48struct t10_alua_lu_gp *default_lu_gp;
49
/*
 * REPORT REFERRALS
 *
 * See the SBC-3 definition of the REPORT REFERRALS service action.
 */
55sense_reason_t
56target_emulate_report_referrals(struct se_cmd *cmd)
57{
58 struct se_device *dev = cmd->se_dev;
59 struct t10_alua_lba_map *map;
60 struct t10_alua_lba_map_member *map_mem;
61 unsigned char *buf;
62 u32 rd_len = 0, off;
63
64 if (cmd->data_length < 4) {
65 pr_warn("REPORT REFERRALS allocation length %u too"
66 " small\n", cmd->data_length);
67 return TCM_INVALID_CDB_FIELD;
68 }
69
70 buf = transport_kmap_data_sg(cmd);
71 if (!buf)
72 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
73
74 off = 4;
75 spin_lock(&dev->t10_alua.lba_map_lock);
76 if (list_empty(&dev->t10_alua.lba_map_list)) {
77 spin_unlock(&dev->t10_alua.lba_map_lock);
78 transport_kunmap_data_sg(cmd);
79
80 return TCM_UNSUPPORTED_SCSI_OPCODE;
81 }
82
83 list_for_each_entry(map, &dev->t10_alua.lba_map_list,
84 lba_map_list) {
85 int desc_num = off + 3;
86 int pg_num;
87
88 off += 4;
89 if (cmd->data_length > off)
90 put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
91 off += 8;
92 if (cmd->data_length > off)
93 put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
94 off += 8;
95 rd_len += 20;
96 pg_num = 0;
97 list_for_each_entry(map_mem, &map->lba_map_mem_list,
98 lba_map_mem_list) {
99 int alua_state = map_mem->lba_map_mem_alua_state;
100 int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
101
102 if (cmd->data_length > off)
103 buf[off] = alua_state & 0x0f;
104 off += 2;
105 if (cmd->data_length > off)
106 buf[off] = (alua_pg_id >> 8) & 0xff;
107 off++;
108 if (cmd->data_length > off)
109 buf[off] = (alua_pg_id & 0xff);
110 off++;
111 rd_len += 4;
112 pg_num++;
113 }
114 if (cmd->data_length > desc_num)
115 buf[desc_num] = pg_num;
116 }
117 spin_unlock(&dev->t10_alua.lba_map_lock);

	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN payload
	 */
122 put_unaligned_be16(rd_len, &buf[2]);
123
124 transport_kunmap_data_sg(cmd);
125
126 target_complete_cmd(cmd, GOOD);
127 return 0;
128}
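
/*
 * Informal sketch of the parameter data built by
 * target_emulate_report_referrals() above, derived from the buffer offsets
 * used in the code (see SBC-3 for the authoritative layout):
 *
 *   bytes 0-1   reserved
 *   bytes 2-3   RETURN DATA LENGTH (rd_len, big-endian)
 *   byte 4...   one user data segment referral descriptor per lba_map entry:
 *                 bytes 0-2   reserved
 *                 byte  3     number of target port group entries (pg_num)
 *                 bytes 4-11  first LBA of the segment (be64)
 *                 bytes 12-19 last LBA of the segment (be64)
 *                 then one 4-byte entry per lba_map_mem member:
 *                   byte 0 = ALUA state (low nibble), byte 1 = reserved,
 *                   bytes 2-3 = target port group ID (be16)
 */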
129
/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See the SPC-4 definition of the REPORT TARGET PORT GROUPS command.
 */
135sense_reason_t
136target_emulate_report_target_port_groups(struct se_cmd *cmd)
137{
138 struct se_device *dev = cmd->se_dev;
139 struct t10_alua_tg_pt_gp *tg_pt_gp;
140 struct se_lun *lun;
141 unsigned char *buf;
142 u32 rd_len = 0, off;
143 int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

	/*
	 * Skip over the RESERVED area to the first Target port group
	 * descriptor, depending on the PARAMETER DATA FORMAT type.
	 */
149 if (ext_hdr != 0)
150 off = 8;
151 else
152 off = 4;
153
154 if (cmd->data_length < off) {
155 pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
156 " small for %s header\n", cmd->data_length,
157 (ext_hdr) ? "extended" : "normal");
158 return TCM_INVALID_CDB_FIELD;
159 }
160 buf = transport_kmap_data_sg(cmd);
161 if (!buf)
162 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
163
164 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
165 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
166 tg_pt_gp_list) {
		/*
		 * Check if the Target port group and Target port descriptor list
		 * based on tg_pt_gp_members count will fit into the response payload.
		 * Otherwise, bump rd_len to let the initiator know we have exceeded
		 * the allocation length and the response is truncated.
		 */
173 if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
174 cmd->data_length) {
175 rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
176 continue;
177 }
		/*
		 * PREF: Preferred target port bit, determine if this
		 * bit should be set for port group.
		 */
182 if (tg_pt_gp->tg_pt_gp_pref)
183 buf[off] = 0x80;
		/*
		 * Set the ASYMMETRIC ACCESS State
		 */
		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
		/*
		 * Set supported ASYMMETRIC ACCESS State bits
		 */
		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
		/*
		 * TARGET PORT GROUP
		 */
		put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
		off += 2;

		off++; /* Skip over Reserved */
		/*
		 * STATUS CODE
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
		/*
		 * Vendor Specific field
		 */
		buf[off++] = 0x00;
		/*
		 * TARGET PORT COUNT
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
211 rd_len += 8;
212
213 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
214 list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
215 lun_tg_pt_gp_link) {
			/*
			 * Start Target Port descriptor format
			 *
			 * See the SPC-4 Target port descriptor table.
			 */
			off += 2; /* Skip over Obsolete */
			/*
			 * Set RELATIVE TARGET PORT IDENTIFIER
			 */
225 put_unaligned_be16(lun->lun_rtpi, &buf[off]);
226 off += 2;
227 rd_len += 4;
228 }
229 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
230 }
231 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN payload
	 */
235 put_unaligned_be32(rd_len, &buf[0]);

	/*
	 * Fill in the Extended header parameter data format if requested
	 */
240 if (ext_hdr != 0) {
241 buf[4] = 0x10;
		/*
		 * Set the implicit transition time (in seconds) for the application
		 * client to use as a base for its transition timeout value.
		 *
		 * Use the current tg_pt_gp membership of the LUN this CDB was
		 * received upon to determine this value individually for the
		 * ALUA target port group.
		 */
250 spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
251 tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
252 if (tg_pt_gp)
253 buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
254 spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
255 }
256 transport_kunmap_data_sg(cmd);
257
258 target_complete_cmd_with_length(cmd, GOOD, rd_len + 4);
259 return 0;
260}
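
/*
 * Informal sketch of the REPORT TARGET PORT GROUPS parameter data produced
 * above, derived from the buffer offsets in this function (see SPC-4 for the
 * authoritative layout):
 *
 *   bytes 0-3   RETURN DATA LENGTH (rd_len, be32)
 *   bytes 4-7   extended header, only when cdb[1] bit 5 is set:
 *                 byte 4 = 0x10, byte 5 = implicit transition time (seconds)
 *   descriptors follow the header (offset 4, or 8 with the extended header),
 *   one per tg_pt_gp:
 *     byte 0    PREF bit (0x80) | ASYMMETRIC ACCESS STATE (low nibble)
 *     byte 1    supported access state bits (T_SUP .. AO_SUP)
 *     bytes 2-3 TARGET PORT GROUP ID (be16)
 *     byte 4    reserved
 *     byte 5    STATUS CODE
 *     byte 6    vendor specific
 *     byte 7    TARGET PORT COUNT
 *     then one 4-byte target port descriptor per LUN in the group:
 *       bytes 0-1 obsolete, bytes 2-3 RELATIVE TARGET PORT IDENTIFIER (be16)
 */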
261
/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See the SPC-4 definition of the SET TARGET PORT GROUPS command.
 */
267sense_reason_t
268target_emulate_set_target_port_groups(struct se_cmd *cmd)
269{
270 struct se_device *dev = cmd->se_dev;
271 struct se_lun *l_lun = cmd->se_lun;
272 struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
273 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
274 unsigned char *buf;
275 unsigned char *ptr;
276 sense_reason_t rc = TCM_NO_SENSE;
277 u32 len = 4;
278 int alua_access_state, primary = 0, valid_states;
279 u16 tg_pt_id, rtpi;
280
281 if (cmd->data_length < 4) {
282 pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
283 " small\n", cmd->data_length);
284 return TCM_INVALID_PARAMETER_LIST;
285 }
286
287 buf = transport_kmap_data_sg(cmd);
288 if (!buf)
289 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
290
	/*
	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
	 * for the local tg_pt_gp.
	 */
295 spin_lock(&l_lun->lun_tg_pt_gp_lock);
296 l_tg_pt_gp = l_lun->lun_tg_pt_gp;
297 if (!l_tg_pt_gp) {
298 spin_unlock(&l_lun->lun_tg_pt_gp_lock);
299 pr_err("Unable to access l_lun->tg_pt_gp\n");
300 rc = TCM_UNSUPPORTED_SCSI_OPCODE;
301 goto out;
302 }
303
304 if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
305 spin_unlock(&l_lun->lun_tg_pt_gp_lock);
306 pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
307 " while TPGS_EXPLICIT_ALUA is disabled\n");
308 rc = TCM_UNSUPPORTED_SCSI_OPCODE;
309 goto out;
310 }
311 valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
312 spin_unlock(&l_lun->lun_tg_pt_gp_lock);
313
	ptr = &buf[4]; /* Skip over RESERVED area in header */
315
316 while (len < cmd->data_length) {
317 bool found = false;
318 alua_access_state = (ptr[0] & 0x0f);
		/*
		 * Check the received ALUA access state, and determine if
		 * the state is a primary or secondary target port asymmetric
		 * access state.
		 */
324 rc = core_alua_check_transition(alua_access_state, valid_states,
325 &primary, 1);
326 if (rc) {
			/*
			 * If the SET TARGET PORT GROUPS attempts to establish
			 * an invalid combination of target port asymmetric
			 * access states or attempts to establish an
			 * unsupported target port asymmetric access state,
			 * then the command shall be terminated with CHECK
			 * CONDITION status, with the sense key set to ILLEGAL
			 * REQUEST, and the additional sense code set to INVALID
			 * FIELD IN PARAMETER LIST.
			 */
337 goto out;
338 }

		/*
		 * If the ASYMMETRIC ACCESS STATE field specifies a primary
		 * target port asymmetric access state, then the TARGET PORT
		 * GROUP OR TARGET PORT field specifies a primary target port
		 * group for which the primary target port asymmetric access
		 * state shall be changed.
		 *
		 * If it specifies a secondary target port asymmetric access
		 * state, then the TARGET PORT GROUP OR TARGET PORT field
		 * specifies the relative target port identifier of the target
		 * port for which the secondary state shall be changed.
		 */
		if (primary) {
			tg_pt_id = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching target port group ID from
			 * the global tg_pt_gp list
			 */
359 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
360 list_for_each_entry(tg_pt_gp,
361 &dev->t10_alua.tg_pt_gps_list,
362 tg_pt_gp_list) {
363 if (!tg_pt_gp->tg_pt_gp_valid_id)
364 continue;
365
366 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
367 continue;
368
369 atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
370
371 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
372
373 if (!core_alua_do_port_transition(tg_pt_gp,
374 dev, l_lun, nacl,
375 alua_access_state, 1))
376 found = true;
377
378 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
379 atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
380 break;
381 }
382 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
383 } else {
384 struct se_lun *lun;

			/*
			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
			 * the Target Port in question for the incoming
			 * SET_TARGET_PORT_GROUPS op.
			 */
			rtpi = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching relative target port identifier
			 * for the struct se_device storage object.
			 */
396 spin_lock(&dev->se_port_lock);
397 list_for_each_entry(lun, &dev->dev_sep_list,
398 lun_dev_link) {
399 if (lun->lun_rtpi != rtpi)
400 continue;
401
				/* XXX: racy unlock, lun is used without a reference below */
403 spin_unlock(&dev->se_port_lock);
404
405 if (!core_alua_set_tg_pt_secondary_state(
406 lun, 1, 1))
407 found = true;
408
409 spin_lock(&dev->se_port_lock);
410 break;
411 }
412 spin_unlock(&dev->se_port_lock);
413 }
414
415 if (!found) {
416 rc = TCM_INVALID_PARAMETER_LIST;
417 goto out;
418 }
419
420 ptr += 4;
421 len += 4;
422 }
423
424out:
425 transport_kunmap_data_sg(cmd);
426 if (!rc)
427 target_complete_cmd(cmd, GOOD);
428 return rc;
429}
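
/*
 * Informal sketch of the SET TARGET PORT GROUPS parameter list consumed
 * above, derived from how the code walks the buffer:
 *
 *   bytes 0-3   reserved header
 *   byte 4...   4-byte descriptors, one per requested transition:
 *     byte 0    ASYMMETRIC ACCESS STATE (low nibble)
 *     byte 1    reserved
 *     bytes 2-3 TARGET PORT GROUP ID (primary states) or RELATIVE TARGET
 *               PORT IDENTIFIER (secondary OFFLINE), big-endian
 */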
430
431static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
432{
	/*
	 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
	 * the ALUA additional sense code qualifier (ASCQ) is determined
	 * by the ALUA primary or secondary access state.
	 */
438 pr_debug("[%s]: ALUA TG Port not available, "
439 "SenseKey: NOT_READY, ASC/ASCQ: "
440 "0x04/0x%02x\n",
441 cmd->se_tfo->fabric_name, alua_ascq);
442
443 cmd->scsi_asc = 0x04;
444 cmd->scsi_ascq = alua_ascq;
445}
446
447static inline void core_alua_state_nonoptimized(
448 struct se_cmd *cmd,
449 unsigned char *cdb,
450 int nonop_delay_msecs)
451{
	/*
	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
	 * later to determine if processing of this cmd needs to be
	 * temporarily delayed for the Active/NonOptimized primary access state.
	 */
457 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
458 cmd->alua_nonop_delay = nonop_delay_msecs;
459}
460
461static inline int core_alua_state_lba_dependent(
462 struct se_cmd *cmd,
463 struct t10_alua_tg_pt_gp *tg_pt_gp)
464{
465 struct se_device *dev = cmd->se_dev;
466 u64 segment_size, segment_mult, sectors, lba;
467
	/* Only need to check for CDBs actually containing LBAs */
469 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
470 return 0;
471
472 spin_lock(&dev->t10_alua.lba_map_lock);
473 segment_size = dev->t10_alua.lba_map_segment_size;
474 segment_mult = dev->t10_alua.lba_map_segment_multiplier;
475 sectors = cmd->data_length / dev->dev_attrib.block_size;
476
477 lba = cmd->t_task_lba;
478 while (lba < cmd->t_task_lba + sectors) {
479 struct t10_alua_lba_map *cur_map = NULL, *map;
480 struct t10_alua_lba_map_member *map_mem;
481
482 list_for_each_entry(map, &dev->t10_alua.lba_map_list,
483 lba_map_list) {
484 u64 start_lba, last_lba;
485 u64 first_lba = map->lba_map_first_lba;
486
487 if (segment_mult) {
488 u64 tmp = lba;
489 start_lba = do_div(tmp, segment_size * segment_mult);
490
491 last_lba = first_lba + segment_size - 1;
492 if (start_lba >= first_lba &&
493 start_lba <= last_lba) {
494 lba += segment_size;
495 cur_map = map;
496 break;
497 }
498 } else {
499 last_lba = map->lba_map_last_lba;
500 if (lba >= first_lba && lba <= last_lba) {
501 lba = last_lba + 1;
502 cur_map = map;
503 break;
504 }
505 }
506 }
507 if (!cur_map) {
508 spin_unlock(&dev->t10_alua.lba_map_lock);
509 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
510 return 1;
511 }
512 list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
513 lba_map_mem_list) {
514 if (map_mem->lba_map_mem_alua_pg_id !=
515 tg_pt_gp->tg_pt_gp_id)
516 continue;
517 switch(map_mem->lba_map_mem_alua_state) {
518 case ALUA_ACCESS_STATE_STANDBY:
519 spin_unlock(&dev->t10_alua.lba_map_lock);
520 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
521 return 1;
522 case ALUA_ACCESS_STATE_UNAVAILABLE:
523 spin_unlock(&dev->t10_alua.lba_map_lock);
524 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
525 return 1;
526 default:
527 break;
528 }
529 }
530 }
531 spin_unlock(&dev->t10_alua.lba_map_lock);
532 return 0;
533}
534
535static inline int core_alua_state_standby(
536 struct se_cmd *cmd,
537 unsigned char *cdb)
538{
	/*
	 * Allowed CDBs for ALUA access state 'standby' as defined by SPC-4.
	 */
543 switch (cdb[0]) {
544 case INQUIRY:
545 case LOG_SELECT:
546 case LOG_SENSE:
547 case MODE_SELECT:
548 case MODE_SENSE:
549 case REPORT_LUNS:
550 case RECEIVE_DIAGNOSTIC:
551 case SEND_DIAGNOSTIC:
552 case READ_CAPACITY:
553 return 0;
554 case SERVICE_ACTION_IN_16:
555 switch (cdb[1] & 0x1f) {
556 case SAI_READ_CAPACITY_16:
557 return 0;
558 default:
559 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
560 return 1;
561 }
562 case MAINTENANCE_IN:
563 switch (cdb[1] & 0x1f) {
564 case MI_REPORT_TARGET_PGS:
565 return 0;
566 default:
567 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
568 return 1;
569 }
570 case MAINTENANCE_OUT:
571 switch (cdb[1]) {
572 case MO_SET_TARGET_PGS:
573 return 0;
574 default:
575 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
576 return 1;
577 }
578 case REQUEST_SENSE:
579 case PERSISTENT_RESERVE_IN:
580 case PERSISTENT_RESERVE_OUT:
581 case READ_BUFFER:
582 case WRITE_BUFFER:
583 return 0;
584 default:
585 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
586 return 1;
587 }
588
589 return 0;
590}
591
592static inline int core_alua_state_unavailable(
593 struct se_cmd *cmd,
594 unsigned char *cdb)
595{
	/*
	 * Allowed CDBs for ALUA access state 'unavailable' as defined by SPC-4.
	 */
600 switch (cdb[0]) {
601 case INQUIRY:
602 case REPORT_LUNS:
603 return 0;
604 case MAINTENANCE_IN:
605 switch (cdb[1] & 0x1f) {
606 case MI_REPORT_TARGET_PGS:
607 return 0;
608 default:
609 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
610 return 1;
611 }
612 case MAINTENANCE_OUT:
613 switch (cdb[1]) {
614 case MO_SET_TARGET_PGS:
615 return 0;
616 default:
617 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
618 return 1;
619 }
620 case REQUEST_SENSE:
621 case READ_BUFFER:
622 case WRITE_BUFFER:
623 return 0;
624 default:
625 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
626 return 1;
627 }
628
629 return 0;
630}
631
632static inline int core_alua_state_transition(
633 struct se_cmd *cmd,
634 unsigned char *cdb)
635{
	/*
	 * Allowed CDBs for ALUA access state 'transitioning' as defined by SPC-4.
	 */
640 switch (cdb[0]) {
641 case INQUIRY:
642 case REPORT_LUNS:
643 return 0;
644 case MAINTENANCE_IN:
645 switch (cdb[1] & 0x1f) {
646 case MI_REPORT_TARGET_PGS:
647 return 0;
648 default:
649 set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
650 return 1;
651 }
652 case REQUEST_SENSE:
653 case READ_BUFFER:
654 case WRITE_BUFFER:
655 return 0;
656 default:
657 set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
658 return 1;
659 }
660
661 return 0;
662}
663
/*
 * Check the ALUA primary and secondary access states for the struct se_lun
 * this command was received upon, and return a CHECK CONDITION / NOT READY
 * sense when the current state does not allow the CDB to be processed.
 */
669sense_reason_t
670target_alua_state_check(struct se_cmd *cmd)
671{
672 struct se_device *dev = cmd->se_dev;
673 unsigned char *cdb = cmd->t_task_cdb;
674 struct se_lun *lun = cmd->se_lun;
675 struct t10_alua_tg_pt_gp *tg_pt_gp;
676 int out_alua_state, nonop_delay_msecs;
677
678 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
679 return 0;
680 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
681 return 0;
682
	/*
	 * First, check for the struct se_lun specific secondary ALUA target
	 * port access state: OFFLINE
	 */
687 if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
688 pr_debug("ALUA: Got secondary offline status for local"
689 " target port\n");
690 set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
691 return TCM_CHECK_CONDITION_NOT_READY;
692 }
693
694 if (!lun->lun_tg_pt_gp)
695 return 0;
696
697 spin_lock(&lun->lun_tg_pt_gp_lock);
698 tg_pt_gp = lun->lun_tg_pt_gp;
699 out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
700 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;

	/* XXX: tg_pt_gp is still used without a reference after this unlock */
	spin_unlock(&lun->lun_tg_pt_gp_lock);
	/*
	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
	 * statement so the compiler knows explicitly to check this case first.
	 * For the Optimized ALUA access state case, we want to process the
	 * incoming fabric cmd ASAP.
	 */
710 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
711 return 0;
712
713 switch (out_alua_state) {
714 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
715 core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
716 break;
717 case ALUA_ACCESS_STATE_STANDBY:
718 if (core_alua_state_standby(cmd, cdb))
719 return TCM_CHECK_CONDITION_NOT_READY;
720 break;
721 case ALUA_ACCESS_STATE_UNAVAILABLE:
722 if (core_alua_state_unavailable(cmd, cdb))
723 return TCM_CHECK_CONDITION_NOT_READY;
724 break;
725 case ALUA_ACCESS_STATE_TRANSITION:
726 if (core_alua_state_transition(cmd, cdb))
727 return TCM_CHECK_CONDITION_NOT_READY;
728 break;
729 case ALUA_ACCESS_STATE_LBA_DEPENDENT:
730 if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
731 return TCM_CHECK_CONDITION_NOT_READY;
732 break;
	/*
	 * OFFLINE is a secondary ALUA target port group access state, that is
	 * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
	 */
737 case ALUA_ACCESS_STATE_OFFLINE:
738 default:
739 pr_err("Unknown ALUA access state: 0x%02x\n",
740 out_alua_state);
741 return TCM_INVALID_CDB_FIELD;
742 }
743
744 return 0;
745}
746
/*
 * Check implicit and explicit ALUA state change request.
 */
750static sense_reason_t
751core_alua_check_transition(int state, int valid, int *primary, int explicit)
752{
	/*
	 * Verify that the requested primary or secondary ALUA access state
	 * is actually supported by this target port group before allowing
	 * the transition.
	 */
757 switch (state) {
758 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
759 if (!(valid & ALUA_AO_SUP))
760 goto not_supported;
761 *primary = 1;
762 break;
763 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
764 if (!(valid & ALUA_AN_SUP))
765 goto not_supported;
766 *primary = 1;
767 break;
768 case ALUA_ACCESS_STATE_STANDBY:
769 if (!(valid & ALUA_S_SUP))
770 goto not_supported;
771 *primary = 1;
772 break;
773 case ALUA_ACCESS_STATE_UNAVAILABLE:
774 if (!(valid & ALUA_U_SUP))
775 goto not_supported;
776 *primary = 1;
777 break;
778 case ALUA_ACCESS_STATE_LBA_DEPENDENT:
779 if (!(valid & ALUA_LBD_SUP))
780 goto not_supported;
781 *primary = 1;
782 break;
783 case ALUA_ACCESS_STATE_OFFLINE:
		/*
		 * OFFLINE state is defined as a secondary target port
		 * asymmetric access state.
		 */
788 if (!(valid & ALUA_O_SUP))
789 goto not_supported;
790 *primary = 0;
791 break;
792 case ALUA_ACCESS_STATE_TRANSITION:
793 if (!(valid & ALUA_T_SUP) || explicit)
			/*
			 * Transitioning is set internally (implicit ALUA), and
			 * may not be requested via an explicit STPG.
			 */
798 goto not_supported;
799 *primary = 0;
800 break;
801 default:
802 pr_err("Unknown ALUA access state: 0x%02x\n", state);
803 return TCM_INVALID_PARAMETER_LIST;
804 }
805
806 return 0;
807
808not_supported:
	pr_err("ALUA access state %s not supported\n",
	       core_alua_dump_state(state));
811 return TCM_INVALID_PARAMETER_LIST;
812}
813
814static char *core_alua_dump_state(int state)
815{
816 switch (state) {
817 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
818 return "Active/Optimized";
819 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
820 return "Active/NonOptimized";
821 case ALUA_ACCESS_STATE_LBA_DEPENDENT:
822 return "LBA Dependent";
823 case ALUA_ACCESS_STATE_STANDBY:
824 return "Standby";
825 case ALUA_ACCESS_STATE_UNAVAILABLE:
826 return "Unavailable";
827 case ALUA_ACCESS_STATE_OFFLINE:
828 return "Offline";
829 case ALUA_ACCESS_STATE_TRANSITION:
830 return "Transitioning";
831 default:
832 return "Unknown";
833 }
834
835 return NULL;
836}
837
838char *core_alua_dump_status(int status)
839{
840 switch (status) {
841 case ALUA_STATUS_NONE:
842 return "None";
843 case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
844 return "Altered by Explicit STPG";
845 case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
846 return "Altered by Implicit ALUA";
847 default:
848 return "Unknown";
849 }
850
851 return NULL;
852}
853
/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths.
 */
858int core_alua_check_nonop_delay(
859 struct se_cmd *cmd)
860{
861 if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
862 return 0;
863 if (in_interrupt())
864 return 0;
	/*
	 * The ALUA Active/NonOptimized access state delay can be disabled
	 * via configfs with a value of zero
	 */
	if (!cmd->alua_nonop_delay)
		return 0;
	/*
	 * struct se_cmd->alua_nonop_delay gets set to a target port group
	 * defined interval in struct t10_alua_tg_pt_gp->tg_pt_gp_nonop_delay_msecs
	 */
875 msleep_interruptible(cmd->alua_nonop_delay);
876 return 0;
877}
878EXPORT_SYMBOL(core_alua_check_nonop_delay);
879
880static int core_alua_write_tpg_metadata(
881 const char *path,
882 unsigned char *md_buf,
883 u32 md_buf_len)
884{
885 struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
886 loff_t pos = 0;
887 int ret;
888
889 if (IS_ERR(file)) {
890 pr_err("filp_open(%s) for ALUA metadata failed\n", path);
891 return -ENODEV;
892 }
893 ret = kernel_write(file, md_buf, md_buf_len, &pos);
894 if (ret < 0)
895 pr_err("Error writing ALUA metadata file: %s\n", path);
896 fput(file);
897 return (ret < 0) ? -EIO : 0;
898}
899
900static int core_alua_update_tpg_primary_metadata(
901 struct t10_alua_tg_pt_gp *tg_pt_gp)
902{
903 unsigned char *md_buf;
904 struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
905 char *path;
906 int len, rc;
907
908 lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex);
909
910 md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
911 if (!md_buf) {
912 pr_err("Unable to allocate buf for ALUA metadata\n");
913 return -ENOMEM;
914 }
915
916 len = snprintf(md_buf, ALUA_MD_BUF_LEN,
917 "tg_pt_gp_id=%hu\n"
918 "alua_access_state=0x%02x\n"
919 "alua_access_status=0x%02x\n",
920 tg_pt_gp->tg_pt_gp_id,
921 tg_pt_gp->tg_pt_gp_alua_access_state,
922 tg_pt_gp->tg_pt_gp_alua_access_status);
923
924 rc = -ENOMEM;
925 path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
926 &wwn->unit_serial[0],
927 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
928 if (path) {
929 rc = core_alua_write_tpg_metadata(path, md_buf, len);
930 kfree(path);
931 }
932 kfree(md_buf);
933 return rc;
934}
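
/*
 * Illustrative example (values are made up) of the primary ALUA metadata
 * written above to $db_root/alua/tpgs_$T10_UNIT_SERIAL/$TG_PT_GP_NAME:
 *
 *   tg_pt_gp_id=1
 *   alua_access_state=0x01
 *   alua_access_status=0x02
 */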
935
936static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
937{
938 struct se_dev_entry *se_deve;
939 struct se_lun *lun;
940 struct se_lun_acl *lacl;
941
942 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
943 list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
944 lun_tg_pt_gp_link) {
		/*
		 * After an implicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition for the initiator port associated with every I_T
		 * nexus, with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED.
		 *
		 * After an explicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED for the initiator port associated with
		 * every I_T nexus other than the I_T nexus on which the SET
		 * TARGET PORT GROUPS command was received.
		 */
959 if (!percpu_ref_tryget_live(&lun->lun_ref))
960 continue;
961 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
962
963 spin_lock(&lun->lun_deve_lock);
964 list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
965 lacl = rcu_dereference_check(se_deve->se_lun_acl,
966 lockdep_is_held(&lun->lun_deve_lock));

			/*
			 * For an access status altered by an explicit STPG,
			 * skip the I_T nexus (LUN + initiator ACL) on which
			 * the SET TARGET PORT GROUPS command was received;
			 * per SPC-4 the unit attention is established for
			 * every other I_T nexus only.
			 */
978 if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
979 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
980 (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
981 (tg_pt_gp->tg_pt_gp_alua_lun == lun))
982 continue;
983
			/*
			 * se_deve->se_lun_acl pointer may be NULL for an
			 * entry created without explicit Node+MappedLUN ACLs
			 */
988 if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
989 (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
990 continue;
991
992 core_scsi3_ua_allocate(se_deve, 0x2A,
993 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
994 }
995 spin_unlock(&lun->lun_deve_lock);
996
997 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
998 percpu_ref_put(&lun->lun_ref);
999 }
1000 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1001}
1002
1003static int core_alua_do_transition_tg_pt(
1004 struct t10_alua_tg_pt_gp *tg_pt_gp,
1005 int new_state,
1006 int explicit)
1007{
1008 int prev_state;
1009
1010 mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1011
1012 if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
1013 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1014 return 0;
1015 }
1016
1017 if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
1018 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1019 return -EAGAIN;
1020 }
1021
	/*
	 * Save the old primary ALUA access state, and set the current state
	 * to ALUA_ACCESS_STATE_TRANSITION.
	 */
1026 prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
1027 tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
1028 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
1029 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1030 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1031
1032 core_alua_queue_state_change_ua(tg_pt_gp);
1033
1034 if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
1035 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1036 return 0;
1037 }
1038
	/*
	 * Check for the optional ALUA primary state transition delay
	 */
	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

	/*
	 * Set the current primary ALUA access state to the requested new state
	 */
1048 tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
1049
	/*
	 * When tg_pt_gp_write_metadata is enabled via configfs, persist the
	 * new primary access state and status to the ALUA metadata file so
	 * that they can be restored across target restarts.
	 *
	 * Note that a failure to write out the ALUA metadata to struct file
	 * does NOT affect the actual ALUA transition.
	 */
1062 if (tg_pt_gp->tg_pt_gp_write_metadata) {
1063 core_alua_update_tpg_primary_metadata(tg_pt_gp);
1064 }
1065
1066 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1067 " from primary access state %s to %s\n", (explicit) ? "explicit" :
1068 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1069 tg_pt_gp->tg_pt_gp_id,
1070 core_alua_dump_state(prev_state),
1071 core_alua_dump_state(new_state));
1072
1073 core_alua_queue_state_change_ua(tg_pt_gp);
1074
1075 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1076 return 0;
1077}
1078
1079int core_alua_do_port_transition(
1080 struct t10_alua_tg_pt_gp *l_tg_pt_gp,
1081 struct se_device *l_dev,
1082 struct se_lun *l_lun,
1083 struct se_node_acl *l_nacl,
1084 int new_state,
1085 int explicit)
1086{
1087 struct se_device *dev;
1088 struct t10_alua_lu_gp *lu_gp;
1089 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
1090 struct t10_alua_tg_pt_gp *tg_pt_gp;
1091 int primary, valid_states, rc = 0;
1092
1093 if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
1094 return -ENODEV;
1095
1096 valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
1097 if (core_alua_check_transition(new_state, valid_states, &primary,
1098 explicit) != 0)
1099 return -EINVAL;
1100
1101 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
1102 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
1103 lu_gp = local_lu_gp_mem->lu_gp;
1104 atomic_inc(&lu_gp->lu_gp_ref_cnt);
1105 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
1106
	/*
	 * For storage objects that are members of the 'default_lu_gp',
	 * we only do transition on the passed *l_tg_pt_gp, and not
	 * on all of the matching target port group IDs in default_lu_gp.
	 */
	if (!lu_gp->lu_gp_id) {
		/*
		 * core_alua_do_transition_tg_pt() will always return
		 * success here.
		 */
1116 l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
1117 l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1118 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
1119 new_state, explicit);
1120 atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1121 return rc;
1122 }
1123
	/*
	 * For all other LU groups aside from 'default_lu_gp', walk all of
	 * the associated storage objects looking for a matching target port
	 * group ID from the local target port group.
	 */
1128 spin_lock(&lu_gp->lu_gp_lock);
1129 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
1130 lu_gp_mem_list) {
1131
1132 dev = lu_gp_mem->lu_gp_mem_dev;
1133 atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1134 spin_unlock(&lu_gp->lu_gp_lock);
1135
1136 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1137 list_for_each_entry(tg_pt_gp,
1138 &dev->t10_alua.tg_pt_gps_list,
1139 tg_pt_gp_list) {
1140
1141 if (!tg_pt_gp->tg_pt_gp_valid_id)
1142 continue;
1143
			/*
			 * If the target port asymmetric access state is
			 * changed for any target port group accessible via
			 * a logical unit within a LU group, the asymmetric
			 * access state for the same target port group
			 * accessible via other logical units in that LU
			 * group also changes.
			 */
1151 if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
1152 continue;
1153
1154 if (l_tg_pt_gp == tg_pt_gp) {
1155 tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
1156 tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1157 } else {
1158 tg_pt_gp->tg_pt_gp_alua_lun = NULL;
1159 tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
1160 }
1161 atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1162 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1163
			/*
			 * core_alua_do_transition_tg_pt() will always return
			 * success here.
			 */
1167 rc = core_alua_do_transition_tg_pt(tg_pt_gp,
1168 new_state, explicit);
1169
1170 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1171 atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1172 if (rc)
1173 break;
1174 }
1175 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1176
1177 spin_lock(&lu_gp->lu_gp_lock);
1178 atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1179 }
1180 spin_unlock(&lu_gp->lu_gp_lock);
1181
1182 if (!rc) {
1183 pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
1184 " Group IDs: %hu %s transition to primary state: %s\n",
1185 config_item_name(&lu_gp->lu_gp_group.cg_item),
1186 l_tg_pt_gp->tg_pt_gp_id,
1187 (explicit) ? "explicit" : "implicit",
1188 core_alua_dump_state(new_state));
1189 }
1190
1191 atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1192 return rc;
1193}
1194
1195static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
1196{
1197 struct se_portal_group *se_tpg = lun->lun_tpg;
1198 unsigned char *md_buf;
1199 char *path;
1200 int len, rc;
1201
1202 mutex_lock(&lun->lun_tg_pt_md_mutex);
1203
1204 md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
1205 if (!md_buf) {
1206 pr_err("Unable to allocate buf for ALUA metadata\n");
1207 rc = -ENOMEM;
1208 goto out_unlock;
1209 }
1210
1211 len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
1212 "alua_tg_pt_status=0x%02x\n",
1213 atomic_read(&lun->lun_tg_pt_secondary_offline),
1214 lun->lun_tg_pt_secondary_stat);
1215
1216 if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
1217 path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
1218 db_root, se_tpg->se_tpg_tfo->fabric_name,
1219 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1220 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
1221 lun->unpacked_lun);
1222 } else {
1223 path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
1224 db_root, se_tpg->se_tpg_tfo->fabric_name,
1225 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1226 lun->unpacked_lun);
1227 }
1228 if (!path) {
1229 rc = -ENOMEM;
1230 goto out_free;
1231 }
1232
1233 rc = core_alua_write_tpg_metadata(path, md_buf, len);
1234 kfree(path);
1235out_free:
1236 kfree(md_buf);
1237out_unlock:
1238 mutex_unlock(&lun->lun_tg_pt_md_mutex);
1239 return rc;
1240}
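
/*
 * Illustrative example (values are made up) of the secondary ALUA metadata
 * written above, e.g. to $db_root/alua/$FABRIC/$WWN+$TPGT/lun_0 for fabrics
 * implementing tpg_get_tag():
 *
 *   alua_tg_pt_offline=1
 *   alua_tg_pt_status=0x02
 */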
1241
1242static int core_alua_set_tg_pt_secondary_state(
1243 struct se_lun *lun,
1244 int explicit,
1245 int offline)
1246{
1247 struct t10_alua_tg_pt_gp *tg_pt_gp;
1248 int trans_delay_msecs;
1249
1250 spin_lock(&lun->lun_tg_pt_gp_lock);
1251 tg_pt_gp = lun->lun_tg_pt_gp;
1252 if (!tg_pt_gp) {
1253 spin_unlock(&lun->lun_tg_pt_gp_lock);
1254 pr_err("Unable to complete secondary state"
1255 " transition\n");
1256 return -EINVAL;
1257 }
1258 trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;

	/*
	 * Set the secondary ALUA target port access state to OFFLINE
	 * or release the previously set secondary state for struct se_lun
	 */
1263 if (offline)
1264 atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
1265 else
1266 atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
1267
1268 lun->lun_tg_pt_secondary_stat = (explicit) ?
1269 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1270 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1271
1272 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1273 " to secondary access state: %s\n", (explicit) ? "explicit" :
1274 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1275 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1276
1277 spin_unlock(&lun->lun_tg_pt_gp_lock);
1278
	/*
	 * Do the optional transition delay after we set the secondary
	 * ALUA access state.
	 */
	if (trans_delay_msecs != 0)
		msleep_interruptible(trans_delay_msecs);

	/*
	 * See if we need to update the ALUA fabric port metadata for
	 * secondary state and status
	 */
1288 if (lun->lun_tg_pt_secondary_write_md)
1289 core_alua_update_tpg_secondary_metadata(lun);
1290
1291 return 0;
1292}
1293
1294struct t10_alua_lba_map *
1295core_alua_allocate_lba_map(struct list_head *list,
1296 u64 first_lba, u64 last_lba)
1297{
1298 struct t10_alua_lba_map *lba_map;
1299
1300 lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
1301 if (!lba_map) {
1302 pr_err("Unable to allocate struct t10_alua_lba_map\n");
1303 return ERR_PTR(-ENOMEM);
1304 }
1305 INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
1306 lba_map->lba_map_first_lba = first_lba;
1307 lba_map->lba_map_last_lba = last_lba;
1308
1309 list_add_tail(&lba_map->lba_map_list, list);
1310 return lba_map;
1311}
1312
1313int
1314core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
1315 int pg_id, int state)
1316{
1317 struct t10_alua_lba_map_member *lba_map_mem;
1318
1319 list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
1320 lba_map_mem_list) {
1321 if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
1322 pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
1323 return -EINVAL;
1324 }
1325 }
1326
1327 lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
1328 if (!lba_map_mem) {
1329 pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
1330 return -ENOMEM;
1331 }
1332 lba_map_mem->lba_map_mem_alua_state = state;
1333 lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
1334
1335 list_add_tail(&lba_map_mem->lba_map_mem_list,
1336 &lba_map->lba_map_mem_list);
1337 return 0;
1338}
1339
1340void
1341core_alua_free_lba_map(struct list_head *lba_list)
1342{
1343 struct t10_alua_lba_map *lba_map, *lba_map_tmp;
1344 struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
1345
1346 list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
1347 lba_map_list) {
1348 list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
1349 &lba_map->lba_map_mem_list,
1350 lba_map_mem_list) {
1351 list_del(&lba_map_mem->lba_map_mem_list);
1352 kmem_cache_free(t10_alua_lba_map_mem_cache,
1353 lba_map_mem);
1354 }
1355 list_del(&lba_map->lba_map_list);
1356 kmem_cache_free(t10_alua_lba_map_cache, lba_map);
1357 }
1358}
1359
1360void
1361core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1362 int segment_size, int segment_mult)
1363{
1364 struct list_head old_lba_map_list;
1365 struct t10_alua_tg_pt_gp *tg_pt_gp;
1366 int activate = 0, supported;
1367
1368 INIT_LIST_HEAD(&old_lba_map_list);
1369 spin_lock(&dev->t10_alua.lba_map_lock);
1370 dev->t10_alua.lba_map_segment_size = segment_size;
1371 dev->t10_alua.lba_map_segment_multiplier = segment_mult;
1372 list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1373 if (lba_map_list) {
1374 list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1375 activate = 1;
1376 }
1377 spin_unlock(&dev->t10_alua.lba_map_lock);
1378 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1379 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1380 tg_pt_gp_list) {
1381
1382 if (!tg_pt_gp->tg_pt_gp_valid_id)
1383 continue;
1384 supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1385 if (activate)
1386 supported |= ALUA_LBD_SUP;
1387 else
1388 supported &= ~ALUA_LBD_SUP;
1389 tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1390 }
1391 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1392 core_alua_free_lba_map(&old_lba_map_list);
1393}
1394
1395struct t10_alua_lu_gp *
1396core_alua_allocate_lu_gp(const char *name, int def_group)
1397{
1398 struct t10_alua_lu_gp *lu_gp;
1399
1400 lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1401 if (!lu_gp) {
1402 pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1403 return ERR_PTR(-ENOMEM);
1404 }
1405 INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1406 INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1407 spin_lock_init(&lu_gp->lu_gp_lock);
1408 atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1409
1410 if (def_group) {
1411 lu_gp->lu_gp_id = alua_lu_gps_counter++;
1412 lu_gp->lu_gp_valid_id = 1;
1413 alua_lu_gps_count++;
1414 }
1415
1416 return lu_gp;
1417}
1418
1419int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1420{
1421 struct t10_alua_lu_gp *lu_gp_tmp;
1422 u16 lu_gp_id_tmp;
1423
	/*
	 * The lu_gp->lu_gp_id may only be set once.
	 */
1426 if (lu_gp->lu_gp_valid_id) {
1427 pr_warn("ALUA LU Group already has a valid ID,"
1428 " ignoring request\n");
1429 return -EINVAL;
1430 }
1431
1432 spin_lock(&lu_gps_lock);
1433 if (alua_lu_gps_count == 0x0000ffff) {
1434 pr_err("Maximum ALUA alua_lu_gps_count:"
1435 " 0x0000ffff reached\n");
1436 spin_unlock(&lu_gps_lock);
1437 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1438 return -ENOSPC;
1439 }
1440again:
1441 lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1442 alua_lu_gps_counter++;
1443
1444 list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1445 if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1446 if (!lu_gp_id)
1447 goto again;
1448
1449 pr_warn("ALUA Logical Unit Group ID: %hu"
1450 " already exists, ignoring request\n",
1451 lu_gp_id);
1452 spin_unlock(&lu_gps_lock);
1453 return -EINVAL;
1454 }
1455 }
1456
1457 lu_gp->lu_gp_id = lu_gp_id_tmp;
1458 lu_gp->lu_gp_valid_id = 1;
1459 list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1460 alua_lu_gps_count++;
1461 spin_unlock(&lu_gps_lock);
1462
1463 return 0;
1464}
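
/*
 * Typical configfs usage driving core_alua_allocate_lu_gp() and
 * core_alua_set_lu_gp_id(); the paths and attribute name below are
 * illustrative, the authoritative definitions live in target_core_configfs.c:
 *
 *   mkdir /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp
 *   echo 1 > /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp/lu_gp_id
 */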
1465
1466static struct t10_alua_lu_gp_member *
1467core_alua_allocate_lu_gp_mem(struct se_device *dev)
1468{
1469 struct t10_alua_lu_gp_member *lu_gp_mem;
1470
1471 lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1472 if (!lu_gp_mem) {
1473 pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1474 return ERR_PTR(-ENOMEM);
1475 }
1476 INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1477 spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1478 atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1479
1480 lu_gp_mem->lu_gp_mem_dev = dev;
1481 dev->dev_alua_lu_gp_mem = lu_gp_mem;
1482
1483 return lu_gp_mem;
1484}
1485
1486void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1487{
1488 struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1489
	/*
	 * Remove the *lu_gp from the global list first so that no new
	 * associations can be made while we are releasing
	 * struct t10_alua_lu_gp.
	 */
1497 spin_lock(&lu_gps_lock);
1498 list_del(&lu_gp->lu_gp_node);
1499 alua_lu_gps_count--;
1500 spin_unlock(&lu_gps_lock);
1501
	/*
	 * Wait for any outstanding references obtained via
	 * core_alua_get_lu_gp_by_name() to be dropped through
	 * core_alua_put_lu_gp_from_name().
	 */
1506 while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1507 cpu_relax();
1508
	/*
	 * Release the reference to struct t10_alua_lu_gp from all associated
	 * struct se_device.
	 */
1512 spin_lock(&lu_gp->lu_gp_lock);
1513 list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1514 &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1515 if (lu_gp_mem->lu_gp_assoc) {
1516 list_del(&lu_gp_mem->lu_gp_mem_list);
1517 lu_gp->lu_gp_members--;
1518 lu_gp_mem->lu_gp_assoc = 0;
1519 }
1520 spin_unlock(&lu_gp->lu_gp_lock);
1521
		/*
		 * lu_gp_mem is associated with a single
		 * struct se_device->dev_alua_lu_gp_mem, and is released when
		 * struct se_device is released via core_alua_free_lu_gp_mem().
		 *
		 * If the passed lu_gp does NOT match the default_lu_gp, assume
		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
		 */
1530 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1531 if (lu_gp != default_lu_gp)
1532 __core_alua_attach_lu_gp_mem(lu_gp_mem,
1533 default_lu_gp);
1534 else
1535 lu_gp_mem->lu_gp = NULL;
1536 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1537
1538 spin_lock(&lu_gp->lu_gp_lock);
1539 }
1540 spin_unlock(&lu_gp->lu_gp_lock);
1541
1542 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1543}
1544
1545void core_alua_free_lu_gp_mem(struct se_device *dev)
1546{
1547 struct t10_alua_lu_gp *lu_gp;
1548 struct t10_alua_lu_gp_member *lu_gp_mem;
1549
1550 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1551 if (!lu_gp_mem)
1552 return;
1553
1554 while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1555 cpu_relax();
1556
1557 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1558 lu_gp = lu_gp_mem->lu_gp;
1559 if (lu_gp) {
1560 spin_lock(&lu_gp->lu_gp_lock);
1561 if (lu_gp_mem->lu_gp_assoc) {
1562 list_del(&lu_gp_mem->lu_gp_mem_list);
1563 lu_gp->lu_gp_members--;
1564 lu_gp_mem->lu_gp_assoc = 0;
1565 }
1566 spin_unlock(&lu_gp->lu_gp_lock);
1567 lu_gp_mem->lu_gp = NULL;
1568 }
1569 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1570
1571 kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1572}
1573
1574struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1575{
1576 struct t10_alua_lu_gp *lu_gp;
1577 struct config_item *ci;
1578
1579 spin_lock(&lu_gps_lock);
1580 list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1581 if (!lu_gp->lu_gp_valid_id)
1582 continue;
1583 ci = &lu_gp->lu_gp_group.cg_item;
1584 if (!strcmp(config_item_name(ci), name)) {
1585 atomic_inc(&lu_gp->lu_gp_ref_cnt);
1586 spin_unlock(&lu_gps_lock);
1587 return lu_gp;
1588 }
1589 }
1590 spin_unlock(&lu_gps_lock);
1591
1592 return NULL;
1593}
1594
1595void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1596{
1597 spin_lock(&lu_gps_lock);
1598 atomic_dec(&lu_gp->lu_gp_ref_cnt);
1599 spin_unlock(&lu_gps_lock);
1600}
1601
/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
1605void __core_alua_attach_lu_gp_mem(
1606 struct t10_alua_lu_gp_member *lu_gp_mem,
1607 struct t10_alua_lu_gp *lu_gp)
1608{
1609 spin_lock(&lu_gp->lu_gp_lock);
1610 lu_gp_mem->lu_gp = lu_gp;
1611 lu_gp_mem->lu_gp_assoc = 1;
1612 list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1613 lu_gp->lu_gp_members++;
1614 spin_unlock(&lu_gp->lu_gp_lock);
1615}
1616
/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
1620void __core_alua_drop_lu_gp_mem(
1621 struct t10_alua_lu_gp_member *lu_gp_mem,
1622 struct t10_alua_lu_gp *lu_gp)
1623{
1624 spin_lock(&lu_gp->lu_gp_lock);
1625 list_del(&lu_gp_mem->lu_gp_mem_list);
1626 lu_gp_mem->lu_gp = NULL;
1627 lu_gp_mem->lu_gp_assoc = 0;
1628 lu_gp->lu_gp_members--;
1629 spin_unlock(&lu_gp->lu_gp_lock);
1630}
1631
1632struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1633 const char *name, int def_group)
1634{
1635 struct t10_alua_tg_pt_gp *tg_pt_gp;
1636
1637 tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1638 if (!tg_pt_gp) {
1639 pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1640 return NULL;
1641 }
1642 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1643 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1644 mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
1645 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1646 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1647 tg_pt_gp->tg_pt_gp_dev = dev;
1648 tg_pt_gp->tg_pt_gp_alua_access_state =
1649 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
	/*
	 * Enable both explicit and implicit ALUA support by default
	 */
	tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
	/*
	 * Set the default Active/NonOptimized, transition and implicit
	 * transition delays
	 */
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;

	/*
	 * Enable all supported states except LBA dependent (ALUA_LBD_SUP)
	 */
1665 tg_pt_gp->tg_pt_gp_alua_supported_states =
1666 ALUA_T_SUP | ALUA_O_SUP |
1667 ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1668
1669 if (def_group) {
1670 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1671 tg_pt_gp->tg_pt_gp_id =
1672 dev->t10_alua.alua_tg_pt_gps_counter++;
1673 tg_pt_gp->tg_pt_gp_valid_id = 1;
1674 dev->t10_alua.alua_tg_pt_gps_count++;
1675 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1676 &dev->t10_alua.tg_pt_gps_list);
1677 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1678 }
1679
1680 return tg_pt_gp;
1681}
1682
1683int core_alua_set_tg_pt_gp_id(
1684 struct t10_alua_tg_pt_gp *tg_pt_gp,
1685 u16 tg_pt_gp_id)
1686{
1687 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1688 struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1689 u16 tg_pt_gp_id_tmp;
1690
	/*
	 * The tg_pt_gp->tg_pt_gp_id may only be set once.
	 */
1694 if (tg_pt_gp->tg_pt_gp_valid_id) {
1695 pr_warn("ALUA TG PT Group already has a valid ID,"
1696 " ignoring request\n");
1697 return -EINVAL;
1698 }
1699
1700 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1701 if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1702 pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1703 " 0x0000ffff reached\n");
1704 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1705 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1706 return -ENOSPC;
1707 }
1708again:
1709 tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1710 dev->t10_alua.alua_tg_pt_gps_counter++;
1711
1712 list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1713 tg_pt_gp_list) {
1714 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1715 if (!tg_pt_gp_id)
1716 goto again;
1717
1718 pr_err("ALUA Target Port Group ID: %hu already"
1719 " exists, ignoring request\n", tg_pt_gp_id);
1720 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1721 return -EINVAL;
1722 }
1723 }
1724
1725 tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1726 tg_pt_gp->tg_pt_gp_valid_id = 1;
1727 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1728 &dev->t10_alua.tg_pt_gps_list);
1729 dev->t10_alua.alua_tg_pt_gps_count++;
1730 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1731
1732 return 0;
1733}
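
/*
 * Typical configfs usage driving core_alua_allocate_tg_pt_gp() and
 * core_alua_set_tg_pt_gp_id(); the paths and attribute name below are
 * illustrative, the authoritative definitions live in target_core_configfs.c:
 *
 *   mkdir /sys/kernel/config/target/core/$HBA/$DEV/alua/my_tg_pt_gp
 *   echo 16 > /sys/kernel/config/target/core/$HBA/$DEV/alua/my_tg_pt_gp/tg_pt_gp_id
 */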
1734
1735void core_alua_free_tg_pt_gp(
1736 struct t10_alua_tg_pt_gp *tg_pt_gp)
1737{
1738 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1739 struct se_lun *lun, *next;
1740
	/*
	 * Remove the *tg_pt_gp from the device list first so that no new
	 * associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
	 */
1749 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1750 if (tg_pt_gp->tg_pt_gp_valid_id) {
1751 list_del(&tg_pt_gp->tg_pt_gp_list);
1752 dev->t10_alua.alua_tg_pt_gps_count--;
1753 }
1754 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1755
	/*
	 * Wait for any outstanding references obtained via
	 * core_alua_get_tg_pt_gp_by_name() to be dropped through
	 * core_alua_put_tg_pt_gp_from_name().
	 */
1762 while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1763 cpu_relax();
1764
	/*
	 * Release the reference to struct t10_alua_tg_pt_gp from all
	 * associated struct se_lun.
	 */
1769 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1770 list_for_each_entry_safe(lun, next,
1771 &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
1772 list_del_init(&lun->lun_tg_pt_gp_link);
1773 tg_pt_gp->tg_pt_gp_members--;
1774
1775 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1776
		/*
		 * If the freed tg_pt_gp does NOT match the default_tg_pt_gp,
		 * re-associate the LUN with the default_tg_pt_gp.
		 */
1781 spin_lock(&lun->lun_tg_pt_gp_lock);
1782 if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1783 __target_attach_tg_pt_gp(lun,
1784 dev->t10_alua.default_tg_pt_gp);
1785 } else
1786 lun->lun_tg_pt_gp = NULL;
1787 spin_unlock(&lun->lun_tg_pt_gp_lock);
1788
1789 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1790 }
1791 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1792
1793 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1794}
1795
1796static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1797 struct se_device *dev, const char *name)
1798{
1799 struct t10_alua_tg_pt_gp *tg_pt_gp;
1800 struct config_item *ci;
1801
1802 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1803 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1804 tg_pt_gp_list) {
1805 if (!tg_pt_gp->tg_pt_gp_valid_id)
1806 continue;
1807 ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1808 if (!strcmp(config_item_name(ci), name)) {
1809 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1810 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1811 return tg_pt_gp;
1812 }
1813 }
1814 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1815
1816 return NULL;
1817}
1818
1819static void core_alua_put_tg_pt_gp_from_name(
1820 struct t10_alua_tg_pt_gp *tg_pt_gp)
1821{
1822 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1823
1824 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1825 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1826 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1827}
1828
1829static void __target_attach_tg_pt_gp(struct se_lun *lun,
1830 struct t10_alua_tg_pt_gp *tg_pt_gp)
1831{
1832 struct se_dev_entry *se_deve;
1833
1834 assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1835
1836 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1837 lun->lun_tg_pt_gp = tg_pt_gp;
1838 list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
1839 tg_pt_gp->tg_pt_gp_members++;
1840 spin_lock(&lun->lun_deve_lock);
1841 list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
1842 core_scsi3_ua_allocate(se_deve, 0x3f,
1843 ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
1844 spin_unlock(&lun->lun_deve_lock);
1845 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1846}
1847
1848void target_attach_tg_pt_gp(struct se_lun *lun,
1849 struct t10_alua_tg_pt_gp *tg_pt_gp)
1850{
1851 spin_lock(&lun->lun_tg_pt_gp_lock);
1852 __target_attach_tg_pt_gp(lun, tg_pt_gp);
1853 spin_unlock(&lun->lun_tg_pt_gp_lock);
1854}
1855
1856static void __target_detach_tg_pt_gp(struct se_lun *lun,
1857 struct t10_alua_tg_pt_gp *tg_pt_gp)
1858{
1859 assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1860
1861 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1862 list_del_init(&lun->lun_tg_pt_gp_link);
1863 tg_pt_gp->tg_pt_gp_members--;
1864 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1865
1866 lun->lun_tg_pt_gp = NULL;
1867}
1868
1869void target_detach_tg_pt_gp(struct se_lun *lun)
1870{
1871 struct t10_alua_tg_pt_gp *tg_pt_gp;
1872
1873 spin_lock(&lun->lun_tg_pt_gp_lock);
1874 tg_pt_gp = lun->lun_tg_pt_gp;
1875 if (tg_pt_gp)
1876 __target_detach_tg_pt_gp(lun, tg_pt_gp);
1877 spin_unlock(&lun->lun_tg_pt_gp_lock);
1878}
1879
1880ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1881{
1882 struct config_item *tg_pt_ci;
1883 struct t10_alua_tg_pt_gp *tg_pt_gp;
1884 ssize_t len = 0;
1885
1886 spin_lock(&lun->lun_tg_pt_gp_lock);
1887 tg_pt_gp = lun->lun_tg_pt_gp;
1888 if (tg_pt_gp) {
1889 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1890 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1891 " %hu\nTG Port Primary Access State: %s\nTG Port "
1892 "Primary Access Status: %s\nTG Port Secondary Access"
1893 " State: %s\nTG Port Secondary Access Status: %s\n",
1894 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1895 core_alua_dump_state(
1896 tg_pt_gp->tg_pt_gp_alua_access_state),
1897 core_alua_dump_status(
1898 tg_pt_gp->tg_pt_gp_alua_access_status),
1899 atomic_read(&lun->lun_tg_pt_secondary_offline) ?
1900 "Offline" : "None",
1901 core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
1902 }
1903 spin_unlock(&lun->lun_tg_pt_gp_lock);
1904
1905 return len;
1906}
1907
1908ssize_t core_alua_store_tg_pt_gp_info(
1909 struct se_lun *lun,
1910 const char *page,
1911 size_t count)
1912{
1913 struct se_portal_group *tpg = lun->lun_tpg;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
1918 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
1919 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1920 unsigned char buf[TG_PT_GROUP_NAME_BUF];
1921 int move = 0;
1922
1923 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
1924 (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
1925 return -ENODEV;
1926
1927 if (count > TG_PT_GROUP_NAME_BUF) {
1928 pr_err("ALUA Target Port Group alias too large!\n");
1929 return -EINVAL;
1930 }
1931 memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1932 memcpy(buf, page, count);
1933
	/*
	 * Any ALUA target port group alias besides "NULL" means we will be
	 * making a new group association.
	 */
1937 if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
		 * struct t10_alua_tg_pt_gp.  This reference is released with
		 * core_alua_put_tg_pt_gp_from_name() below.
		 */
1943 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
1944 strstrip(buf));
1945 if (!tg_pt_gp_new)
1946 return -ENODEV;
1947 }
1948
1949 spin_lock(&lun->lun_tg_pt_gp_lock);
1950 tg_pt_gp = lun->lun_tg_pt_gp;
1951 if (tg_pt_gp) {
		/*
		 * Clearing an existing tg_pt_gp association, and replacing
		 * with the default_tg_pt_gp.
		 */
1956 if (!tg_pt_gp_new) {
1957 pr_debug("Target_Core_ConfigFS: Moving"
1958 " %s/tpgt_%hu/%s from ALUA Target Port Group:"
1959 " alua/%s, ID: %hu back to"
1960 " default_tg_pt_gp\n",
1961 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1962 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1963 config_item_name(&lun->lun_group.cg_item),
1964 config_item_name(
1965 &tg_pt_gp->tg_pt_gp_group.cg_item),
1966 tg_pt_gp->tg_pt_gp_id);
1967
1968 __target_detach_tg_pt_gp(lun, tg_pt_gp);
1969 __target_attach_tg_pt_gp(lun,
1970 dev->t10_alua.default_tg_pt_gp);
1971 spin_unlock(&lun->lun_tg_pt_gp_lock);
1972
1973 return count;
1974 }
1975 __target_detach_tg_pt_gp(lun, tg_pt_gp);
1976 move = 1;
1977 }
1978
1979 __target_attach_tg_pt_gp(lun, tg_pt_gp_new);
1980 spin_unlock(&lun->lun_tg_pt_gp_lock);
1981 pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1982 " Target Port Group: alua/%s, ID: %hu\n", (move) ?
1983 "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1984 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1985 config_item_name(&lun->lun_group.cg_item),
1986 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1987 tg_pt_gp_new->tg_pt_gp_id);
1988
1989 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1990 return count;
1991}
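
/*
 * Illustrative usage of the store handler above: associate a fabric LUN with
 * an existing target port group by alias, or write "NULL" to fall back to
 * the default_tg_pt_gp.  The exact configfs path and attribute name depend on
 * the fabric module and the fabric-independent configfs code:
 *
 *   echo my_tg_pt_gp > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 *   echo NULL        > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 */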
1992
1993ssize_t core_alua_show_access_type(
1994 struct t10_alua_tg_pt_gp *tg_pt_gp,
1995 char *page)
1996{
1997 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
1998 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
1999 return sprintf(page, "Implicit and Explicit\n");
2000 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
2001 return sprintf(page, "Implicit\n");
2002 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
2003 return sprintf(page, "Explicit\n");
2004 else
2005 return sprintf(page, "None\n");
2006}
2007
2008ssize_t core_alua_store_access_type(
2009 struct t10_alua_tg_pt_gp *tg_pt_gp,
2010 const char *page,
2011 size_t count)
2012{
2013 unsigned long tmp;
2014 int ret;
2015
2016 ret = kstrtoul(page, 0, &tmp);
2017 if (ret < 0) {
2018 pr_err("Unable to extract alua_access_type\n");
2019 return ret;
2020 }
2021 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2022 pr_err("Illegal value for alua_access_type:"
2023 " %lu\n", tmp);
2024 return -EINVAL;
2025 }
2026 if (tmp == 3)
2027 tg_pt_gp->tg_pt_gp_alua_access_type =
2028 TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2029 else if (tmp == 2)
2030 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2031 else if (tmp == 1)
2032 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2033 else
2034 tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2035
2036 return count;
2037}
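
/*
 * The value written maps onto tg_pt_gp_alua_access_type as follows
 * (configfs path is illustrative):
 *
 *   echo 0 > .../alua_access_type    -> None
 *   echo 1 > .../alua_access_type    -> Implicit only
 *   echo 2 > .../alua_access_type    -> Explicit only
 *   echo 3 > .../alua_access_type    -> Implicit and Explicit
 */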
2038
2039ssize_t core_alua_show_nonop_delay_msecs(
2040 struct t10_alua_tg_pt_gp *tg_pt_gp,
2041 char *page)
2042{
2043 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
2044}
2045
2046ssize_t core_alua_store_nonop_delay_msecs(
2047 struct t10_alua_tg_pt_gp *tg_pt_gp,
2048 const char *page,
2049 size_t count)
2050{
2051 unsigned long tmp;
2052 int ret;
2053
2054 ret = kstrtoul(page, 0, &tmp);
2055 if (ret < 0) {
2056 pr_err("Unable to extract nonop_delay_msecs\n");
2057 return ret;
2058 }
2059 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2060 pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2061 " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2062 ALUA_MAX_NONOP_DELAY_MSECS);
2063 return -EINVAL;
2064 }
2065 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2066
2067 return count;
2068}
2069
2070ssize_t core_alua_show_trans_delay_msecs(
2071 struct t10_alua_tg_pt_gp *tg_pt_gp,
2072 char *page)
2073{
2074 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
2075}
2076
2077ssize_t core_alua_store_trans_delay_msecs(
2078 struct t10_alua_tg_pt_gp *tg_pt_gp,
2079 const char *page,
2080 size_t count)
2081{
2082 unsigned long tmp;
2083 int ret;
2084
2085 ret = kstrtoul(page, 0, &tmp);
2086 if (ret < 0) {
2087 pr_err("Unable to extract trans_delay_msecs\n");
2088 return ret;
2089 }
2090 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2091 pr_err("Passed trans_delay_msecs: %lu, exceeds"
2092 " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2093 ALUA_MAX_TRANS_DELAY_MSECS);
2094 return -EINVAL;
2095 }
2096 tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2097
2098 return count;
2099}
2100
2101ssize_t core_alua_show_implicit_trans_secs(
2102 struct t10_alua_tg_pt_gp *tg_pt_gp,
2103 char *page)
2104{
2105 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
2106}
2107
2108ssize_t core_alua_store_implicit_trans_secs(
2109 struct t10_alua_tg_pt_gp *tg_pt_gp,
2110 const char *page,
2111 size_t count)
2112{
2113 unsigned long tmp;
2114 int ret;
2115
2116 ret = kstrtoul(page, 0, &tmp);
2117 if (ret < 0) {
2118 pr_err("Unable to extract implicit_trans_secs\n");
2119 return ret;
2120 }
2121 if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2122 pr_err("Passed implicit_trans_secs: %lu, exceeds"
2123 " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2124 ALUA_MAX_IMPLICIT_TRANS_SECS);
2125 return -EINVAL;
2126 }
2127 tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2128
2129 return count;
2130}
2131
2132ssize_t core_alua_show_preferred_bit(
2133 struct t10_alua_tg_pt_gp *tg_pt_gp,
2134 char *page)
2135{
2136 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
2137}
2138
2139ssize_t core_alua_store_preferred_bit(
2140 struct t10_alua_tg_pt_gp *tg_pt_gp,
2141 const char *page,
2142 size_t count)
2143{
2144 unsigned long tmp;
2145 int ret;
2146
2147 ret = kstrtoul(page, 0, &tmp);
2148 if (ret < 0) {
2149 pr_err("Unable to extract preferred ALUA value\n");
2150 return ret;
2151 }
2152 if ((tmp != 0) && (tmp != 1)) {
2153 pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2154 return -EINVAL;
2155 }
2156 tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2157
2158 return count;
2159}
2160
2161ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
2162{
2163 return sprintf(page, "%d\n",
2164 atomic_read(&lun->lun_tg_pt_secondary_offline));
2165}
2166
2167ssize_t core_alua_store_offline_bit(
2168 struct se_lun *lun,
2169 const char *page,
2170 size_t count)
2171{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
2176 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
2177 unsigned long tmp;
2178 int ret;
2179
2180 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
2181 (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
2182 return -ENODEV;
2183
2184 ret = kstrtoul(page, 0, &tmp);
2185 if (ret < 0) {
2186 pr_err("Unable to extract alua_tg_pt_offline value\n");
2187 return ret;
2188 }
2189 if ((tmp != 0) && (tmp != 1)) {
2190 pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2191 tmp);
2192 return -EINVAL;
2193 }
2194
2195 ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
2196 if (ret < 0)
2197 return -EINVAL;
2198
2199 return count;
2200}
2201
2202ssize_t core_alua_show_secondary_status(
2203 struct se_lun *lun,
2204 char *page)
2205{
2206 return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
2207}
2208
2209ssize_t core_alua_store_secondary_status(
2210 struct se_lun *lun,
2211 const char *page,
2212 size_t count)
2213{
2214 unsigned long tmp;
2215 int ret;
2216
2217 ret = kstrtoul(page, 0, &tmp);
2218 if (ret < 0) {
2219 pr_err("Unable to extract alua_tg_pt_status\n");
2220 return ret;
2221 }
2222 if ((tmp != ALUA_STATUS_NONE) &&
2223 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2224 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2225 pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2226 tmp);
2227 return -EINVAL;
2228 }
2229 lun->lun_tg_pt_secondary_stat = (int)tmp;
2230
2231 return count;
2232}
2233
2234ssize_t core_alua_show_secondary_write_metadata(
2235 struct se_lun *lun,
2236 char *page)
2237{
2238 return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
2239}
2240
2241ssize_t core_alua_store_secondary_write_metadata(
2242 struct se_lun *lun,
2243 const char *page,
2244 size_t count)
2245{
2246 unsigned long tmp;
2247 int ret;
2248
2249 ret = kstrtoul(page, 0, &tmp);
2250 if (ret < 0) {
2251 pr_err("Unable to extract alua_tg_pt_write_md\n");
2252 return ret;
2253 }
2254 if ((tmp != 0) && (tmp != 1)) {
2255 pr_err("Illegal value for alua_tg_pt_write_md:"
2256 " %lu\n", tmp);
2257 return -EINVAL;
2258 }
2259 lun->lun_tg_pt_secondary_write_md = (int)tmp;
2260
2261 return count;
2262}
2263
2264int core_setup_alua(struct se_device *dev)
2265{
2266 if (!(dev->transport_flags &
2267 TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
2268 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2269 struct t10_alua_lu_gp_member *lu_gp_mem;
2270
		/*
		 * Associate this struct se_device with the default ALUA
		 * LUN Group.
		 */
2275 lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2276 if (IS_ERR(lu_gp_mem))
2277 return PTR_ERR(lu_gp_mem);
2278
2279 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2280 __core_alua_attach_lu_gp_mem(lu_gp_mem,
2281 default_lu_gp);
2282 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2283
2284 pr_debug("%s: Adding to default ALUA LU Group:"
2285 " core/alua/lu_gps/default_lu_gp\n",
2286 dev->transport->name);
2287 }
2288
2289 return 0;
2290}
2291