1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#include <linux/slab.h>
27#include <linux/spinlock.h>
28#include <scsi/scsi_proto.h>
29
30#include <target/target_core_base.h>
31#include <target/target_core_fabric.h>
32
33#include "target_core_internal.h"
34#include "target_core_alua.h"
35#include "target_core_pr.h"
36#include "target_core_ua.h"
37
38sense_reason_t
39target_scsi3_ua_check(struct se_cmd *cmd)
40{
41 struct se_dev_entry *deve;
42 struct se_session *sess = cmd->se_sess;
43 struct se_node_acl *nacl;
44
45 if (!sess)
46 return 0;
47
48 nacl = sess->se_node_acl;
49 if (!nacl)
50 return 0;
51
52 rcu_read_lock();
53 deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
54 if (!deve) {
55 rcu_read_unlock();
56 return 0;
57 }
58 if (list_empty_careful(&deve->ua_list)) {
59 rcu_read_unlock();
60 return 0;
61 }
62 rcu_read_unlock();
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78 switch (cmd->t_task_cdb[0]) {
79 case INQUIRY:
80 case REPORT_LUNS:
81 case REQUEST_SENSE:
82 return 0;
83 default:
84 return TCM_CHECK_CONDITION_UNIT_ATTENTION;
85 }
86}
87
88int core_scsi3_ua_allocate(
89 struct se_dev_entry *deve,
90 u8 asc,
91 u8 ascq)
92{
93 struct se_ua *ua, *ua_p, *ua_tmp;
94
95 ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
96 if (!ua) {
97 pr_err("Unable to allocate struct se_ua\n");
98 return -ENOMEM;
99 }
100 INIT_LIST_HEAD(&ua->ua_nacl_list);
101
102 ua->ua_asc = asc;
103 ua->ua_ascq = ascq;
104
105 spin_lock(&deve->ua_lock);
106 list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
107
108
109
110 if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
111 spin_unlock(&deve->ua_lock);
112 kmem_cache_free(se_ua_cache, ua);
113 return 0;
114 }
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134 if (ua_p->ua_asc == 0x29) {
135 if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
136 list_add(&ua->ua_nacl_list,
137 &deve->ua_list);
138 else
139 list_add_tail(&ua->ua_nacl_list,
140 &deve->ua_list);
141 } else if (ua_p->ua_asc == 0x2a) {
142
143
144
145
146 if ((asc == 0x29) || (ascq > ua_p->ua_asc))
147 list_add(&ua->ua_nacl_list,
148 &deve->ua_list);
149 else
150 list_add_tail(&ua->ua_nacl_list,
151 &deve->ua_list);
152 } else
153 list_add_tail(&ua->ua_nacl_list,
154 &deve->ua_list);
155 spin_unlock(&deve->ua_lock);
156
157 return 0;
158 }
159 list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
160 spin_unlock(&deve->ua_lock);
161
162 pr_debug("Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:"
163 " 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
164 asc, ascq);
165
166 return 0;
167}
168
169void target_ua_allocate_lun(struct se_node_acl *nacl,
170 u32 unpacked_lun, u8 asc, u8 ascq)
171{
172 struct se_dev_entry *deve;
173
174 if (!nacl)
175 return;
176
177 rcu_read_lock();
178 deve = target_nacl_find_deve(nacl, unpacked_lun);
179 if (!deve) {
180 rcu_read_unlock();
181 return;
182 }
183
184 core_scsi3_ua_allocate(deve, asc, ascq);
185 rcu_read_unlock();
186}
187
188void core_scsi3_ua_release_all(
189 struct se_dev_entry *deve)
190{
191 struct se_ua *ua, *ua_p;
192
193 spin_lock(&deve->ua_lock);
194 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
195 list_del(&ua->ua_nacl_list);
196 kmem_cache_free(se_ua_cache, ua);
197 }
198 spin_unlock(&deve->ua_lock);
199}
200
201
202
203
204
205
/*
 * Fill in sense key/ASC/ASCQ for a CHECK CONDITION response from the
 * pending Unit Attention state of this command's I_T nexus + LUN.
 *
 * Returns true when a UA was dequeued and reported, or when the mapped
 * LUN no longer exists (then ILLEGAL REQUEST / LOGICAL UNIT NOT
 * SUPPORTED is reported instead); returns false otherwise.
 */
bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
	u8 *ascq)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
	struct se_ua *ua = NULL, *ua_p;
	int head = 1;	/* stays 1 until a UA is dequeued and reported */

	if (WARN_ON_ONCE(!sess))
		return false;

	nacl = sess->se_node_acl;
	if (WARN_ON_ONCE(!nacl))
		return false;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
	if (!deve) {
		rcu_read_unlock();
		/* Mapped LUN is gone: LOGICAL UNIT NOT SUPPORTED */
		*key = ILLEGAL_REQUEST;
		*asc = 0x25;
		*ascq = 0;
		return true;
	}
	*key = UNIT_ATTENTION;
	/*
	 * ua_list is kept priority-ordered by core_scsi3_ua_allocate(),
	 * so the head entry is the one to report.
	 */
	spin_lock(&deve->ua_lock);
	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
		/*
		 * UA_INTLCK_CTRL != 0: report the head entry's ASC/ASCQ
		 * but do NOT clear the queued UAs (no list_del here).
		 * NOTE(review): head remains 1 on this path, so the
		 * function returns false even though *key/*asc/*ascq were
		 * filled in -- presumably the caller treats that as
		 * "UA retained" (e.g. by returning BUSY status); confirm
		 * against the caller.
		 */
		if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
			*asc = ua->ua_asc;
			*ascq = ua->ua_ascq;
			break;
		}
		/*
		 * UA_INTLCK_CTRL == 0: report the first (highest-priority)
		 * entry, then release every queued UA for this nexus/LUN.
		 */
		if (head) {
			*asc = ua->ua_asc;
			*ascq = ua->ua_ascq;
			head = 0;
		}
		list_del(&ua->ua_nacl_list);
		kmem_cache_free(se_ua_cache, ua);
	}
	spin_unlock(&deve->ua_lock);
	rcu_read_unlock();

	pr_debug("[%s]: %s UNIT ATTENTION condition with"
		" INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x"
		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
		nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
		(dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
		"Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
		cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);

	return head == 0;
}
276
277int core_scsi3_ua_clear_for_request_sense(
278 struct se_cmd *cmd,
279 u8 *asc,
280 u8 *ascq)
281{
282 struct se_dev_entry *deve;
283 struct se_session *sess = cmd->se_sess;
284 struct se_node_acl *nacl;
285 struct se_ua *ua = NULL, *ua_p;
286 int head = 1;
287
288 if (!sess)
289 return -EINVAL;
290
291 nacl = sess->se_node_acl;
292 if (!nacl)
293 return -EINVAL;
294
295 rcu_read_lock();
296 deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
297 if (!deve) {
298 rcu_read_unlock();
299 return -EINVAL;
300 }
301 if (list_empty_careful(&deve->ua_list)) {
302 rcu_read_unlock();
303 return -EPERM;
304 }
305
306
307
308
309
310
311
312
313
314
315 spin_lock(&deve->ua_lock);
316 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
317 if (head) {
318 *asc = ua->ua_asc;
319 *ascq = ua->ua_ascq;
320 head = 0;
321 }
322 list_del(&ua->ua_nacl_list);
323 kmem_cache_free(se_ua_cache, ua);
324 }
325 spin_unlock(&deve->ua_lock);
326 rcu_read_unlock();
327
328 pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
329 " LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x,"
330 " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
331 cmd->orig_fe_lun, *asc, *ascq);
332
333 return (head) ? -EPERM : 0;
334}
335