1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/slab.h>
14#include <linux/spinlock.h>
15#include <scsi/scsi_proto.h>
16
17#include <target/target_core_base.h>
18#include <target/target_core_fabric.h>
19
20#include "target_core_internal.h"
21#include "target_core_alua.h"
22#include "target_core_pr.h"
23#include "target_core_ua.h"
24
25sense_reason_t
26target_scsi3_ua_check(struct se_cmd *cmd)
27{
28 struct se_dev_entry *deve;
29 struct se_session *sess = cmd->se_sess;
30 struct se_node_acl *nacl;
31
32 if (!sess)
33 return 0;
34
35 nacl = sess->se_node_acl;
36 if (!nacl)
37 return 0;
38
39 rcu_read_lock();
40 deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
41 if (!deve) {
42 rcu_read_unlock();
43 return 0;
44 }
45 if (list_empty_careful(&deve->ua_list)) {
46 rcu_read_unlock();
47 return 0;
48 }
49 rcu_read_unlock();
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65 switch (cmd->t_task_cdb[0]) {
66 case INQUIRY:
67 case REPORT_LUNS:
68 case REQUEST_SENSE:
69 return 0;
70 default:
71 return TCM_CHECK_CONDITION_UNIT_ATTENTION;
72 }
73}
74
75int core_scsi3_ua_allocate(
76 struct se_dev_entry *deve,
77 u8 asc,
78 u8 ascq)
79{
80 struct se_ua *ua, *ua_p, *ua_tmp;
81
82 ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
83 if (!ua) {
84 pr_err("Unable to allocate struct se_ua\n");
85 return -ENOMEM;
86 }
87 INIT_LIST_HEAD(&ua->ua_nacl_list);
88
89 ua->ua_asc = asc;
90 ua->ua_ascq = ascq;
91
92 spin_lock(&deve->ua_lock);
93 list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
94
95
96
97 if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
98 spin_unlock(&deve->ua_lock);
99 kmem_cache_free(se_ua_cache, ua);
100 return 0;
101 }
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121 if (ua_p->ua_asc == 0x29) {
122 if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
123 list_add(&ua->ua_nacl_list,
124 &deve->ua_list);
125 else
126 list_add_tail(&ua->ua_nacl_list,
127 &deve->ua_list);
128 } else if (ua_p->ua_asc == 0x2a) {
129
130
131
132
133 if ((asc == 0x29) || (ascq > ua_p->ua_asc))
134 list_add(&ua->ua_nacl_list,
135 &deve->ua_list);
136 else
137 list_add_tail(&ua->ua_nacl_list,
138 &deve->ua_list);
139 } else
140 list_add_tail(&ua->ua_nacl_list,
141 &deve->ua_list);
142 spin_unlock(&deve->ua_lock);
143
144 return 0;
145 }
146 list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
147 spin_unlock(&deve->ua_lock);
148
149 pr_debug("Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:"
150 " 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
151 asc, ascq);
152
153 return 0;
154}
155
156void target_ua_allocate_lun(struct se_node_acl *nacl,
157 u32 unpacked_lun, u8 asc, u8 ascq)
158{
159 struct se_dev_entry *deve;
160
161 if (!nacl)
162 return;
163
164 rcu_read_lock();
165 deve = target_nacl_find_deve(nacl, unpacked_lun);
166 if (!deve) {
167 rcu_read_unlock();
168 return;
169 }
170
171 core_scsi3_ua_allocate(deve, asc, ascq);
172 rcu_read_unlock();
173}
174
175void core_scsi3_ua_release_all(
176 struct se_dev_entry *deve)
177{
178 struct se_ua *ua, *ua_p;
179
180 spin_lock(&deve->ua_lock);
181 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
182 list_del(&ua->ua_nacl_list);
183 kmem_cache_free(se_ua_cache, ua);
184 }
185 spin_unlock(&deve->ua_lock);
186}
187
188
189
190
191
192
/*
 * Fill in sense key/ASC/ASCQ for a CHECK CONDITION on @cmd from the
 * nexus' pending Unit Attention list.
 *
 * When the device's UA interlock control is TARGET_UA_INTLCK_CTRL_CLEAR,
 * the first queued UA is reported and ALL queued entries are released;
 * otherwise the first entry is reported but left on the list.
 *
 * Returns true when sense data was produced (either a UA was reported,
 * or the LUN mapping was gone and ILLEGAL REQUEST/0x25 is returned);
 * false when there is no session/ACL or no UA was pending.
 */
bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
				       u8 *ascq)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
	struct se_ua *ua = NULL, *ua_p;
	int head = 1;	/* stays 1 until the first UA has been copied out */
	bool dev_ua_intlck_clear = (dev->dev_attrib.emulate_ua_intlck_ctrl
				== TARGET_UA_INTLCK_CTRL_CLEAR);

	if (WARN_ON_ONCE(!sess))
		return false;

	nacl = sess->se_node_acl;
	if (WARN_ON_ONCE(!nacl))
		return false;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
	if (!deve) {
		rcu_read_unlock();
		/* LUN no longer mapped: ILLEGAL REQUEST, ASC 0x25 */
		*key = ILLEGAL_REQUEST;
		*asc = 0x25;
		*ascq = 0;
		return true;
	}
	*key = UNIT_ATTENTION;
	/*
	 * Walk the queued UAs under ua_lock.  The first entry supplies the
	 * reported ASC/ASCQ; in non-clear mode nothing is removed, in
	 * clear mode every entry is unlinked and freed.
	 */
	spin_lock(&deve->ua_lock);
	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
		/*
		 * Non-clear interlock mode: report the head entry and keep
		 * the list intact for subsequent commands.
		 */
		if (!dev_ua_intlck_clear) {
			*asc = ua->ua_asc;
			*ascq = ua->ua_ascq;
			break;
		}
		/*
		 * Clear mode: report only the first entry's ASC/ASCQ but
		 * release every queued condition.
		 */
		if (head) {
			*asc = ua->ua_asc;
			*ascq = ua->ua_ascq;
			head = 0;
		}
		list_del(&ua->ua_nacl_list);
		kmem_cache_free(se_ua_cache, ua);
	}
	spin_unlock(&deve->ua_lock);
	rcu_read_unlock();

	/*
	 * NOTE(review): in non-clear mode *asc/*ascq are set but head stays
	 * 1, so this returns false even though a UA was copied out —
	 * presumably the caller reports the UA by other means; confirm.
	 */
	pr_debug("[%s]: %s UNIT ATTENTION condition with"
		" INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x"
		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
		nacl->se_tpg->se_tpg_tfo->fabric_name,
		dev_ua_intlck_clear ? "Releasing" : "Reporting",
		dev->dev_attrib.emulate_ua_intlck_ctrl,
		cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);

	return head == 0;
}
265
/*
 * Handle an incoming REQUEST_SENSE: report the first queued Unit
 * Attention condition's ASC/ASCQ through @asc/@ascq and release ALL
 * queued UA entries for the nexus.
 *
 * Returns 0 when a UA was reported and cleared, -EINVAL when there is
 * no session/ACL/deve for the command's LUN, and -EPERM when no UA
 * was pending.
 */
int core_scsi3_ua_clear_for_request_sense(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
	struct se_ua *ua = NULL, *ua_p;
	int head = 1;	/* stays 1 until the first UA has been copied out */

	if (!sess)
		return -EINVAL;

	nacl = sess->se_node_acl;
	if (!nacl)
		return -EINVAL;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
	if (!deve) {
		rcu_read_unlock();
		return -EINVAL;
	}
	if (list_empty_careful(&deve->ua_list)) {
		rcu_read_unlock();
		return -EPERM;
	}
	/*
	 * Report the head entry's ASC/ASCQ, then unlink and free every
	 * queued UA under ua_lock.  The list can race to empty between the
	 * lockless check above and taking the lock, hence the final
	 * (head) ? -EPERM : 0 below rather than an unconditional 0.
	 */
	spin_lock(&deve->ua_lock);
	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
		if (head) {
			*asc = ua->ua_asc;
			*ascq = ua->ua_ascq;
			head = 0;
		}
		list_del(&ua->ua_nacl_list);
		kmem_cache_free(se_ua_cache, ua);
	}
	spin_unlock(&deve->ua_lock);
	rcu_read_unlock();

	pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
		" LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x,"
		" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->fabric_name,
		cmd->orig_fe_lun, *asc, *ascq);

	return (head) ? -EPERM : 0;
}
324