// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  BSG helper library
 *
 *  Copyright (C) 2008   James Smart, Emulex Corporation
 *  Copyright (C) 2011   Red Hat, Inc.  All rights reserved.
 *  Copyright (C) 2011   Mike Christie
 */
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/sg.h>

#define uptr64(val) ((void __user *)(uintptr_t)(val))

struct bsg_set {
	struct blk_mq_tag_set	tag_set;
	bsg_job_fn		*job_fn;
	bsg_timeout_fn		*timeout_fn;
};

static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
{
	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	return 0;
}
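
/*
 * Illustrative only: a minimal userspace sketch of the sg_io_v4 header that
 * bsg_transport_check_proto() validates.  Requests arrive via ioctl(SG_IO)
 * (or write()/read()) on a /dev/bsg/* node; the field names come from
 * <linux/bsg.h>, while the fd, buffers, and lengths below are hypothetical.
 *
 *	struct sg_io_v4 hdr = { 0 };
 *
 *	hdr.guard = 'Q';
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	hdr.request = (uintptr_t)tport_cmd;	// transport-defined payload
 *	hdr.request_len = tport_cmd_len;
 *	hdr.response = (uintptr_t)resp_buf;
 *	hdr.max_response_len = sizeof(resp_buf);
 *	ioctl(bsg_fd, SG_IO, &hdr);		// caller needs CAP_SYS_RAWIO
 */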

static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
	int ret;

	job->request_len = hdr->request_len;
	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
	if (IS_ERR(job->request))
		return PTR_ERR(job->request);

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
		if (IS_ERR(job->bidi_rq)) {
			ret = PTR_ERR(job->bidi_rq);
			goto out;
		}

		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
				uptr64(hdr->din_xferp), hdr->din_xfer_len,
				GFP_KERNEL);
		if (ret)
			goto out_free_bidi_rq;

		job->bidi_bio = job->bidi_rq->bio;
	} else {
		job->bidi_rq = NULL;
		job->bidi_bio = NULL;
	}

	return 0;

out_free_bidi_rq:
	if (job->bidi_rq)
		blk_put_request(job->bidi_rq);
out:
	kfree(job->request);
	return ret;
}

static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
	int ret = 0;

	/*
	 * The assignments below don't make much sense, but are kept for
	 * bug by bug backwards compatibility:
	 */
	hdr->device_status = job->result & 0xff;
	hdr->transport_status = host_byte(job->result);
	hdr->driver_status = driver_byte(job->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (job->result < 0) {
		/* we're only returning the result field in the reply */
		job->reply_len = sizeof(u32);
		ret = job->result;
	}

	if (job->reply_len && hdr->response) {
		int len = min(hdr->max_response_len, job->reply_len);

		if (copy_to_user(uptr64(hdr->response), job->reply, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	/* we assume all request payload was transferred, residual == 0 */
	hdr->dout_resid = 0;

	if (job->bidi_rq) {
		unsigned int rsp_len = job->reply_payload.payload_len;

		if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
			hdr->din_resid = 0;
		else
			hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
	} else {
		hdr->din_resid = 0;
	}

	return ret;
}

static void bsg_transport_free_rq(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	if (job->bidi_rq) {
		blk_rq_unmap_user(job->bidi_bio);
		blk_put_request(job->bidi_rq);
	}

	kfree(job->request);
}

static const struct bsg_ops bsg_transport_ops = {
	.check_proto		= bsg_transport_check_proto,
	.fill_hdr		= bsg_transport_fill_hdr,
	.complete_rq		= bsg_transport_complete_rq,
	.free_rq		= bsg_transport_free_rq,
};

/**
 * bsg_teardown_job - routine to teardown a bsg job
 * @kref: kref inside bsg_job that is to be torn down
 */
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = blk_mq_rq_from_pdu(job);

	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);

	blk_mq_end_request(rq, BLK_STS_OK);
}

void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);

/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload recvd
 *
 * Called from driver to indicate that the job is done.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	job->result = result;
	job->reply_payload_rcv_len = reply_payload_rcv_len;
	blk_mq_complete_request(blk_mq_rq_from_pdu(job));
}
EXPORT_SYMBOL_GPL(bsg_job_done);
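
/*
 * Illustrative only: a sketch of how an LLD that handles the job
 * asynchronously pairs bsg_job_get()/bsg_job_put() with bsg_job_done().
 * The my_async_done() callback and its context structure are hypothetical.
 *
 *	static void my_async_done(struct my_ctx *ctx)
 *	{
 *		struct bsg_job *job = ctx->job;
 *
 *		// report result and received payload length, then drop the
 *		// reference taken with bsg_job_get() at submission time
 *		bsg_job_done(job, ctx->error ? -EIO : 0, ctx->rcv_len);
 *		bsg_job_put(job);
 *	}
 */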

/**
 * bsg_complete - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_complete(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kzalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}
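
/*
 * Illustrative only: once bsg_map_buffer() has built the scatterlist, a
 * driver walks it with the standard iterators.  The loop body is a
 * placeholder; any DMA mapping (e.g. dma_map_sg()) is up to the LLD.
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(job->request_payload.sg_list, sg,
 *		    job->request_payload.sg_cnt, i)
 *		process_segment(sg_virt(sg), sg->length);	// hypothetical
 */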

/**
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	job->timeout = req->timeout;

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (job->bidi_rq) {
		ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	kref_init(&job->kref);
	return true;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	job->result = -ENOMEM;
	return false;
}

/**
 * bsg_queue_rq - generic handler for bsg requests
 * @hctx: hardware queue
 * @bd: queue data
 *
 * On error the create_bsg_job function should return a -Exyz error value
 * that will be set in req->errors.
 *
 * Drivers/subsys should pass this to the queue init function.
 */
static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct device *dev = q->queuedata;
	struct request *req = bd->rq;
	struct bsg_set *bset =
		container_of(q->tag_set, struct bsg_set, tag_set);
	blk_status_t sts = BLK_STS_IOERR;
	int ret;

	blk_mq_start_request(req);

	if (!get_device(dev))
		return BLK_STS_IOERR;

	if (!bsg_prepare_job(dev, req))
		goto out;

	ret = bset->job_fn(blk_mq_rq_to_pdu(req));
	if (!ret)
		sts = BLK_STS_OK;

out:
	/* drop the reference taken above on all paths */
	put_device(dev);
	return sts;
}

/* called right after the request is allocated for the request_queue */
static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
		       unsigned int hctx_idx, unsigned int numa_node)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!job->reply)
		return -ENOMEM;
	return 0;
}

/* called right before the request is given to the request_queue user */
static void bsg_initialize_rq(struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	void *reply = job->reply;

	memset(job, 0, sizeof(*job));
	job->reply = reply;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;
	job->dd_data = job + 1;
}

static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
			unsigned int hctx_idx)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	kfree(job->reply);
}

void bsg_remove_queue(struct request_queue *q)
{
	if (q) {
		struct bsg_set *bset =
			container_of(q->tag_set, struct bsg_set, tag_set);

		bsg_unregister_queue(q);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&bset->tag_set);
		kfree(bset);
	}
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);

static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
{
	struct bsg_set *bset =
		container_of(rq->q->tag_set, struct bsg_set, tag_set);

	if (!bset->timeout_fn)
		return BLK_EH_DONE;
	return bset->timeout_fn(rq);
}

static const struct blk_mq_ops bsg_mq_ops = {
	.queue_rq		= bsg_queue_rq,
	.init_request		= bsg_init_rq,
	.exit_request		= bsg_exit_rq,
	.initialize_rq_fn	= bsg_initialize_rq,
	.complete		= bsg_complete,
	.timeout		= bsg_timeout,
};

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: device to give bsg device
 * @job_fn: bsg job handler
 * @timeout: timeout handler function pointer
 * @dd_job_size: size of LLD data needed for this request
 */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
{
	struct bsg_set *bset;
	struct blk_mq_tag_set *set;
	struct request_queue *q;
	int ret = -ENOMEM;

	bset = kzalloc(sizeof(*bset), GFP_KERNEL);
	if (!bset)
		return ERR_PTR(-ENOMEM);

	bset->job_fn = job_fn;
	bset->timeout_fn = timeout;

	set = &bset->tag_set;
	set->ops = &bsg_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
	if (blk_mq_alloc_tag_set(set))
		goto out_tag_set;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		ret = PTR_ERR(q);
		goto out_queue;
	}

	q->queuedata = dev;
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
	if (ret) {
		printk(KERN_ERR "%s: bsg interface failed to initialize - register queue\n",
		       dev->kobj.name);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_cleanup_queue(q);
out_queue:
	blk_mq_free_tag_set(set);
out_tag_set:
	kfree(bset);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
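
/*
 * Illustrative only: a minimal sketch of an LLD wiring itself up.  The
 * handler name and struct my_lld_data are hypothetical; job->dd_data points
 * at the per-request area sized by dd_job_size, laid out immediately after
 * struct bsg_job (see bsg_initialize_rq() above).
 *
 *	static int my_bsg_job_fn(struct bsg_job *job)
 *	{
 *		struct my_lld_data *dd = job->dd_data;
 *
 *		// kick off the transport command; complete it later from the
 *		// driver's done path via bsg_job_done()
 *		return 0;
 *	}
 *
 *	q = bsg_setup_queue(dev, dev_name(dev), my_bsg_job_fn, NULL,
 *			    sizeof(struct my_lld_data));
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 */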