1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/string.h>
22#include <linux/module.h>
23#include <linux/blkdev.h>
24#include <linux/capability.h>
25#include <linux/completion.h>
26#include <linux/cdrom.h>
27#include <linux/ratelimit.h>
28#include <linux/slab.h>
29#include <linux/times.h>
30#include <linux/uio.h>
31#include <linux/uaccess.h>
32
33#include <scsi/scsi.h>
34#include <scsi/scsi_ioctl.h>
35#include <scsi/scsi_cmnd.h>
36
/*
 * Per-opcode permission bitmaps (one bit per possible cmd[0] value).
 * Opcodes set in read_ok may be issued by any opener of the device;
 * opcodes set in write_ok additionally require FMODE_WRITE.  Consulted
 * by blk_verify_command() for callers without CAP_SYS_RAWIO.
 */
struct blk_cmd_filter {
	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
};
41
42static struct blk_cmd_filter blk_default_cmd_filter;
43
44
/*
 * CDB length, in bytes, for each of the eight SCSI command group codes
 * (the top three bits of the opcode).  Indexed via COMMAND_SIZE().
 */
const unsigned char scsi_command_size_tbl[8] =
{
	6, 10, 10, 12,
	16, 12, 10, 10
};
EXPORT_SYMBOL(scsi_command_size_tbl);
51
52#include <scsi/sg.h>
53
54static int sg_get_version(int __user *p)
55{
56 static const int sg_version_num = 30527;
57 return put_user(sg_version_num, p);
58}
59
/*
 * SCSI_IOCTL_GET_IDLUN: the generic block layer has no host/channel/
 * id/lun information, so this stub reports zero for all fields.
 */
static int scsi_get_idlun(struct request_queue *q, int __user *p)
{
	return put_user(0, p);
}
64
/*
 * SCSI_IOCTL_GET_BUS_NUMBER: stub — the block layer does not track a
 * bus number, so always report bus 0.
 */
static int scsi_get_bus(struct request_queue *q, int __user *p)
{
	return put_user(0, p);
}
69
/*
 * SG_GET_TIMEOUT: return the queue's default SG timeout converted from
 * jiffies to userspace clock ticks.  Note the value is returned as the
 * ioctl result itself, not copied through a user pointer.
 */
static int sg_get_timeout(struct request_queue *q)
{
	return jiffies_to_clock_t(q->sg_timeout);
}
74
75static int sg_set_timeout(struct request_queue *q, int __user *p)
76{
77 int timeout, err = get_user(timeout, p);
78
79 if (!err)
80 q->sg_timeout = clock_t_to_jiffies(timeout);
81
82 return err;
83}
84
85static int max_sectors_bytes(struct request_queue *q)
86{
87 unsigned int max_sectors = queue_max_sectors(q);
88
89 max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
90
91 return max_sectors << 9;
92}
93
94static int sg_get_reserved_size(struct request_queue *q, int __user *p)
95{
96 int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q));
97
98 return put_user(val, p);
99}
100
101static int sg_set_reserved_size(struct request_queue *q, int __user *p)
102{
103 int size, err = get_user(size, p);
104
105 if (err)
106 return err;
107
108 if (size < 0)
109 return -EINVAL;
110
111 q->sg_reserved_size = min(size, max_sectors_bytes(q));
112 return 0;
113}
114
115
116
117
118
/*
 * SG_EMULATED_HOST: always report 1 — this path is the block layer's
 * sg emulation, not a real /dev/sg device.
 */
static int sg_emulated_host(struct request_queue *q, int __user *p)
{
	return put_user(1, p);
}
123
/*
 * Populate the default command filter consulted by blk_verify_command():
 * opcodes in read_ok are allowed for any opener, opcodes in write_ok
 * additionally require the device to be open for writing.
 */
static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
{
	/* Basic read-only commands */
	__set_bit(TEST_UNIT_READY, filter->read_ok);
	__set_bit(REQUEST_SENSE, filter->read_ok);
	__set_bit(READ_6, filter->read_ok);
	__set_bit(READ_10, filter->read_ok);
	__set_bit(READ_12, filter->read_ok);
	__set_bit(READ_16, filter->read_ok);
	__set_bit(READ_BUFFER, filter->read_ok);
	__set_bit(READ_DEFECT_DATA, filter->read_ok);
	__set_bit(READ_CAPACITY, filter->read_ok);
	__set_bit(READ_LONG, filter->read_ok);
	__set_bit(INQUIRY, filter->read_ok);
	__set_bit(MODE_SENSE, filter->read_ok);
	__set_bit(MODE_SENSE_10, filter->read_ok);
	__set_bit(LOG_SENSE, filter->read_ok);
	__set_bit(START_STOP, filter->read_ok);
	__set_bit(GPCMD_VERIFY_10, filter->read_ok);
	__set_bit(VERIFY_16, filter->read_ok);
	__set_bit(REPORT_LUNS, filter->read_ok);
	__set_bit(SERVICE_ACTION_IN_16, filter->read_ok);
	__set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
	__set_bit(MAINTENANCE_IN, filter->read_ok);
	__set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);

	/* Audio CD commands */
	__set_bit(GPCMD_PLAY_CD, filter->read_ok);
	__set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
	__set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
	__set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
	__set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);

	/* CD/DVD data reading */
	__set_bit(GPCMD_READ_CD, filter->read_ok);
	__set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
	__set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
	__set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
	__set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
	__set_bit(GPCMD_READ_HEADER, filter->read_ok);
	__set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
	__set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
	__set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
	__set_bit(GPCMD_REPORT_KEY, filter->read_ok);
	__set_bit(GPCMD_SCAN, filter->read_ok);
	__set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
	__set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
	__set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
	__set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
	__set_bit(GPCMD_SEEK, filter->read_ok);
	__set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);

	/* Basic writing commands — require FMODE_WRITE */
	__set_bit(WRITE_6, filter->write_ok);
	__set_bit(WRITE_10, filter->write_ok);
	__set_bit(WRITE_VERIFY, filter->write_ok);
	__set_bit(WRITE_12, filter->write_ok);
	__set_bit(WRITE_VERIFY_12, filter->write_ok);
	__set_bit(WRITE_16, filter->write_ok);
	__set_bit(WRITE_LONG, filter->write_ok);
	__set_bit(WRITE_LONG_2, filter->write_ok);
	__set_bit(WRITE_SAME, filter->write_ok);
	__set_bit(WRITE_SAME_16, filter->write_ok);
	__set_bit(WRITE_SAME_32, filter->write_ok);
	__set_bit(ERASE, filter->write_ok);
	__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
	__set_bit(MODE_SELECT, filter->write_ok);
	__set_bit(LOG_SELECT, filter->write_ok);
	__set_bit(GPCMD_BLANK, filter->write_ok);
	__set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
	__set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
	__set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
	__set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
	__set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
	__set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
	__set_bit(GPCMD_SEND_EVENT, filter->write_ok);
	__set_bit(GPCMD_SEND_KEY, filter->write_ok);
	__set_bit(GPCMD_SEND_OPC, filter->write_ok);
	__set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
	__set_bit(GPCMD_SET_SPEED, filter->write_ok);
	__set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
	__set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
	__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
	__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
}
209
210int blk_verify_command(unsigned char *cmd, fmode_t mode)
211{
212 struct blk_cmd_filter *filter = &blk_default_cmd_filter;
213
214
215 if (capable(CAP_SYS_RAWIO))
216 return 0;
217
218
219 if (test_bit(cmd[0], filter->read_ok))
220 return 0;
221
222
223 if (test_bit(cmd[0], filter->write_ok) && (mode & FMODE_WRITE))
224 return 0;
225
226 return -EPERM;
227}
228EXPORT_SYMBOL(blk_verify_command);
229
230static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
231 struct sg_io_hdr *hdr, fmode_t mode)
232{
233 struct scsi_request *req = scsi_req(rq);
234
235 if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
236 return -EFAULT;
237 if (blk_verify_command(req->cmd, mode))
238 return -EPERM;
239
240
241
242
243 req->cmd_len = hdr->cmd_len;
244
245 rq->timeout = msecs_to_jiffies(hdr->timeout);
246 if (!rq->timeout)
247 rq->timeout = q->sg_timeout;
248 if (!rq->timeout)
249 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
250 if (rq->timeout < BLK_MIN_SG_TIMEOUT)
251 rq->timeout = BLK_MIN_SG_TIMEOUT;
252
253 return 0;
254}
255
/*
 * Copy completion state of @rq back into the userspace-visible sg_io_hdr
 * and unmap the data buffer.  Returns 0 on success; -EFAULT if the sense
 * buffer could not be copied out, otherwise any error from
 * blk_rq_unmap_user().
 */
static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
				 struct bio *bio)
{
	struct scsi_request *req = scsi_req(rq);
	int r, ret = 0;

	/*
	 * fill in all the output members from the decomposed SCSI result
	 */
	hdr->status = req->result & 0xff;
	hdr->masked_status = status_byte(req->result);
	hdr->msg_status = msg_byte(req->result);
	hdr->host_status = host_byte(req->result);
	hdr->driver_status = driver_byte(req->result);
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->resid = req->resid_len;
	hdr->sb_len_wr = 0;

	/* copy out sense data, truncated to the user-supplied buffer size */
	if (req->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);

		if (!copy_to_user(hdr->sbp, req->sense, len))
			hdr->sb_len_wr = len;
		else
			ret = -EFAULT;
	}

	/* unmap even on earlier failure; report the first error seen */
	r = blk_rq_unmap_user(bio);
	if (!ret)
		ret = r;

	return ret;
}
291
/*
 * Core of the SG_IO ioctl: validate the header, build and map a request,
 * execute it synchronously, and copy the results back into @hdr.
 * Returns 0 (with status details in @hdr) or a negative errno.
 */
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	ssize_t ret = 0;
	int writing = 0;
	int at_head = 0;
	struct request *rq;
	struct scsi_request *req;
	struct bio *bio;

	/* 'S' identifies the sg version-3 interface */
	if (hdr->interface_id != 'S')
		return -EINVAL;

	/* transfer must fit in a single request on this queue */
	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}
	if (hdr->flags & SG_FLAG_Q_AT_HEAD)
		at_head = 1;

	ret = -ENOMEM;
	rq = blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
			GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	req = scsi_req(rq);

	/* CDBs larger than the inline buffer need a separate allocation */
	if (hdr->cmd_len > BLK_MAX_CDB) {
		req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
		if (!req->cmd)
			goto out_put_request;	/* ret is still -ENOMEM */
	}

	ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
	if (ret < 0)
		goto out_free_cdb;

	ret = 0;
	if (hdr->iovec_count) {
		struct iov_iter i;
		struct iovec *iov = NULL;

		ret = import_iovec(rq_data_dir(rq),
				   hdr->dxferp, hdr->iovec_count,
				   0, &iov, &i);
		if (ret < 0)
			goto out_free_cdb;

		/* SG_IO howto says that the shorter of the two wins */
		iov_iter_truncate(&i, hdr->dxfer_len);

		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out_free_cdb;

	/* keep the bio: blk_execute_rq may change rq->bio before we unmap */
	bio = rq->bio;
	req->retries = 0;

	start_time = jiffies;

	/*
	 * return -EIO if we didn't execute all commands; details are
	 * reported through the hdr status fields instead
	 */
	blk_execute_rq(q, bd_disk, rq, at_head);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	ret = blk_complete_sghdr_rq(rq, hdr, bio);

out_free_cdb:
	scsi_req_free_cmd(req);	/* only frees a separately-allocated CDB */
out_put_request:
	blk_put_request(rq);
	return ret;
}
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
/*
 * Implementation of the deprecated SCSI_IOCTL_SEND_COMMAND interface.
 * The user buffer layout is: inlen, outlen, then a data area holding the
 * CDB (length derived from the opcode via COMMAND_SIZE()) followed by
 * in_len bytes of output data; sense/result data is copied back into the
 * same data area.
 *
 * NOTE(review): this legacy interface intentionally mixes error styles —
 * on a SCSI-level failure it returns the positive status byte (or
 * DRIVER_ERROR << 24 on mapping failure) rather than a negative errno.
 * Do not "fix" that; userspace depends on it.
 */
int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
		struct scsi_ioctl_command __user *sic)
{
	enum { OMAX_SB_LEN = 16 };	/* max sense bytes copied back */
	struct request *rq;
	struct scsi_request *req;
	int err;
	unsigned int in_len, out_len, bytes, opcode, cmdlen;
	char *buffer = NULL;

	if (!sic)
		return -EINVAL;

	/*
	 * get in an out lengths, verify they don't exceed a page worth of data
	 */
	if (get_user(in_len, &sic->inlen))
		return -EFAULT;
	if (get_user(out_len, &sic->outlen))
		return -EFAULT;
	if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
		return -EINVAL;
	if (get_user(opcode, sic->data))
		return -EFAULT;

	/* single kernel bounce buffer serves both directions */
	bytes = max(in_len, out_len);
	if (bytes) {
		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
		if (!buffer)
			return -ENOMEM;

	}

	rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
			__GFP_RECLAIM);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto error_free_buffer;
	}
	req = scsi_req(rq);

	/* CDB length is implied by the opcode's SCSI group code */
	cmdlen = COMMAND_SIZE(opcode);

	/*
	 * get command and data to send to device, if any
	 */
	err = -EFAULT;
	req->cmd_len = cmdlen;
	if (copy_from_user(req->cmd, sic->data, cmdlen))
		goto error;

	/* output data follows the CDB in the user data area */
	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
		goto error;

	err = blk_verify_command(req->cmd, mode);
	if (err)
		goto error;

	/* default. possible overriden later */
	req->retries = 5;

	/* per-opcode timeout/retry overrides for known slow commands */
	switch (opcode) {
	case SEND_DIAGNOSTIC:
	case FORMAT_UNIT:
		rq->timeout = FORMAT_UNIT_TIMEOUT;
		req->retries = 1;
		break;
	case START_STOP:
		rq->timeout = START_STOP_TIMEOUT;
		break;
	case MOVE_MEDIUM:
		rq->timeout = MOVE_MEDIUM_TIMEOUT;
		break;
	case READ_ELEMENT_STATUS:
		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
		break;
	case READ_DEFECT_DATA:
		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
		req->retries = 1;
		break;
	default:
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
		break;
	}

	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_RECLAIM)) {
		err = DRIVER_ERROR << 24;	/* legacy in-band error code */
		goto error;
	}

	blk_execute_rq(q, disk, rq, 0);

	err = req->result & 0xff;	/* positive SCSI status byte */
	if (err) {
		/* on failure, return sense data (truncated) instead */
		if (req->sense_len && req->sense) {
			bytes = (OMAX_SB_LEN > req->sense_len) ?
				req->sense_len : OMAX_SB_LEN;
			if (copy_to_user(sic->data, req->sense, bytes))
				err = -EFAULT;
		}
	} else {
		if (copy_to_user(sic->data, buffer, out_len))
			err = -EFAULT;
	}

error:
	blk_put_request(rq);

error_free_buffer:
	kfree(buffer);

	return err;
}
EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
533
534
535static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
536 int cmd, int data)
537{
538 struct request *rq;
539 int err;
540
541 rq = blk_get_request(q, REQ_OP_SCSI_OUT, __GFP_RECLAIM);
542 if (IS_ERR(rq))
543 return PTR_ERR(rq);
544 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
545 scsi_req(rq)->cmd[0] = cmd;
546 scsi_req(rq)->cmd[4] = data;
547 scsi_req(rq)->cmd_len = 6;
548 blk_execute_rq(q, bd_disk, rq, 0);
549 err = scsi_req(rq)->result ? -EIO : 0;
550 blk_put_request(rq);
551
552 return err;
553}
554
/*
 * Send a GPCMD_START_STOP_UNIT command; @data selects the operation
 * (e.g. 0x02 = eject, 0x03 = close tray for the CDROM ioctls below).
 */
static inline int blk_send_start_stop(struct request_queue *q,
				      struct gendisk *bd_disk, int data)
{
	return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
}
560
/*
 * Dispatch the SCSI-generic and CDROM packet ioctls for block devices.
 * Returns an errno, an ioctl-specific positive value (SG_GET_TIMEOUT),
 * or -ENOTTY for commands not handled here.
 */
int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
		   unsigned int cmd, void __user *arg)
{
	int err;

	if (!q)
		return -ENXIO;

	switch (cmd) {
		/*
		 * new sgv3 interface
		 */
		case SG_GET_VERSION_NUM:
			err = sg_get_version(arg);
			break;
		case SCSI_IOCTL_GET_IDLUN:
			err = scsi_get_idlun(q, arg);
			break;
		case SCSI_IOCTL_GET_BUS_NUMBER:
			err = scsi_get_bus(q, arg);
			break;
		case SG_SET_TIMEOUT:
			err = sg_set_timeout(q, arg);
			break;
		case SG_GET_TIMEOUT:
			/* returns the timeout itself, not 0/-errno */
			err = sg_get_timeout(q);
			break;
		case SG_GET_RESERVED_SIZE:
			err = sg_get_reserved_size(q, arg);
			break;
		case SG_SET_RESERVED_SIZE:
			err = sg_set_reserved_size(q, arg);
			break;
		case SG_EMULATED_HOST:
			err = sg_emulated_host(q, arg);
			break;
		case SG_IO: {
			struct sg_io_hdr hdr;

			err = -EFAULT;
			if (copy_from_user(&hdr, arg, sizeof(hdr)))
				break;
			err = sg_io(q, bd_disk, &hdr, mode);
			if (err == -EFAULT)
				break;

			/* copy result fields back even if sg_io failed */
			if (copy_to_user(arg, &hdr, sizeof(hdr)))
				err = -EFAULT;
			break;
		}
		case CDROM_SEND_PACKET: {
			/* translate cdrom_generic_command into an sg_io_hdr */
			struct cdrom_generic_command cgc;
			struct sg_io_hdr hdr;

			err = -EFAULT;
			if (copy_from_user(&cgc, arg, sizeof(cgc)))
				break;
			cgc.timeout = clock_t_to_jiffies(cgc.timeout);
			memset(&hdr, 0, sizeof(hdr));
			hdr.interface_id = 'S';
			hdr.cmd_len = sizeof(cgc.cmd);
			hdr.dxfer_len = cgc.buflen;
			err = 0;
			switch (cgc.data_direction) {
				case CGC_DATA_UNKNOWN:
					hdr.dxfer_direction = SG_DXFER_UNKNOWN;
					break;
				case CGC_DATA_WRITE:
					hdr.dxfer_direction = SG_DXFER_TO_DEV;
					break;
				case CGC_DATA_READ:
					hdr.dxfer_direction = SG_DXFER_FROM_DEV;
					break;
				case CGC_DATA_NONE:
					hdr.dxfer_direction = SG_DXFER_NONE;
					break;
				default:
					err = -EINVAL;
			}
			if (err)
				break;

			hdr.dxferp = cgc.buffer;
			hdr.sbp = cgc.sense;
			if (hdr.sbp)
				hdr.mx_sb_len = sizeof(struct request_sense);
			hdr.timeout = jiffies_to_msecs(cgc.timeout);
			/* CDB is read directly from the user's cgc.cmd */
			hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
			hdr.cmd_len = sizeof(cgc.cmd);

			err = sg_io(q, bd_disk, &hdr, mode);
			if (err == -EFAULT)
				break;

			if (hdr.status)
				err = -EIO;

			cgc.stat = err;
			cgc.buflen = hdr.resid;
			if (copy_to_user(arg, &cgc, sizeof(cgc)))
				err = -EFAULT;

			break;
		}

		/*
		 * old junk scsi send command ioctl
		 */
		case SCSI_IOCTL_SEND_COMMAND:
			printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
			err = -EINVAL;
			if (!arg)
				break;

			err = sg_scsi_ioctl(q, bd_disk, mode, arg);
			break;
		case CDROMCLOSETRAY:
			err = blk_send_start_stop(q, bd_disk, 0x03);
			break;
		case CDROMEJECT:
			err = blk_send_start_stop(q, bd_disk, 0x02);
			break;
		default:
			err = -ENOTTY;
	}

	return err;
}
EXPORT_SYMBOL(scsi_cmd_ioctl);
690
691int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
692{
693 if (bd && bd == bd->bd_contains)
694 return 0;
695
696 if (capable(CAP_SYS_RAWIO))
697 return 0;
698
699 return -ENOIOCTLCMD;
700}
701EXPORT_SYMBOL(scsi_verify_blk_ioctl);
702
703int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
704 unsigned int cmd, void __user *arg)
705{
706 int ret;
707
708 ret = scsi_verify_blk_ioctl(bd, cmd);
709 if (ret < 0)
710 return ret;
711
712 return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
713}
714EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
715
716
717
718
719
720
721
722void scsi_req_init(struct scsi_request *req)
723{
724 memset(req->__cmd, 0, sizeof(req->__cmd));
725 req->cmd = req->__cmd;
726 req->cmd_len = BLK_MAX_CDB;
727 req->sense_len = 0;
728}
729EXPORT_SYMBOL(scsi_req_init);
730
/* Populate the default command filter once at boot. */
static int __init blk_scsi_ioctl_init(void)
{
	blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
	return 0;
}
fs_initcall(blk_scsi_ioctl_init);
737