#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/times.h>
#include <linux/uio.h>
#include <asm/uaccess.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>

struct blk_cmd_filter {
	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
};

static struct blk_cmd_filter blk_default_cmd_filter;

/* Command group 3 is reserved and should never be used.  */
const unsigned char scsi_command_size_tbl[8] =
{
	6, 10, 10, 12,
	16, 12, 10, 10
};
EXPORT_SYMBOL(scsi_command_size_tbl);

#include <scsi/sg.h>

static int sg_get_version(int __user *p)
{
	static const int sg_version_num = 30527;
	return put_user(sg_version_num, p);
}

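/*
 * The generic block layer has no real host/channel/id/lun or bus number
 * to report, so these two legacy ioctls simply return zero.
 */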
static int scsi_get_idlun(struct request_queue *q, int __user *p)
{
	return put_user(0, p);
}

static int scsi_get_bus(struct request_queue *q, int __user *p)
{
	return put_user(0, p);
}

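/*
 * SG_GET_TIMEOUT/SG_SET_TIMEOUT: the per-queue SG_IO default timeout is
 * exchanged with userspace in clock ticks but stored in jiffies.
 */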
static int sg_get_timeout(struct request_queue *q)
{
	return jiffies_to_clock_t(q->sg_timeout);
}

static int sg_set_timeout(struct request_queue *q, int __user *p)
{
	int timeout, err = get_user(timeout, p);

	if (!err)
		q->sg_timeout = clock_t_to_jiffies(timeout);

	return err;
}

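/*
 * Largest single transfer the queue will accept, converted from 512-byte
 * sectors to bytes and clamped so the result still fits in a positive int.
 */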
static int max_sectors_bytes(struct request_queue *q)
{
	unsigned int max_sectors = queue_max_sectors(q);

	max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);

	return max_sectors << 9;
}

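/*
 * The SG "reserved size" is the buffer length an SG_IO user may rely on;
 * report and store it capped to what the queue can actually transfer.
 */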
static int sg_get_reserved_size(struct request_queue *q, int __user *p)
{
	int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q));

	return put_user(val, p);
}

static int sg_set_reserved_size(struct request_queue *q, int __user *p)
{
	int size, err = get_user(size, p);

	if (err)
		return err;

	if (size < 0)
		return -EINVAL;

	q->sg_reserved_size = min(size, max_sectors_bytes(q));
	return 0;
}

/*
 * will always return that we are ATAPI even for a real SCSI drive, I'm not
 * so sure this is worth doing anything about (why would you care??)
 */
static int sg_emulated_host(struct request_queue *q, int __user *p)
{
	return put_user(1, p);
}

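/*
 * Default command filter: opcodes in read_ok are allowed for anyone who
 * can open the device, opcodes in write_ok additionally require the
 * device to be open for writing (see blk_verify_command()).
 */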
static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
{
	/* Basic read-only commands */
	__set_bit(TEST_UNIT_READY, filter->read_ok);
	__set_bit(REQUEST_SENSE, filter->read_ok);
	__set_bit(READ_6, filter->read_ok);
	__set_bit(READ_10, filter->read_ok);
	__set_bit(READ_12, filter->read_ok);
	__set_bit(READ_16, filter->read_ok);
	__set_bit(READ_BUFFER, filter->read_ok);
	__set_bit(READ_DEFECT_DATA, filter->read_ok);
	__set_bit(READ_CAPACITY, filter->read_ok);
	__set_bit(READ_LONG, filter->read_ok);
	__set_bit(INQUIRY, filter->read_ok);
	__set_bit(MODE_SENSE, filter->read_ok);
	__set_bit(MODE_SENSE_10, filter->read_ok);
	__set_bit(LOG_SENSE, filter->read_ok);
	__set_bit(START_STOP, filter->read_ok);
	__set_bit(GPCMD_VERIFY_10, filter->read_ok);
	__set_bit(VERIFY_16, filter->read_ok);
	__set_bit(REPORT_LUNS, filter->read_ok);
	__set_bit(SERVICE_ACTION_IN_16, filter->read_ok);
	__set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
	__set_bit(MAINTENANCE_IN, filter->read_ok);
	__set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);

	/* Audio CD commands */
	__set_bit(GPCMD_PLAY_CD, filter->read_ok);
	__set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
	__set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
	__set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
	__set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);

	/* CD/DVD data reading */
	__set_bit(GPCMD_READ_CD, filter->read_ok);
	__set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
	__set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
	__set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
	__set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
	__set_bit(GPCMD_READ_HEADER, filter->read_ok);
	__set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
	__set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
	__set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
	__set_bit(GPCMD_REPORT_KEY, filter->read_ok);
	__set_bit(GPCMD_SCAN, filter->read_ok);
	__set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
	__set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
	__set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
	__set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
	__set_bit(GPCMD_SEEK, filter->read_ok);
	__set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);

	/* Basic writing commands */
	__set_bit(WRITE_6, filter->write_ok);
	__set_bit(WRITE_10, filter->write_ok);
	__set_bit(WRITE_SAME, filter->write_ok);
	__set_bit(WRITE_VERIFY, filter->write_ok);
	__set_bit(WRITE_12, filter->write_ok);
	__set_bit(WRITE_VERIFY_12, filter->write_ok);
	__set_bit(WRITE_16, filter->write_ok);
	__set_bit(WRITE_SAME_16, filter->write_ok);
	__set_bit(WRITE_LONG, filter->write_ok);
	__set_bit(WRITE_LONG_2, filter->write_ok);
	__set_bit(ERASE, filter->write_ok);
	__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
	__set_bit(MODE_SELECT, filter->write_ok);
	__set_bit(LOG_SELECT, filter->write_ok);
	__set_bit(GPCMD_BLANK, filter->write_ok);
	__set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
	__set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
	__set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
	__set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
	__set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
	__set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
	__set_bit(GPCMD_SEND_EVENT, filter->write_ok);
	__set_bit(GPCMD_SEND_KEY, filter->write_ok);
	__set_bit(GPCMD_SEND_OPC, filter->write_ok);
	__set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
	__set_bit(GPCMD_SET_SPEED, filter->write_ok);
	__set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
	__set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
	__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
	__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
}

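/*
 * Check whether an unprivileged caller may issue this CDB.  CAP_SYS_RAWIO
 * (or a queue explicitly marked for unprivileged SG I/O) allows anything;
 * otherwise the opcode must be on the read-safe list, or on the
 * write-safe list with the device open for writing.
 */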
int blk_verify_command(struct request_queue *q,
		       unsigned char *cmd, fmode_t has_write_perm)
{
	struct blk_cmd_filter *filter = &blk_default_cmd_filter;

	/* root (or a queue flagged for unprivileged SG I/O) can do any command */
	if (capable(CAP_SYS_RAWIO) || blk_queue_unpriv_sgio(q))
		return 0;

	/* Anybody who can open the device can do a read-safe command */
	if (test_bit(cmd[0], filter->read_ok))
		return 0;

	/* Write-safe commands just require a writable open */
	if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
		return 0;

	return -EPERM;
}
EXPORT_SYMBOL(blk_verify_command);

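/*
 * Copy the CDB from the sg_io_hdr into the request, run it through the
 * command filter and choose a timeout: the header value, then the queue
 * default, then BLK_DEFAULT_SG_TIMEOUT, never below BLK_MIN_SG_TIMEOUT.
 */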
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
			     struct sg_io_hdr *hdr, fmode_t mode)
{
	if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;
	if (blk_verify_command(q, rq->cmd, mode & FMODE_WRITE))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->cmd_len;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}

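/*
 * Copy completion status, residual count and sense data back into the
 * sg_io_hdr, unmap the user buffer and drop the request.
 */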
static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
				 struct bio *bio)
{
	int r, ret = 0;

	/*
	 * fill in all the output members
	 */
	hdr->status = rq->errors & 0xff;
	hdr->masked_status = status_byte(rq->errors);
	hdr->msg_status = msg_byte(rq->errors);
	hdr->host_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->resid = rq->resid_len;
	hdr->sb_len_wr = 0;

	if (rq->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

		if (!copy_to_user(hdr->sbp, rq->sense, len))
			hdr->sb_len_wr = len;
		else
			ret = -EFAULT;
	}

	r = blk_rq_unmap_user(bio);
	if (!ret)
		ret = r;
	blk_put_request(rq);

	return ret;
}

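/*
 * Core of the SG_IO ioctl: validate the header, build a BLOCK_PC request
 * (mapping the caller's buffer or iovec), execute it synchronously and
 * report the outcome through the header.
 */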
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		 struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	int writing = 0, ret = 0;
	int at_head = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}
	if (hdr->flags & SG_FLAG_Q_AT_HEAD)
		at_head = 1;

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
		blk_put_request(rq);
		return -EFAULT;
	}

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		size_t iov_data_len;
		struct sg_iovec *sg_iov;
		struct iovec *iov;
		int i;

		sg_iov = kmalloc(size, GFP_KERNEL);
		if (!sg_iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
			kfree(sg_iov);
			ret = -EFAULT;
			goto out;
		}

		/*
		 * Sum up the vecs, making sure they don't overflow
		 */
		iov = (struct iovec *) sg_iov;
		iov_data_len = 0;
		for (i = 0; i < hdr->iovec_count; i++) {
			if (iov_data_len + iov[i].iov_len < iov_data_len) {
				kfree(sg_iov);
				ret = -EINVAL;
				goto out;
			}
			iov_data_len += iov[i].iov_len;
		}

		/* SG_IO howto says that the shorter of the two wins */
		if (hdr->dxfer_len < iov_data_len) {
			hdr->iovec_count = iov_shorten(iov,
						       hdr->iovec_count,
						       hdr->dxfer_len);
			iov_data_len = hdr->dxfer_len;
		}

		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
					  iov_data_len, GFP_KERNEL);
		kfree(sg_iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out;

	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, at_head);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	return blk_complete_sghdr_rq(rq, hdr, bio);
out:
	blk_put_request(rq);
	return ret;
}

/**
 * sg_scsi_ioctl  --  handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
 * @q:		request queue to send scsi commands down
 * @disk:	gendisk to operate on (optional)
 * @mode:	mode of the open file the ioctl came in through, used for
 *		the write-permission check in blk_verify_command()
 * @sic:	userspace structure describing the command to perform
 *
 * Send the SCSI command described by @sic to the device behind the
 * request queue @q.
 *
 * Notes:
 *   -  This interface is deprecated - users should use the SG_IO
 *      interface instead, as this is a more flexible approach to
 *      performing SCSI commands on a device.
 *   -  The SCSI command length is determined by examining the 1st byte
 *      of the given command; there is no way to override this.
 *   -  Data transfers are limited to PAGE_SIZE.
 *   -  The sense buffer copied back on error is truncated to
 *      OMAX_SB_LEN (16) bytes so that old callers are not surprised.
 *   -  If a Unix error occurs (e.g. ENOMEM) a negative errno is
 *      returned; if the SCSI command succeeds, 0 is returned, and a
 *      positive return value is the SCSI status byte of the failed
 *      command.
 */
#define OMAX_SB_LEN 16		/* max sense bytes copied back to old callers */

int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
		  struct scsi_ioctl_command __user *sic)
{
	struct request *rq;
	int err;
	unsigned int in_len, out_len, bytes, opcode, cmdlen;
	char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];

	if (!sic)
		return -EINVAL;

	/*
	 * get in an out lengths, verify they don't exceed a page worth of data
	 */
	if (get_user(in_len, &sic->inlen))
		return -EFAULT;
	if (get_user(out_len, &sic->outlen))
		return -EFAULT;
	if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
		return -EINVAL;
	if (get_user(opcode, sic->data))
		return -EFAULT;

	bytes = max(in_len, out_len);
	if (bytes) {
		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER | __GFP_NOWARN);
		if (!buffer)
			return -ENOMEM;

	}

	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto error_free_buffer;
	}
	blk_rq_set_block_pc(rq);

	cmdlen = COMMAND_SIZE(opcode);

	/*
	 * get command and data to send to device, if any
	 */
	err = -EFAULT;
	rq->cmd_len = cmdlen;
	if (copy_from_user(rq->cmd, sic->data, cmdlen))
		goto error;

	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
		goto error;

	err = blk_verify_command(q, rq->cmd, mode & FMODE_WRITE);
	if (err)
		goto error;

	/* default; possibly overridden below */
	rq->retries = 5;

	switch (opcode) {
	case SEND_DIAGNOSTIC:
	case FORMAT_UNIT:
		rq->timeout = FORMAT_UNIT_TIMEOUT;
		rq->retries = 1;
		break;
	case START_STOP:
		rq->timeout = START_STOP_TIMEOUT;
		break;
	case MOVE_MEDIUM:
		rq->timeout = MOVE_MEDIUM_TIMEOUT;
		break;
	case READ_ELEMENT_STATUS:
		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
		break;
	case READ_DEFECT_DATA:
		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
		rq->retries = 1;
		break;
	default:
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
		break;
	}

	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
		err = DRIVER_ERROR << 24;
		goto error;
	}

	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;

	blk_execute_rq(q, disk, rq, 0);

	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
	if (err) {
		if (rq->sense_len && rq->sense) {
			bytes = (OMAX_SB_LEN > rq->sense_len) ?
				rq->sense_len : OMAX_SB_LEN;
			if (copy_to_user(sic->data, rq->sense, bytes))
				err = -EFAULT;
		}
	} else {
		if (copy_to_user(sic->data, buffer, out_len))
			err = -EFAULT;
	}

error:
	blk_put_request(rq);

error_free_buffer:
	kfree(buffer);

	return err;
}
EXPORT_SYMBOL_GPL(sg_scsi_ioctl);

/* Send basic block requests */
static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
			      int cmd, int data)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, WRITE, __GFP_WAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	rq->cmd[0] = cmd;
	rq->cmd[4] = data;
	rq->cmd_len = 6;
	err = blk_execute_rq(q, bd_disk, rq, 0);
	blk_put_request(rq);

	return err;
}

static inline int blk_send_start_stop(struct request_queue *q,
				      struct gendisk *bd_disk, int data)
{
	return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
}

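/*
 * Generic dispatcher for the SCSI/SG ioctls the block layer can handle on
 * its own: the sg v3 interface (SG_* and SG_IO), the deprecated
 * SCSI_IOCTL_SEND_COMMAND, CDROM_SEND_PACKET (translated into SG_IO) and
 * the CDROMEJECT/CDROMCLOSETRAY start/stop shortcuts.
 */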
int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
		   unsigned int cmd, void __user *arg)
{
	int err;

	if (!q)
		return -ENXIO;

	switch (cmd) {
		/*
		 * new sgv3 interface
		 */
		case SG_GET_VERSION_NUM:
			err = sg_get_version(arg);
			break;
		case SCSI_IOCTL_GET_IDLUN:
			err = scsi_get_idlun(q, arg);
			break;
		case SCSI_IOCTL_GET_BUS_NUMBER:
			err = scsi_get_bus(q, arg);
			break;
		case SG_SET_TIMEOUT:
			err = sg_set_timeout(q, arg);
			break;
		case SG_GET_TIMEOUT:
			err = sg_get_timeout(q);
			break;
		case SG_GET_RESERVED_SIZE:
			err = sg_get_reserved_size(q, arg);
			break;
		case SG_SET_RESERVED_SIZE:
			err = sg_set_reserved_size(q, arg);
			break;
		case SG_EMULATED_HOST:
			err = sg_emulated_host(q, arg);
			break;
		case SG_IO: {
			struct sg_io_hdr hdr;

			err = -EFAULT;
			if (copy_from_user(&hdr, arg, sizeof(hdr)))
				break;
			err = sg_io(q, bd_disk, &hdr, mode);
			if (err == -EFAULT)
				break;

			if (copy_to_user(arg, &hdr, sizeof(hdr)))
				err = -EFAULT;
			break;
		}
		case CDROM_SEND_PACKET: {
			struct cdrom_generic_command cgc;
			struct sg_io_hdr hdr;

			err = -EFAULT;
			if (copy_from_user(&cgc, arg, sizeof(cgc)))
				break;
			cgc.timeout = clock_t_to_jiffies(cgc.timeout);
			memset(&hdr, 0, sizeof(hdr));
			hdr.interface_id = 'S';
			hdr.cmd_len = sizeof(cgc.cmd);
			hdr.dxfer_len = cgc.buflen;
			err = 0;
			switch (cgc.data_direction) {
				case CGC_DATA_UNKNOWN:
					hdr.dxfer_direction = SG_DXFER_UNKNOWN;
					break;
				case CGC_DATA_WRITE:
					hdr.dxfer_direction = SG_DXFER_TO_DEV;
					break;
				case CGC_DATA_READ:
					hdr.dxfer_direction = SG_DXFER_FROM_DEV;
					break;
				case CGC_DATA_NONE:
					hdr.dxfer_direction = SG_DXFER_NONE;
					break;
				default:
					err = -EINVAL;
			}
			if (err)
				break;

			hdr.dxferp = cgc.buffer;
			hdr.sbp = cgc.sense;
			if (hdr.sbp)
				hdr.mx_sb_len = sizeof(struct request_sense);
			hdr.timeout = jiffies_to_msecs(cgc.timeout);
			hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd;
			hdr.cmd_len = sizeof(cgc.cmd);

			err = sg_io(q, bd_disk, &hdr, mode);
			if (err == -EFAULT)
				break;

			if (hdr.status)
				err = -EIO;

			cgc.stat = err;
			cgc.buflen = hdr.resid;
			if (copy_to_user(arg, &cgc, sizeof(cgc)))
				err = -EFAULT;

			break;
		}

		/*
		 * old junk scsi send command ioctl
		 */
		case SCSI_IOCTL_SEND_COMMAND:
			printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
			err = -EINVAL;
			if (!arg)
				break;

			err = sg_scsi_ioctl(q, bd_disk, mode, arg);
			break;
		case CDROMCLOSETRAY:
			err = blk_send_start_stop(q, bd_disk, 0x03);	/* load/close tray */
			break;
		case CDROMEJECT:
			err = blk_send_start_stop(q, bd_disk, 0x02);	/* eject medium */
			break;
		default:
			err = -ENOTTY;
	}

	return err;
}
EXPORT_SYMBOL(scsi_cmd_ioctl);

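/*
 * Decide whether a SCSI ioctl may be issued on this block device node.
 * Whole-disk devices get everything; partitions only pass a small set of
 * harmless informational ioctls unless the caller has CAP_SYS_RAWIO.
 */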
int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
{
	if (bd && bd == bd->bd_contains)
		return 0;

	/* Actually none of these is particularly useful on a partition,
	 * but they are safe.
	 */
	switch (cmd) {
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_GET_PCI:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_VERSION_NUM:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
		return 0;
	case CDROM_GET_CAPABILITY:
		/* Keep this until we remove the printk below.  udev sends it
		 * and we do not want to spam dmesg about it.  CD-ROMs do
		 * not have partitions, so we get here only for disks.
		 */
		return -ENOIOCTLCMD;
	default:
		break;
	}

	if (capable(CAP_SYS_RAWIO))
		return 0;

	/* In particular, rule out all resets and host-specific ioctls.  */
	printk_ratelimited(KERN_WARNING
			   "%s: sending ioctl %x to a partition!\n", current->comm, cmd);

	return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(scsi_verify_blk_ioctl);

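/*
 * Wrapper used by block drivers: run the ioctl through
 * scsi_verify_blk_ioctl() before handing it to scsi_cmd_ioctl().
 */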
int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
		       unsigned int cmd, void __user *arg)
{
	int ret;

	ret = scsi_verify_blk_ioctl(bd, cmd);
	if (ret < 0)
		return ret;

	return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
}
EXPORT_SYMBOL(scsi_cmd_blk_ioctl);

static int __init blk_scsi_ioctl_init(void)
{
	blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
	return 0;
}
fs_initcall(blk_scsi_ioctl_init);