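/*
 * Block layer SCSI ioctl handling: the SG_IO pass-through interface and
 * the legacy SCSI_IOCTL_* and CDROM packet-command ioctls.
 */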
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/times.h>
#include <linux/uio.h>
#include <asm/uaccess.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>

struct blk_cmd_filter {
	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
};

static struct blk_cmd_filter blk_default_cmd_filter;

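/* Command group 3 is reserved and should never be used.  */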
const unsigned char scsi_command_size_tbl[8] =
{
	6, 10, 10, 12,
	16, 12, 10, 10
};
EXPORT_SYMBOL(scsi_command_size_tbl);

#include <scsi/sg.h>

static int sg_get_version(int __user *p)
{
	static const int sg_version_num = 30527;
	return put_user(sg_version_num, p);
}

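/*
 * The block layer has no SCSI id/lun or bus number of its own to report,
 * so these legacy ioctls simply return zero to keep old callers working.
 */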
static int scsi_get_idlun(struct request_queue *q, int __user *p)
{
	return put_user(0, p);
}

static int scsi_get_bus(struct request_queue *q, int __user *p)
{
	return put_user(0, p);
}

static int sg_get_timeout(struct request_queue *q)
{
	return jiffies_to_clock_t(q->sg_timeout);
}

static int sg_set_timeout(struct request_queue *q, int __user *p)
{
	int timeout, err = get_user(timeout, p);

	if (!err)
		q->sg_timeout = clock_t_to_jiffies(timeout);

	return err;
}

static int sg_get_reserved_size(struct request_queue *q, int __user *p)
{
	unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);

	return put_user(val, p);
}

static int sg_set_reserved_size(struct request_queue *q, int __user *p)
{
	int size, err = get_user(size, p);

	if (err)
		return err;

	if (size < 0)
		return -EINVAL;
	if (size > (queue_max_sectors(q) << 9))
		size = queue_max_sectors(q) << 9;

	q->sg_reserved_size = size;
	return 0;
}

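/*
 * Always reports an emulated (ATAPI) host; this is what the historical
 * sg interface expects, and nothing appears to depend on the distinction.
 */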
static int sg_emulated_host(struct request_queue *q, int __user *p)
{
	return put_user(1, p);
}

static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
{
	/* Basic read-only commands */
	__set_bit(TEST_UNIT_READY, filter->read_ok);
	__set_bit(REQUEST_SENSE, filter->read_ok);
	__set_bit(READ_6, filter->read_ok);
	__set_bit(READ_10, filter->read_ok);
	__set_bit(READ_12, filter->read_ok);
	__set_bit(READ_16, filter->read_ok);
	__set_bit(READ_BUFFER, filter->read_ok);
	__set_bit(READ_DEFECT_DATA, filter->read_ok);
	__set_bit(READ_CAPACITY, filter->read_ok);
	__set_bit(READ_LONG, filter->read_ok);
	__set_bit(INQUIRY, filter->read_ok);
	__set_bit(MODE_SENSE, filter->read_ok);
	__set_bit(MODE_SENSE_10, filter->read_ok);
	__set_bit(LOG_SENSE, filter->read_ok);
	__set_bit(START_STOP, filter->read_ok);
	__set_bit(GPCMD_VERIFY_10, filter->read_ok);
	__set_bit(VERIFY_16, filter->read_ok);
	__set_bit(REPORT_LUNS, filter->read_ok);
	__set_bit(SERVICE_ACTION_IN, filter->read_ok);
	__set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
	__set_bit(MAINTENANCE_IN, filter->read_ok);
	__set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);

	/* Audio CD commands */
	__set_bit(GPCMD_PLAY_CD, filter->read_ok);
	__set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
	__set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
	__set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
	__set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);

	/* CD/DVD data reading */
	__set_bit(GPCMD_READ_CD, filter->read_ok);
	__set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
	__set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
	__set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
	__set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
	__set_bit(GPCMD_READ_HEADER, filter->read_ok);
	__set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
	__set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
	__set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
	__set_bit(GPCMD_REPORT_KEY, filter->read_ok);
	__set_bit(GPCMD_SCAN, filter->read_ok);
	__set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
	__set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
	__set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
	__set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
	__set_bit(GPCMD_SEEK, filter->read_ok);
	__set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);

	/* Basic writing commands */
	__set_bit(WRITE_6, filter->write_ok);
	__set_bit(WRITE_10, filter->write_ok);
	__set_bit(WRITE_VERIFY, filter->write_ok);
	__set_bit(WRITE_12, filter->write_ok);
	__set_bit(WRITE_VERIFY_12, filter->write_ok);
	__set_bit(WRITE_16, filter->write_ok);
	__set_bit(WRITE_LONG, filter->write_ok);
	__set_bit(WRITE_LONG_2, filter->write_ok);
	__set_bit(ERASE, filter->write_ok);
	__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
	__set_bit(MODE_SELECT, filter->write_ok);
	__set_bit(LOG_SELECT, filter->write_ok);
	__set_bit(GPCMD_BLANK, filter->write_ok);
	__set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
	__set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
	__set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
	__set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
	__set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
	__set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
	__set_bit(GPCMD_SEND_EVENT, filter->write_ok);
	__set_bit(GPCMD_SEND_KEY, filter->write_ok);
	__set_bit(GPCMD_SEND_OPC, filter->write_ok);
	__set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
	__set_bit(GPCMD_SET_SPEED, filter->write_ok);
	__set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
	__set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
	__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
	__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
}

int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
{
	struct blk_cmd_filter *filter = &blk_default_cmd_filter;

	/* root can do any command. */
	if (capable(CAP_SYS_RAWIO))
		return 0;

	/* if there's no filter set, assume we're filtering everything out */
	if (!filter)
		return -EPERM;

	/* Anybody who can open the device can do a read-safe command */
	if (test_bit(cmd[0], filter->read_ok))
		return 0;

	/* Write-safe commands additionally require a writable open */
	if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
		return 0;

	return -EPERM;
}
EXPORT_SYMBOL(blk_verify_command);

static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
			     struct sg_io_hdr *hdr, fmode_t mode)
{
	if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;
	if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->cmd_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}

static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
				 struct bio *bio)
{
	int r, ret = 0;

	/*
	 * fill in all the output members
	 */
	hdr->status = rq->errors & 0xff;
	hdr->masked_status = status_byte(rq->errors);
	hdr->msg_status = msg_byte(rq->errors);
	hdr->host_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->resid = rq->resid_len;
	hdr->sb_len_wr = 0;

	if (rq->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

		if (!copy_to_user(hdr->sbp, rq->sense, len))
			hdr->sb_len_wr = len;
		else
			ret = -EFAULT;
	}

	r = blk_rq_unmap_user(bio);
	if (!ret)
		ret = r;
	blk_put_request(rq);

	return ret;
}

static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		 struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	int writing = 0, ret = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
		blk_put_request(rq);
		return -EFAULT;
	}

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		size_t iov_data_len;
		struct sg_iovec *sg_iov;
		struct iovec *iov;
		int i;

		sg_iov = kmalloc(size, GFP_KERNEL);
		if (!sg_iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
			kfree(sg_iov);
			ret = -EFAULT;
			goto out;
		}

		/*
		 * Sum up the vecs, making sure they don't overflow
		 */
		iov = (struct iovec *) sg_iov;
		iov_data_len = 0;
		for (i = 0; i < hdr->iovec_count; i++) {
			if (iov_data_len + iov[i].iov_len < iov_data_len) {
				kfree(sg_iov);
				ret = -EINVAL;
				goto out;
			}
			iov_data_len += iov[i].iov_len;
		}

		/* SG_IO howto says that the shorter of the two wins */
		if (hdr->dxfer_len < iov_data_len) {
			hdr->iovec_count = iov_shorten(iov,
						       hdr->iovec_count,
						       hdr->dxfer_len);
			iov_data_len = hdr->dxfer_len;
		}

		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
					  iov_data_len, GFP_KERNEL);
		kfree(sg_iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out;

	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, 0);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	return blk_complete_sghdr_rq(rq, hdr, bio);
out:
	blk_put_request(rq);
	return ret;
}

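/**
 * sg_scsi_ioctl  --  handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
 * @q:		request queue to send scsi commands down
 * @disk:	gendisk to operate on (optional)
 * @mode:	mode used to open the file through which the ioctl was submitted
 * @sic:	userspace structure describing the command to perform
 *
 * Send down the scsi command described by @sic to the device below
 * the request queue @q.
 *
 * Notes:
 *   -  This interface is deprecated; new callers should use SG_IO,
 *      which is a far more flexible way to issue SCSI commands.
 *   -  The SCSI command length is determined by examining the first
 *      byte of the command; there is no way to override this.
 *   -  Data transfers are limited to PAGE_SIZE in each direction.
 *   -  On error, at most OMAX_SB_LEN (16) bytes of sense data are
 *      copied back so that old callers are not surprised.
 *   -  A negative return value is a Unix error code, zero means the
 *      command completed successfully, and a positive value is the
 *      SCSI status byte of the completed command.
 */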
#define OMAX_SB_LEN 16          /* For backward compatibility */
int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
		  struct scsi_ioctl_command __user *sic)
{
	struct request *rq;
	int err;
	unsigned int in_len, out_len, bytes, opcode, cmdlen;
	char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];

	if (!sic)
		return -EINVAL;

	/*
	 * get in and out lengths, verify they don't exceed a page worth of data
	 */
	if (get_user(in_len, &sic->inlen))
		return -EFAULT;
	if (get_user(out_len, &sic->outlen))
		return -EFAULT;
	if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
		return -EINVAL;
	if (get_user(opcode, sic->data))
		return -EFAULT;

	bytes = max(in_len, out_len);
	if (bytes) {
		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER | __GFP_NOWARN);
		if (!buffer)
			return -ENOMEM;
	}

	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
	if (!rq) {		/* may fail on a dead queue, as in sg_io() */
		err = -ENOMEM;
		goto error_free_buffer;
	}

	cmdlen = COMMAND_SIZE(opcode);

	/*
	 * get command and data to send to device, if any
	 */
	err = -EFAULT;
	rq->cmd_len = cmdlen;
	if (copy_from_user(rq->cmd, sic->data, cmdlen))
		goto error;

	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
		goto error;

	err = blk_verify_command(rq->cmd, mode & FMODE_WRITE);
	if (err)
		goto error;

	/* default; possibly overridden below for specific opcodes */
	rq->retries = 5;

	switch (opcode) {
	case SEND_DIAGNOSTIC:
	case FORMAT_UNIT:
		rq->timeout = FORMAT_UNIT_TIMEOUT;
		rq->retries = 1;
		break;
	case START_STOP:
		rq->timeout = START_STOP_TIMEOUT;
		break;
	case MOVE_MEDIUM:
		rq->timeout = MOVE_MEDIUM_TIMEOUT;
		break;
	case READ_ELEMENT_STATUS:
		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
		break;
	case READ_DEFECT_DATA:
		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
		rq->retries = 1;
		break;
	default:
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
		break;
	}

	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
		err = DRIVER_ERROR << 24;
		goto out;
	}

	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	blk_execute_rq(q, disk, rq, 0);

out:
	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
	if (err) {
		if (rq->sense_len && rq->sense) {
			bytes = (OMAX_SB_LEN > rq->sense_len) ?
				rq->sense_len : OMAX_SB_LEN;
			if (copy_to_user(sic->data, rq->sense, bytes))
				err = -EFAULT;
		}
	} else {
		if (copy_to_user(sic->data, buffer, out_len))
			err = -EFAULT;
	}

error:
	blk_put_request(rq);
error_free_buffer:
	kfree(buffer);
	return err;
}
EXPORT_SYMBOL_GPL(sg_scsi_ioctl);

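/* Send basic block requests */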
static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
			      int cmd, int data)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, WRITE, __GFP_WAIT);
	if (!rq)		/* guard against allocation failure, as in sg_io() */
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	rq->cmd[0] = cmd;
	rq->cmd[4] = data;
	rq->cmd_len = 6;
	err = blk_execute_rq(q, bd_disk, rq, 0);
	blk_put_request(rq);

	return err;
}

static inline int blk_send_start_stop(struct request_queue *q,
				      struct gendisk *bd_disk, int data)
{
	return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
}

int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
		   unsigned int cmd, void __user *arg)
{
	int err;

	if (!q)
		return -ENXIO;

	switch (cmd) {
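	/*
	 * new sgv3 interface
	 */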
	case SG_GET_VERSION_NUM:
		err = sg_get_version(arg);
		break;
	case SCSI_IOCTL_GET_IDLUN:
		err = scsi_get_idlun(q, arg);
		break;
	case SCSI_IOCTL_GET_BUS_NUMBER:
		err = scsi_get_bus(q, arg);
		break;
	case SG_SET_TIMEOUT:
		err = sg_set_timeout(q, arg);
		break;
	case SG_GET_TIMEOUT:
		err = sg_get_timeout(q);
		break;
	case SG_GET_RESERVED_SIZE:
		err = sg_get_reserved_size(q, arg);
		break;
	case SG_SET_RESERVED_SIZE:
		err = sg_set_reserved_size(q, arg);
		break;
	case SG_EMULATED_HOST:
		err = sg_emulated_host(q, arg);
		break;
	case SG_IO: {
		struct sg_io_hdr hdr;

		err = -EFAULT;
		if (copy_from_user(&hdr, arg, sizeof(hdr)))
			break;
		err = sg_io(q, bd_disk, &hdr, mode);
		if (err == -EFAULT)
			break;

		if (copy_to_user(arg, &hdr, sizeof(hdr)))
			err = -EFAULT;
		break;
	}
	case CDROM_SEND_PACKET: {
		struct cdrom_generic_command cgc;
		struct sg_io_hdr hdr;

		err = -EFAULT;
		if (copy_from_user(&cgc, arg, sizeof(cgc)))
			break;
		cgc.timeout = clock_t_to_jiffies(cgc.timeout);
		memset(&hdr, 0, sizeof(hdr));
		hdr.interface_id = 'S';
		hdr.cmd_len = sizeof(cgc.cmd);
		hdr.dxfer_len = cgc.buflen;
		err = 0;
		switch (cgc.data_direction) {
		case CGC_DATA_UNKNOWN:
			hdr.dxfer_direction = SG_DXFER_UNKNOWN;
			break;
		case CGC_DATA_WRITE:
			hdr.dxfer_direction = SG_DXFER_TO_DEV;
			break;
		case CGC_DATA_READ:
			hdr.dxfer_direction = SG_DXFER_FROM_DEV;
			break;
		case CGC_DATA_NONE:
			hdr.dxfer_direction = SG_DXFER_NONE;
			break;
		default:
			err = -EINVAL;
		}
		if (err)
			break;

		hdr.dxferp = cgc.buffer;
		hdr.sbp = cgc.sense;
		if (hdr.sbp)
			hdr.mx_sb_len = sizeof(struct request_sense);
		hdr.timeout = jiffies_to_msecs(cgc.timeout);
		hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd;
		hdr.cmd_len = sizeof(cgc.cmd);

		err = sg_io(q, bd_disk, &hdr, mode);
		if (err == -EFAULT)
			break;

		if (hdr.status)
			err = -EIO;

		cgc.stat = err;
		cgc.buflen = hdr.resid;
		if (copy_to_user(arg, &cgc, sizeof(cgc)))
			err = -EFAULT;

		break;
	}

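	/*
	 * old junk scsi send command ioctl
	 */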
	case SCSI_IOCTL_SEND_COMMAND:
		printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
		err = -EINVAL;
		if (!arg)
			break;

		err = sg_scsi_ioctl(q, bd_disk, mode, arg);
		break;
	case CDROMCLOSETRAY:
		err = blk_send_start_stop(q, bd_disk, 0x03);
		break;
	case CDROMEJECT:
		err = blk_send_start_stop(q, bd_disk, 0x02);
		break;
	default:
		err = -ENOTTY;
	}

	return err;
}
EXPORT_SYMBOL(scsi_cmd_ioctl);

int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
{
	if (bd && bd == bd->bd_contains)
		return 0;

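	/* These informational and tuning ioctls do not touch the medium,
	 * so keep allowing them even when they are issued on a partition
	 * rather than on the whole device.
	 */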
	switch (cmd) {
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_GET_PCI:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_VERSION_NUM:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
		return 0;
	case CDROM_GET_CAPABILITY:
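		/* Keep this until we remove the printk below.  udev sends it
		 * and we do not want to spam dmesg about it.  CD-ROMs do
		 * not have partitions, so we get here only for disks.
		 */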
		return -ENOIOCTLCMD;
	default:
		break;
	}

	if (capable(CAP_SYS_RAWIO))
		return 0;

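	/* In particular, rule out all resets and host-specific ioctls. */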
	printk_ratelimited(KERN_WARNING
			   "%s: sending ioctl %x to a partition!\n", current->comm, cmd);

	return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(scsi_verify_blk_ioctl);

int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
		       unsigned int cmd, void __user *arg)
{
	int ret;

	ret = scsi_verify_blk_ioctl(bd, cmd);
	if (ret < 0)
		return ret;

	return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
}
EXPORT_SYMBOL(scsi_cmd_blk_ioctl);

static int __init blk_scsi_ioctl_init(void)
{
	blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
	return 0;
}
fs_initcall(blk_scsi_ioctl_init);