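/*
 * pktcdvd - packet writing block driver
 *
 * Maps a pktcdvd block device on top of a CD-RW/DVD-RW/DVD+RW drive and
 * turns the incoming write requests into fixed-size packet writes
 * suitable for packet-written media.
 */
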
47#include <linux/pktcdvd.h>
48#include <linux/module.h>
49#include <linux/types.h>
50#include <linux/kernel.h>
51#include <linux/compat.h>
52#include <linux/kthread.h>
53#include <linux/errno.h>
54#include <linux/spinlock.h>
55#include <linux/file.h>
56#include <linux/proc_fs.h>
57#include <linux/seq_file.h>
58#include <linux/miscdevice.h>
59#include <linux/freezer.h>
60#include <linux/mutex.h>
61#include <linux/slab.h>
62#include <scsi/scsi_cmnd.h>
63#include <scsi/scsi_ioctl.h>
64#include <scsi/scsi.h>
65#include <linux/debugfs.h>
66#include <linux/device.h>
67
68#include <asm/uaccess.h>
69
70#define DRIVER_NAME "pktcdvd"
71
72#if PACKET_DEBUG
73#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
74#else
75#define DPRINTK(fmt, args...)
76#endif
77
78#if PACKET_DEBUG > 1
79#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
80#else
81#define VPRINTK(fmt, args...)
82#endif
83
84#define MAX_SPEED 0xffff
85
86#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
87 ~(sector_t)((pd)->settings.size - 1))
88
89static DEFINE_MUTEX(pktcdvd_mutex);
90static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
91static struct proc_dir_entry *pkt_proc;
92static int pktdev_major;
93static int write_congestion_on = PKT_WRITE_CONGESTION_ON;
94static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
95static struct mutex ctl_mutex;
96static mempool_t *psd_pool;
97
98static struct class *class_pktcdvd = NULL;
99static struct dentry *pkt_debugfs_root = NULL;
100
101
102static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
103static int pkt_remove_dev(dev_t pkt_dev);
104static int pkt_seq_show(struct seq_file *m, void *p);
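
/*
 * sysfs interface: each mapped device gets a /sys/class/pktcdvd/<name>/
 * directory with "stat" and "write_queue" sub-kobjects.  pkt_kobj_create()
 * allocates and registers one such sub-kobject and ties it back to its
 * pktcdvd_device.
 */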
111static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
112 const char* name,
113 struct kobject* parent,
114 struct kobj_type* ktype)
115{
116 struct pktcdvd_kobj *p;
117 int error;
118
119 p = kzalloc(sizeof(*p), GFP_KERNEL);
120 if (!p)
121 return NULL;
122 p->pd = pd;
123 error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
124 if (error) {
125 kobject_put(&p->kobj);
126 return NULL;
127 }
128 kobject_uevent(&p->kobj, KOBJ_ADD);
129 return p;
130}
131
132
133
134static void pkt_kobj_remove(struct pktcdvd_kobj *p)
135{
136 if (p)
137 kobject_put(&p->kobj);
138}
139
140
141
142static void pkt_kobj_release(struct kobject *kobj)
143{
144 kfree(to_pktcdvdkobj(kobj));
145}
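
/*
 * Per-device sysfs attributes:
 *
 *   /sys/class/pktcdvd/<name>/
 *       stat/reset                  write to clear the counters below
 *       stat/packets_started
 *       stat/packets_finished
 *       stat/kb_written
 *       stat/kb_read
 *       stat/kb_read_gather
 *       write_queue/size
 *       write_queue/congestion_off
 *       write_queue/congestion_on
 */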
155#define DEF_ATTR(_obj,_name,_mode) \
156 static struct attribute _obj = { .name = _name, .mode = _mode }
171DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
172DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
173DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
174DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
175DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
176DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);
177
178static struct attribute *kobj_pkt_attrs_stat[] = {
179 &kobj_pkt_attr_st1,
180 &kobj_pkt_attr_st2,
181 &kobj_pkt_attr_st3,
182 &kobj_pkt_attr_st4,
183 &kobj_pkt_attr_st5,
184 &kobj_pkt_attr_st6,
185 NULL
186};
187
188DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
189DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
190DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on", 0644);
191
192static struct attribute *kobj_pkt_attrs_wqueue[] = {
193 &kobj_pkt_attr_wq1,
194 &kobj_pkt_attr_wq2,
195 &kobj_pkt_attr_wq3,
196 NULL
197};
198
199static ssize_t kobj_pkt_show(struct kobject *kobj,
200 struct attribute *attr, char *data)
201{
202 struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
203 int n = 0;
204 int v;
205 if (strcmp(attr->name, "packets_started") == 0) {
206 n = sprintf(data, "%lu\n", pd->stats.pkt_started);
207
208 } else if (strcmp(attr->name, "packets_finished") == 0) {
209 n = sprintf(data, "%lu\n", pd->stats.pkt_ended);
210
211 } else if (strcmp(attr->name, "kb_written") == 0) {
212 n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);
213
214 } else if (strcmp(attr->name, "kb_read") == 0) {
215 n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);
216
217 } else if (strcmp(attr->name, "kb_read_gather") == 0) {
218 n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);
219
220 } else if (strcmp(attr->name, "size") == 0) {
221 spin_lock(&pd->lock);
222 v = pd->bio_queue_size;
223 spin_unlock(&pd->lock);
224 n = sprintf(data, "%d\n", v);
225
226 } else if (strcmp(attr->name, "congestion_off") == 0) {
227 spin_lock(&pd->lock);
228 v = pd->write_congestion_off;
229 spin_unlock(&pd->lock);
230 n = sprintf(data, "%d\n", v);
231
232 } else if (strcmp(attr->name, "congestion_on") == 0) {
233 spin_lock(&pd->lock);
234 v = pd->write_congestion_on;
235 spin_unlock(&pd->lock);
236 n = sprintf(data, "%d\n", v);
237 }
238 return n;
239}
240
241static void init_write_congestion_marks(int* lo, int* hi)
242{
243 if (*hi > 0) {
244 *hi = max(*hi, 500);
245 *hi = min(*hi, 1000000);
246 if (*lo <= 0)
247 *lo = *hi - 100;
248 else {
249 *lo = min(*lo, *hi - 100);
250 *lo = max(*lo, 100);
251 }
252 } else {
253 *hi = -1;
254 *lo = -1;
255 }
256}
257
258static ssize_t kobj_pkt_store(struct kobject *kobj,
259 struct attribute *attr,
260 const char *data, size_t len)
261{
262 struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
263 int val;
264
265 if (strcmp(attr->name, "reset") == 0 && len > 0) {
266 pd->stats.pkt_started = 0;
267 pd->stats.pkt_ended = 0;
268 pd->stats.secs_w = 0;
269 pd->stats.secs_rg = 0;
270 pd->stats.secs_r = 0;
271
272 } else if (strcmp(attr->name, "congestion_off") == 0
273 && sscanf(data, "%d", &val) == 1) {
274 spin_lock(&pd->lock);
275 pd->write_congestion_off = val;
276 init_write_congestion_marks(&pd->write_congestion_off,
277 &pd->write_congestion_on);
278 spin_unlock(&pd->lock);
279
280 } else if (strcmp(attr->name, "congestion_on") == 0
281 && sscanf(data, "%d", &val) == 1) {
282 spin_lock(&pd->lock);
283 pd->write_congestion_on = val;
284 init_write_congestion_marks(&pd->write_congestion_off,
285 &pd->write_congestion_on);
286 spin_unlock(&pd->lock);
287 }
288 return len;
289}
290
291static const struct sysfs_ops kobj_pkt_ops = {
292 .show = kobj_pkt_show,
293 .store = kobj_pkt_store
294};
295static struct kobj_type kobj_pkt_type_stat = {
296 .release = pkt_kobj_release,
297 .sysfs_ops = &kobj_pkt_ops,
298 .default_attrs = kobj_pkt_attrs_stat
299};
300static struct kobj_type kobj_pkt_type_wqueue = {
301 .release = pkt_kobj_release,
302 .sysfs_ops = &kobj_pkt_ops,
303 .default_attrs = kobj_pkt_attrs_wqueue
304};
305
306static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
307{
308 if (class_pktcdvd) {
309 pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
310 "%s", pd->name);
311 if (IS_ERR(pd->dev))
312 pd->dev = NULL;
313 }
314 if (pd->dev) {
315 pd->kobj_stat = pkt_kobj_create(pd, "stat",
316 &pd->dev->kobj,
317 &kobj_pkt_type_stat);
318 pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
319 &pd->dev->kobj,
320 &kobj_pkt_type_wqueue);
321 }
322}
323
324static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
325{
326 pkt_kobj_remove(pd->kobj_stat);
327 pkt_kobj_remove(pd->kobj_wqueue);
328 if (class_pktcdvd)
329 device_unregister(pd->dev);
330}
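
/*
 * Control files in /sys/class/pktcdvd/:
 *
 *   add         write "major:minor" of a CD/DVD drive to map it
 *   remove      write "major:minor" of the pktcdvd device to unmap it
 *   device_map  lists "<name> <pkt major:minor> <drive major:minor>"
 */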
340static void class_pktcdvd_release(struct class *cls)
341{
342 kfree(cls);
343}
344static ssize_t class_pktcdvd_show_map(struct class *c,
345 struct class_attribute *attr,
346 char *data)
347{
348 int n = 0;
349 int idx;
350 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
351 for (idx = 0; idx < MAX_WRITERS; idx++) {
352 struct pktcdvd_device *pd = pkt_devs[idx];
353 if (!pd)
354 continue;
355 n += sprintf(data+n, "%s %u:%u %u:%u\n",
356 pd->name,
357 MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
358 MAJOR(pd->bdev->bd_dev),
359 MINOR(pd->bdev->bd_dev));
360 }
361 mutex_unlock(&ctl_mutex);
362 return n;
363}
364
365static ssize_t class_pktcdvd_store_add(struct class *c,
366 struct class_attribute *attr,
367 const char *buf,
368 size_t count)
369{
370 unsigned int major, minor;
371
372 if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
373
374 if (!try_module_get(THIS_MODULE))
375 return -ENODEV;
376
377 pkt_setup_dev(MKDEV(major, minor), NULL);
378
379 module_put(THIS_MODULE);
380
381 return count;
382 }
383
384 return -EINVAL;
385}
386
387static ssize_t class_pktcdvd_store_remove(struct class *c,
388 struct class_attribute *attr,
389 const char *buf,
390 size_t count)
391{
392 unsigned int major, minor;
393 if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
394 pkt_remove_dev(MKDEV(major, minor));
395 return count;
396 }
397 return -EINVAL;
398}
399
400static struct class_attribute class_pktcdvd_attrs[] = {
401 __ATTR(add, 0200, NULL, class_pktcdvd_store_add),
402 __ATTR(remove, 0200, NULL, class_pktcdvd_store_remove),
403 __ATTR(device_map, 0444, class_pktcdvd_show_map, NULL),
404 __ATTR_NULL
405};
406
407
408static int pkt_sysfs_init(void)
409{
410 int ret = 0;
411
412
413
414
415
416 class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
417 if (!class_pktcdvd)
418 return -ENOMEM;
419 class_pktcdvd->name = DRIVER_NAME;
420 class_pktcdvd->owner = THIS_MODULE;
421 class_pktcdvd->class_release = class_pktcdvd_release;
422 class_pktcdvd->class_attrs = class_pktcdvd_attrs;
423 ret = class_register(class_pktcdvd);
424 if (ret) {
425 kfree(class_pktcdvd);
426 class_pktcdvd = NULL;
427 printk(DRIVER_NAME": failed to create class pktcdvd\n");
428 return ret;
429 }
430 return 0;
431}
432
433static void pkt_sysfs_cleanup(void)
434{
435 if (class_pktcdvd)
436 class_destroy(class_pktcdvd);
437 class_pktcdvd = NULL;
438}
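
/*
 * debugfs interface: /sys/kernel/debug/pktcdvd/<name>/info dumps the
 * per-device state via pkt_seq_show().
 */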
448static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
449{
450 return pkt_seq_show(m, p);
451}
452
453static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
454{
455 return single_open(file, pkt_debugfs_seq_show, inode->i_private);
456}
457
458static const struct file_operations debug_fops = {
459 .open = pkt_debugfs_fops_open,
460 .read = seq_read,
461 .llseek = seq_lseek,
462 .release = single_release,
463 .owner = THIS_MODULE,
464};
465
466static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
467{
468 if (!pkt_debugfs_root)
469 return;
470 pd->dfs_f_info = NULL;
471 pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
472 if (IS_ERR(pd->dfs_d_root)) {
473 pd->dfs_d_root = NULL;
474 return;
475 }
476 pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
477 pd->dfs_d_root, pd, &debug_fops);
478 if (IS_ERR(pd->dfs_f_info)) {
479 pd->dfs_f_info = NULL;
480 return;
481 }
482}
483
484static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
485{
486 if (!pkt_debugfs_root)
487 return;
488 if (pd->dfs_f_info)
489 debugfs_remove(pd->dfs_f_info);
490 pd->dfs_f_info = NULL;
491 if (pd->dfs_d_root)
492 debugfs_remove(pd->dfs_d_root);
493 pd->dfs_d_root = NULL;
494}
495
496static void pkt_debugfs_init(void)
497{
498 pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
499 if (IS_ERR(pkt_debugfs_root)) {
500 pkt_debugfs_root = NULL;
501 return;
502 }
503}
504
505static void pkt_debugfs_cleanup(void)
506{
507 if (!pkt_debugfs_root)
508 return;
509 debugfs_remove(pkt_debugfs_root);
510 pkt_debugfs_root = NULL;
511}
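
/*
 * Called whenever a bio submitted through the I/O scheduler completes;
 * once the last pending bio has finished, wake the writer thread so it
 * can re-evaluate its queues.
 */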
516static void pkt_bio_finished(struct pktcdvd_device *pd)
517{
518 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
519 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
520 VPRINTK(DRIVER_NAME": queue empty\n");
521 atomic_set(&pd->iosched.attention, 1);
522 wake_up(&pd->wqueue);
523 }
524}
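
/*
 * Allocate a packet_data together with its write bio, per-frame read bios
 * and the pages backing one zone's worth of data.
 */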
529static struct packet_data *pkt_alloc_packet_data(int frames)
530{
531 int i;
532 struct packet_data *pkt;
533
534 pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
535 if (!pkt)
536 goto no_pkt;
537
538 pkt->frames = frames;
539 pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
540 if (!pkt->w_bio)
541 goto no_bio;
542
543 for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
544 pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
545 if (!pkt->pages[i])
546 goto no_page;
547 }
548
549 spin_lock_init(&pkt->lock);
550 bio_list_init(&pkt->orig_bios);
551
552 for (i = 0; i < frames; i++) {
553 struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
554 if (!bio)
555 goto no_rd_bio;
556
557 pkt->r_bios[i] = bio;
558 }
559
560 return pkt;
561
562no_rd_bio:
563 for (i = 0; i < frames; i++) {
564 struct bio *bio = pkt->r_bios[i];
565 if (bio)
566 bio_put(bio);
567 }
568
569no_page:
570 for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
571 if (pkt->pages[i])
572 __free_page(pkt->pages[i]);
573 bio_put(pkt->w_bio);
574no_bio:
575 kfree(pkt);
576no_pkt:
577 return NULL;
578}
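
/*
 * Free everything allocated by pkt_alloc_packet_data().
 */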
583static void pkt_free_packet_data(struct packet_data *pkt)
584{
585 int i;
586
587 for (i = 0; i < pkt->frames; i++) {
588 struct bio *bio = pkt->r_bios[i];
589 if (bio)
590 bio_put(bio);
591 }
592 for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
593 __free_page(pkt->pages[i]);
594 bio_put(pkt->w_bio);
595 kfree(pkt);
596}
597
598static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
599{
600 struct packet_data *pkt, *next;
601
602 BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
603
604 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
605 pkt_free_packet_data(pkt);
606 }
607 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
608}
609
610static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
611{
612 struct packet_data *pkt;
613
614 BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
615
616 while (nr_packets > 0) {
617 pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
618 if (!pkt) {
619 pkt_shrink_pktlist(pd);
620 return 0;
621 }
622 pkt->id = nr_packets;
623 pkt->pd = pd;
624 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
625 nr_packets--;
626 }
627 return 1;
628}
629
630static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
631{
632 struct rb_node *n = rb_next(&node->rb_node);
633 if (!n)
634 return NULL;
635 return rb_entry(n, struct pkt_rb_node, rb_node);
636}
637
638static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
639{
640 rb_erase(&node->rb_node, &pd->bio_queue);
641 mempool_free(node, pd->rb_pool);
642 pd->bio_queue_size--;
643 BUG_ON(pd->bio_queue_size < 0);
644}
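
/*
 * Find the first queued bio whose starting sector is >= s, or NULL if
 * there is none.
 */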
649static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
650{
651 struct rb_node *n = pd->bio_queue.rb_node;
652 struct rb_node *next;
653 struct pkt_rb_node *tmp;
654
655 if (!n) {
656 BUG_ON(pd->bio_queue_size > 0);
657 return NULL;
658 }
659
660 for (;;) {
661 tmp = rb_entry(n, struct pkt_rb_node, rb_node);
662 if (s <= tmp->bio->bi_sector)
663 next = n->rb_left;
664 else
665 next = n->rb_right;
666 if (!next)
667 break;
668 n = next;
669 }
670
671 if (s > tmp->bio->bi_sector) {
672 tmp = pkt_rbtree_next(tmp);
673 if (!tmp)
674 return NULL;
675 }
676 BUG_ON(s > tmp->bio->bi_sector);
677 return tmp;
678}
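
/*
 * Insert a bio into the per-device work queue, ordered by starting sector.
 */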
683static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
684{
685 struct rb_node **p = &pd->bio_queue.rb_node;
686 struct rb_node *parent = NULL;
687 sector_t s = node->bio->bi_sector;
688 struct pkt_rb_node *tmp;
689
690 while (*p) {
691 parent = *p;
692 tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
693 if (s < tmp->bio->bi_sector)
694 p = &(*p)->rb_left;
695 else
696 p = &(*p)->rb_right;
697 }
698 rb_link_node(&node->rb_node, parent, p);
699 rb_insert_color(&node->rb_node, &pd->bio_queue);
700 pd->bio_queue_size++;
701}
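
/*
 * Send a SCSI packet command to the underlying drive and wait for
 * completion.
 */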
707static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
708{
709 struct request_queue *q = bdev_get_queue(pd->bdev);
710 struct request *rq;
711 int ret = 0;
712
713 rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
714 WRITE : READ, __GFP_WAIT);
715 if (IS_ERR(rq))
716 return PTR_ERR(rq);
717 blk_rq_set_block_pc(rq);
718
719 if (cgc->buflen) {
720 if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
721 goto out;
722 }
723
724 rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
725 memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
726
727 rq->timeout = 60*HZ;
728 if (cgc->quiet)
729 rq->cmd_flags |= REQ_QUIET;
730
731 blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
732 if (rq->errors)
733 ret = -EIO;
734out:
735 blk_put_request(rq);
736 return ret;
737}
738
739
740
741
742
743static void pkt_dump_sense(struct packet_command *cgc)
744{
745 static char *info[9] = { "No sense", "Recovered error", "Not ready",
746 "Medium error", "Hardware error", "Illegal request",
747 "Unit attention", "Data protect", "Blank check" };
748 int i;
749 struct request_sense *sense = cgc->sense;
750
751 printk(DRIVER_NAME":");
752 for (i = 0; i < CDROM_PACKET_SIZE; i++)
753 printk(" %02x", cgc->cmd[i]);
754 printk(" - ");
755
756 if (sense == NULL) {
757 printk("no sense\n");
758 return;
759 }
760
761 printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);
762
763 if (sense->sense_key > 8) {
764 printk(" (INVALID)\n");
765 return;
766 }
767
768 printk(" (%s)\n", info[sense->sense_key]);
769}
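
/*
 * Flush the drive's write cache to the media.
 */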
774static int pkt_flush_cache(struct pktcdvd_device *pd)
775{
776 struct packet_command cgc;
777
778 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
779 cgc.cmd[0] = GPCMD_FLUSH_CACHE;
780 cgc.quiet = 1;
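 /*
  * Setting the IMMED bit (cmd[1] bit 1) would make the flush return
  * before completion; the disabled code below would enable it.
  */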
786#if 0
787 cgc.cmd[1] = 1 << 1;
788#endif
789 return pkt_generic_packet(pd, &cgc);
790}
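
/*
 * Set the drive's read and write speeds, both given in kB/s as expected
 * by GPCMD_SET_SPEED.
 */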
795static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
796 unsigned write_speed, unsigned read_speed)
797{
798 struct packet_command cgc;
799 struct request_sense sense;
800 int ret;
801
802 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
803 cgc.sense = &sense;
804 cgc.cmd[0] = GPCMD_SET_SPEED;
805 cgc.cmd[2] = (read_speed >> 8) & 0xff;
806 cgc.cmd[3] = read_speed & 0xff;
807 cgc.cmd[4] = (write_speed >> 8) & 0xff;
808 cgc.cmd[5] = write_speed & 0xff;
809
810 if ((ret = pkt_generic_packet(pd, &cgc)))
811 pkt_dump_sense(&cgc);
812
813 return ret;
814}
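
/*
 * Queue a bio for the packet I/O scheduler and poke the writer thread.
 */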
820static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
821{
822 spin_lock(&pd->iosched.lock);
823 if (bio_data_dir(bio) == READ)
824 bio_list_add(&pd->iosched.read_queue, bio);
825 else
826 bio_list_add(&pd->iosched.write_queue, bio);
827 spin_unlock(&pd->iosched.lock);
828
829 atomic_set(&pd->iosched.attention, 1);
830 wake_up(&pd->wqueue);
831}
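
/*
 * Push queued bios down to the drive.  Reads and writes are issued in
 * batches: before changing direction the scheduler waits for outstanding
 * bios to finish, and the drive cache is flushed before switching from
 * writing to reading.  The read speed is raised to MAX_SPEED after a long
 * run of reads and restored to the write speed once writes resume.
 */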
849static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
850{
851
852 if (atomic_read(&pd->iosched.attention) == 0)
853 return;
854 atomic_set(&pd->iosched.attention, 0);
855
856 for (;;) {
857 struct bio *bio;
858 int reads_queued, writes_queued;
859
860 spin_lock(&pd->iosched.lock);
861 reads_queued = !bio_list_empty(&pd->iosched.read_queue);
862 writes_queued = !bio_list_empty(&pd->iosched.write_queue);
863 spin_unlock(&pd->iosched.lock);
864
865 if (!reads_queued && !writes_queued)
866 break;
867
868 if (pd->iosched.writing) {
869 int need_write_seek = 1;
870 spin_lock(&pd->iosched.lock);
871 bio = bio_list_peek(&pd->iosched.write_queue);
872 spin_unlock(&pd->iosched.lock);
873 if (bio && (bio->bi_sector == pd->iosched.last_write))
874 need_write_seek = 0;
875 if (need_write_seek && reads_queued) {
876 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
877 VPRINTK(DRIVER_NAME": write, waiting\n");
878 break;
879 }
880 pkt_flush_cache(pd);
881 pd->iosched.writing = 0;
882 }
883 } else {
884 if (!reads_queued && writes_queued) {
885 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
886 VPRINTK(DRIVER_NAME": read, waiting\n");
887 break;
888 }
889 pd->iosched.writing = 1;
890 }
891 }
892
893 spin_lock(&pd->iosched.lock);
894 if (pd->iosched.writing)
895 bio = bio_list_pop(&pd->iosched.write_queue);
896 else
897 bio = bio_list_pop(&pd->iosched.read_queue);
898 spin_unlock(&pd->iosched.lock);
899
900 if (!bio)
901 continue;
902
903 if (bio_data_dir(bio) == READ)
904 pd->iosched.successive_reads += bio->bi_size >> 10;
905 else {
906 pd->iosched.successive_reads = 0;
907 pd->iosched.last_write = bio_end_sector(bio);
908 }
909 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
910 if (pd->read_speed == pd->write_speed) {
911 pd->read_speed = MAX_SPEED;
912 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
913 }
914 } else {
915 if (pd->read_speed != pd->write_speed) {
916 pd->read_speed = pd->write_speed;
917 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
918 }
919 }
920
921 atomic_inc(&pd->cdrw.pending_bios);
922 generic_make_request(bio);
923 }
924}
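
/*
 * Decide whether write data must be copied into the packet's own pages
 * before submission: if the underlying queue cannot take one segment per
 * frame, set PACKET_MERGE_SEGS so pkt_make_local_copy() merges frames
 * into page-sized segments.
 */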
930static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
931{
932 if ((pd->settings.size << 9) / CD_FRAMESIZE
933 <= queue_max_segments(q)) {
937 clear_bit(PACKET_MERGE_SEGS, &pd->flags);
938 return 0;
939 } else if ((pd->settings.size << 9) / PAGE_SIZE
940 <= queue_max_segments(q)) {
945 set_bit(PACKET_MERGE_SEGS, &pd->flags);
946 return 0;
947 } else {
948 printk(DRIVER_NAME": cdrom max_phys_segments too small\n");
949 return -EIO;
950 }
951}
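
/*
 * Copy write data into the packet's own pages so that the write bio needs
 * at most one segment per page (required when PACKET_MERGE_SEGS is set)
 * and the data can later serve as a cache for the same zone.
 */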
960static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
961{
962 int f, p, offs;
963
964
965 p = 0;
966 offs = 0;
967 for (f = 0; f < pkt->frames; f++) {
968 if (bvec[f].bv_page != pkt->pages[p]) {
969 void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
970 void *vto = page_address(pkt->pages[p]) + offs;
971 memcpy(vto, vfrom, CD_FRAMESIZE);
972 kunmap_atomic(vfrom);
973 bvec[f].bv_page = pkt->pages[p];
974 bvec[f].bv_offset = offs;
975 } else {
976 BUG_ON(bvec[f].bv_offset != offs);
977 }
978 offs += CD_FRAMESIZE;
979 if (offs >= PAGE_SIZE) {
980 offs = 0;
981 p++;
982 }
983 }
984}
985
986static void pkt_end_io_read(struct bio *bio, int err)
987{
988 struct packet_data *pkt = bio->bi_private;
989 struct pktcdvd_device *pd = pkt->pd;
990 BUG_ON(!pd);
991
992 VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
993 (unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
994
995 if (err)
996 atomic_inc(&pkt->io_errors);
997 if (atomic_dec_and_test(&pkt->io_wait)) {
998 atomic_inc(&pkt->run_sm);
999 wake_up(&pd->wqueue);
1000 }
1001 pkt_bio_finished(pd);
1002}
1003
1004static void pkt_end_io_packet_write(struct bio *bio, int err)
1005{
1006 struct packet_data *pkt = bio->bi_private;
1007 struct pktcdvd_device *pd = pkt->pd;
1008 BUG_ON(!pd);
1009
1010 VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
1011
1012 pd->stats.pkt_ended++;
1013
1014 pkt_bio_finished(pd);
1015 atomic_dec(&pkt->io_wait);
1016 atomic_inc(&pkt->run_sm);
1017 wake_up(&pd->wqueue);
1018}
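
/*
 * Schedule reads for the frames of this zone that are not covered by
 * queued write bios, so the packet can be written out in full.
 */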
1023static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1024{
1025 int frames_read = 0;
1026 struct bio *bio;
1027 int f;
1028 char written[PACKET_MAX_SIZE];
1029
1030 BUG_ON(bio_list_empty(&pkt->orig_bios));
1031
1032 atomic_set(&pkt->io_wait, 0);
1033 atomic_set(&pkt->io_errors, 0);
1034
1035
1036
1037
1038 memset(written, 0, sizeof(written));
1039 spin_lock(&pkt->lock);
1040 bio_list_for_each(bio, &pkt->orig_bios) {
1041 int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
1042 int num_frames = bio->bi_size / CD_FRAMESIZE;
1043 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
1044 BUG_ON(first_frame < 0);
1045 BUG_ON(first_frame + num_frames > pkt->frames);
1046 for (f = first_frame; f < first_frame + num_frames; f++)
1047 written[f] = 1;
1048 }
1049 spin_unlock(&pkt->lock);
1050
1051 if (pkt->cache_valid) {
1052 VPRINTK("pkt_gather_data: zone %llx cached\n",
1053 (unsigned long long)pkt->sector);
1054 goto out_account;
1055 }
1056
1057
1058
1059
1060 for (f = 0; f < pkt->frames; f++) {
1061 int p, offset;
1062
1063 if (written[f])
1064 continue;
1065
1066 bio = pkt->r_bios[f];
1067 bio_reset(bio);
1068 bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
1069 bio->bi_bdev = pd->bdev;
1070 bio->bi_end_io = pkt_end_io_read;
1071 bio->bi_private = pkt;
1072
1073 p = (f * CD_FRAMESIZE) / PAGE_SIZE;
1074 offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
1075 VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
1076 f, pkt->pages[p], offset);
1077 if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
1078 BUG();
1079
1080 atomic_inc(&pkt->io_wait);
1081 bio->bi_rw = READ;
1082 pkt_queue_bio(pd, bio);
1083 frames_read++;
1084 }
1085
1086out_account:
1087 VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
1088 frames_read, (unsigned long long)pkt->sector);
1089 pd->stats.pkt_started++;
1090 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
1091}
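
/*
 * Take a packet from the free list, preferring one whose cached data is
 * for @zone; otherwise reuse the tail entry and invalidate its cache.
 */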
1097static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
1098{
1099 struct packet_data *pkt;
1100
1101 list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
1102 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
1103 list_del_init(&pkt->list);
1104 if (pkt->sector != zone)
1105 pkt->cache_valid = 0;
1106 return pkt;
1107 }
1108 }
1109 BUG();
1110 return NULL;
1111}
1112
1113static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1114{
1115 if (pkt->cache_valid) {
1116 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
1117 } else {
1118 list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
1119 }
1120}
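
/*
 * Try to recover from a failed write by relocating the packet.  This
 * would need filesystem support that does not exist, so recovery always
 * fails (see the disabled code below).
 */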
1128static int pkt_start_recovery(struct packet_data *pkt)
1129{
1130
1131
1132
1133
1134 return 0;
1135#if 0
1136 struct request *rq = pkt->rq;
1137 struct pktcdvd_device *pd = rq->rq_disk->private_data;
1138 struct block_device *pkt_bdev;
1139 struct super_block *sb = NULL;
1140 unsigned long old_block, new_block;
1141 sector_t new_sector;
1142
1143 pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
1144 if (pkt_bdev) {
1145 sb = get_super(pkt_bdev);
1146 bdput(pkt_bdev);
1147 }
1148
1149 if (!sb)
1150 return 0;
1151
1152 if (!sb->s_op->relocate_blocks)
1153 goto out;
1154
1155 old_block = pkt->sector / (CD_FRAMESIZE >> 9);
1156 if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
1157 goto out;
1158
1159 new_sector = new_block * (CD_FRAMESIZE >> 9);
1160 pkt->sector = new_sector;
1161
1162 bio_reset(pkt->bio);
1163 pkt->bio->bi_bdev = pd->bdev;
1164 pkt->bio->bi_rw = REQ_WRITE;
1165 pkt->bio->bi_sector = new_sector;
1166 pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
1167 pkt->bio->bi_vcnt = pkt->frames;
1168
1169 pkt->bio->bi_end_io = pkt_end_io_packet_write;
1170 pkt->bio->bi_private = pkt;
1171
1172 drop_super(sb);
1173 return 1;
1174
1175out:
1176 drop_super(sb);
1177 return 0;
1178#endif
1179}
1180
1181static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
1182{
1183#if PACKET_DEBUG > 1
1184 static const char *state_name[] = {
1185 "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
1186 };
1187 enum packet_data_state old_state = pkt->state;
1188 VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
1189 state_name[old_state], state_name[state]);
1190#endif
1191 pkt->state = state;
1192}
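
/*
 * Scan the work queue for a zone that is not already active, claim a free
 * packet for it and move all queued bios belonging to that zone into the
 * packet.  Returns 1 if a packet was started, 0 if there is nothing to do.
 */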
1198static int pkt_handle_queue(struct pktcdvd_device *pd)
1199{
1200 struct packet_data *pkt, *p;
1201 struct bio *bio = NULL;
1202 sector_t zone = 0;
1203 struct pkt_rb_node *node, *first_node;
1204 struct rb_node *n;
1205 int wakeup;
1206
1207 VPRINTK("handle_queue\n");
1208
1209 atomic_set(&pd->scan_queue, 0);
1210
1211 if (list_empty(&pd->cdrw.pkt_free_list)) {
1212 VPRINTK("handle_queue: no pkt\n");
1213 return 0;
1214 }
1215
1216
1217
1218
1219 spin_lock(&pd->lock);
1220 first_node = pkt_rbtree_find(pd, pd->current_sector);
1221 if (!first_node) {
1222 n = rb_first(&pd->bio_queue);
1223 if (n)
1224 first_node = rb_entry(n, struct pkt_rb_node, rb_node);
1225 }
1226 node = first_node;
1227 while (node) {
1228 bio = node->bio;
1229 zone = ZONE(bio->bi_sector, pd);
1230 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
1231 if (p->sector == zone) {
1232 bio = NULL;
1233 goto try_next_bio;
1234 }
1235 }
1236 break;
1237try_next_bio:
1238 node = pkt_rbtree_next(node);
1239 if (!node) {
1240 n = rb_first(&pd->bio_queue);
1241 if (n)
1242 node = rb_entry(n, struct pkt_rb_node, rb_node);
1243 }
1244 if (node == first_node)
1245 node = NULL;
1246 }
1247 spin_unlock(&pd->lock);
1248 if (!bio) {
1249 VPRINTK("handle_queue: no bio\n");
1250 return 0;
1251 }
1252
1253 pkt = pkt_get_packet_data(pd, zone);
1254
1255 pd->current_sector = zone + pd->settings.size;
1256 pkt->sector = zone;
1257 BUG_ON(pkt->frames != pd->settings.size >> 2);
1258 pkt->write_size = 0;
1259
1260
1261
1262
1263
1264 spin_lock(&pd->lock);
1265 VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
1266 while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
1267 bio = node->bio;
1268 VPRINTK("pkt_handle_queue: found zone=%llx\n",
1269 (unsigned long long)ZONE(bio->bi_sector, pd));
1270 if (ZONE(bio->bi_sector, pd) != zone)
1271 break;
1272 pkt_rbtree_erase(pd, node);
1273 spin_lock(&pkt->lock);
1274 bio_list_add(&pkt->orig_bios, bio);
1275 pkt->write_size += bio->bi_size / CD_FRAMESIZE;
1276 spin_unlock(&pkt->lock);
1277 }
1278
1279
1280 wakeup = (pd->write_congestion_on > 0
1281 && pd->bio_queue_size <= pd->write_congestion_off);
1282 spin_unlock(&pd->lock);
1283 if (wakeup) {
1284 clear_bdi_congested(&pd->disk->queue->backing_dev_info,
1285 BLK_RW_ASYNC);
1286 }
1287
1288 pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
1289 pkt_set_state(pkt, PACKET_WAITING_STATE);
1290 atomic_set(&pkt->run_sm, 1);
1291
1292 spin_lock(&pd->cdrw.active_list_lock);
1293 list_add(&pkt->list, &pd->cdrw.pkt_active_list);
1294 spin_unlock(&pd->cdrw.active_list_lock);
1295
1296 return 1;
1297}
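
/*
 * Assemble the write bio for a complete packet and hand it to the I/O
 * scheduler.
 */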
1303static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
1304{
1305 int f;
1306 struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
1307
1308 bio_reset(pkt->w_bio);
1309 pkt->w_bio->bi_sector = pkt->sector;
1310 pkt->w_bio->bi_bdev = pd->bdev;
1311 pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
1312 pkt->w_bio->bi_private = pkt;
1313
1314
1315 for (f = 0; f < pkt->frames; f++) {
1316 bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
1317 bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
1318 if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
1319 BUG();
1320 }
1321 VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
1322
1323
1324
1325
1326 spin_lock(&pkt->lock);
1327 bio_copy_data(pkt->w_bio, pkt->orig_bios.head);
1328
1329 pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
1330 spin_unlock(&pkt->lock);
1331
1332 VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
1333 pkt->write_size, (unsigned long long)pkt->sector);
1334
1335 if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
1336 pkt_make_local_copy(pkt, bvec);
1337 pkt->cache_valid = 1;
1338 } else {
1339 pkt->cache_valid = 0;
1340 }
1341
1342
1343 atomic_set(&pkt->io_wait, 1);
1344 pkt->w_bio->bi_rw = WRITE;
1345 pkt_queue_bio(pd, pkt->w_bio);
1346}
1347
1348static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
1349{
1350 struct bio *bio;
1351
1352 if (!uptodate)
1353 pkt->cache_valid = 0;
1354
1355
1356 while ((bio = bio_list_pop(&pkt->orig_bios)))
1357 bio_endio(bio, uptodate ? 0 : -EIO);
1358}
1359
1360static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
1361{
1362 int uptodate;
1363
1364 VPRINTK("run_state_machine: pkt %d\n", pkt->id);
1365
1366 for (;;) {
1367 switch (pkt->state) {
1368 case PACKET_WAITING_STATE:
1369 if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
1370 return;
1371
1372 pkt->sleep_time = 0;
1373 pkt_gather_data(pd, pkt);
1374 pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
1375 break;
1376
1377 case PACKET_READ_WAIT_STATE:
1378 if (atomic_read(&pkt->io_wait) > 0)
1379 return;
1380
1381 if (atomic_read(&pkt->io_errors) > 0) {
1382 pkt_set_state(pkt, PACKET_RECOVERY_STATE);
1383 } else {
1384 pkt_start_write(pd, pkt);
1385 }
1386 break;
1387
1388 case PACKET_WRITE_WAIT_STATE:
1389 if (atomic_read(&pkt->io_wait) > 0)
1390 return;
1391
1392 if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
1393 pkt_set_state(pkt, PACKET_FINISHED_STATE);
1394 } else {
1395 pkt_set_state(pkt, PACKET_RECOVERY_STATE);
1396 }
1397 break;
1398
1399 case PACKET_RECOVERY_STATE:
1400 if (pkt_start_recovery(pkt)) {
1401 pkt_start_write(pd, pkt);
1402 } else {
1403 VPRINTK("No recovery possible\n");
1404 pkt_set_state(pkt, PACKET_FINISHED_STATE);
1405 }
1406 break;
1407
1408 case PACKET_FINISHED_STATE:
1409 uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
1410 pkt_finish_packet(pkt, uptodate);
1411 return;
1412
1413 default:
1414 BUG();
1415 break;
1416 }
1417 }
1418}
1419
1420static void pkt_handle_packets(struct pktcdvd_device *pd)
1421{
1422 struct packet_data *pkt, *next;
1423
1424 VPRINTK("pkt_handle_packets\n");
1425
1426
1427
1428
1429 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1430 if (atomic_read(&pkt->run_sm) > 0) {
1431 atomic_set(&pkt->run_sm, 0);
1432 pkt_run_state_machine(pd, pkt);
1433 }
1434 }
1435
1436
1437
1438
1439 spin_lock(&pd->cdrw.active_list_lock);
1440 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
1441 if (pkt->state == PACKET_FINISHED_STATE) {
1442 list_del(&pkt->list);
1443 pkt_put_packet_data(pd, pkt);
1444 pkt_set_state(pkt, PACKET_IDLE_STATE);
1445 atomic_set(&pd->scan_queue, 1);
1446 }
1447 }
1448 spin_unlock(&pd->cdrw.active_list_lock);
1449}
1450
1451static void pkt_count_states(struct pktcdvd_device *pd, int *states)
1452{
1453 struct packet_data *pkt;
1454 int i;
1455
1456 for (i = 0; i < PACKET_NUM_STATES; i++)
1457 states[i] = 0;
1458
1459 spin_lock(&pd->cdrw.active_list_lock);
1460 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1461 states[pkt->state]++;
1462 }
1463 spin_unlock(&pd->cdrw.active_list_lock);
1464}
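
/*
 * Per-device writer thread: sleep until there is work, then start new
 * packets, run the state machine of every active packet and let the I/O
 * scheduler submit the resulting bios.
 */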
1470static int kcdrwd(void *foobar)
1471{
1472 struct pktcdvd_device *pd = foobar;
1473 struct packet_data *pkt;
1474 long min_sleep_time, residue;
1475
1476 set_user_nice(current, -20);
1477 set_freezable();
1478
1479 for (;;) {
1480 DECLARE_WAITQUEUE(wait, current);
1481
1482
1483
1484
1485 add_wait_queue(&pd->wqueue, &wait);
1486 for (;;) {
1487 set_current_state(TASK_INTERRUPTIBLE);
1488
1489
1490 if (atomic_read(&pd->scan_queue) > 0)
1491 goto work_to_do;
1492
1493
1494 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1495 if (atomic_read(&pkt->run_sm) > 0)
1496 goto work_to_do;
1497 }
1498
1499
1500 if (atomic_read(&pd->iosched.attention) != 0)
1501 goto work_to_do;
1502
1503
1504 if (PACKET_DEBUG > 1) {
1505 int states[PACKET_NUM_STATES];
1506 pkt_count_states(pd, states);
1507 VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
1508 states[0], states[1], states[2], states[3],
1509 states[4], states[5]);
1510 }
1511
1512 min_sleep_time = MAX_SCHEDULE_TIMEOUT;
1513 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1514 if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
1515 min_sleep_time = pkt->sleep_time;
1516 }
1517
1518 VPRINTK("kcdrwd: sleeping\n");
1519 residue = schedule_timeout(min_sleep_time);
1520 VPRINTK("kcdrwd: wake up\n");
1521
1522
1523 try_to_freeze();
1524
1525 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1526 if (!pkt->sleep_time)
1527 continue;
1528 pkt->sleep_time -= min_sleep_time - residue;
1529 if (pkt->sleep_time <= 0) {
1530 pkt->sleep_time = 0;
1531 atomic_inc(&pkt->run_sm);
1532 }
1533 }
1534
1535 if (kthread_should_stop())
1536 break;
1537 }
1538work_to_do:
1539 set_current_state(TASK_RUNNING);
1540 remove_wait_queue(&pd->wqueue, &wait);
1541
1542 if (kthread_should_stop())
1543 break;
1544
1545
1546
1547
1548
1549 while (pkt_handle_queue(pd))
1550 ;
1551
1552
1553
1554
1555 pkt_handle_packets(pd);
1556
1557
1558
1559
1560 pkt_iosched_process_queue(pd);
1561 }
1562
1563 return 0;
1564}
1565
1566static void pkt_print_settings(struct pktcdvd_device *pd)
1567{
1568 printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
1569 printk("%u blocks, ", pd->settings.size >> 2);
 printk("Mode-%c disc\n", pd->settings.block_mode == PACKET_BLOCK_MODE1 ? '1' : '2');
1571}
1572
1573static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
1574{
1575 memset(cgc->cmd, 0, sizeof(cgc->cmd));
1576
1577 cgc->cmd[0] = GPCMD_MODE_SENSE_10;
1578 cgc->cmd[2] = page_code | (page_control << 6);
1579 cgc->cmd[7] = cgc->buflen >> 8;
1580 cgc->cmd[8] = cgc->buflen & 0xff;
1581 cgc->data_direction = CGC_DATA_READ;
1582 return pkt_generic_packet(pd, cgc);
1583}
1584
1585static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
1586{
1587 memset(cgc->cmd, 0, sizeof(cgc->cmd));
1588 memset(cgc->buffer, 0, 2);
1589 cgc->cmd[0] = GPCMD_MODE_SELECT_10;
1590 cgc->cmd[1] = 0x10;
1591 cgc->cmd[7] = cgc->buflen >> 8;
1592 cgc->cmd[8] = cgc->buflen & 0xff;
1593 cgc->data_direction = CGC_DATA_WRITE;
1594 return pkt_generic_packet(pd, cgc);
1595}
1596
1597static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
1598{
1599 struct packet_command cgc;
1600 int ret;
1601
1602
1603 init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
1604 cgc.cmd[0] = GPCMD_READ_DISC_INFO;
1605 cgc.cmd[8] = cgc.buflen = 2;
1606 cgc.quiet = 1;
1607
1608 if ((ret = pkt_generic_packet(pd, &cgc)))
1609 return ret;
1610
1611
1612
1613
1614 cgc.buflen = be16_to_cpu(di->disc_information_length) +
1615 sizeof(di->disc_information_length);
1616
1617 if (cgc.buflen > sizeof(disc_information))
1618 cgc.buflen = sizeof(disc_information);
1619
1620 cgc.cmd[8] = cgc.buflen;
1621 return pkt_generic_packet(pd, &cgc);
1622}
1623
1624static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
1625{
1626 struct packet_command cgc;
1627 int ret;
1628
1629 init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
1630 cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
1631 cgc.cmd[1] = type & 3;
1632 cgc.cmd[4] = (track & 0xff00) >> 8;
1633 cgc.cmd[5] = track & 0xff;
1634 cgc.cmd[8] = 8;
1635 cgc.quiet = 1;
1636
1637 if ((ret = pkt_generic_packet(pd, &cgc)))
1638 return ret;
1639
1640 cgc.buflen = be16_to_cpu(ti->track_information_length) +
1641 sizeof(ti->track_information_length);
1642
1643 if (cgc.buflen > sizeof(track_information))
1644 cgc.buflen = sizeof(track_information);
1645
1646 cgc.cmd[8] = cgc.buflen;
1647 return pkt_generic_packet(pd, &cgc);
1648}
1649
1650static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
1651 long *last_written)
1652{
1653 disc_information di;
1654 track_information ti;
1655 __u32 last_track;
1656 int ret = -1;
1657
1658 if ((ret = pkt_get_disc_info(pd, &di)))
1659 return ret;
1660
1661 last_track = (di.last_track_msb << 8) | di.last_track_lsb;
1662 if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
1663 return ret;
1664
1665
1666 if (ti.blank) {
1667 last_track--;
1668 if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
1669 return ret;
1670 }
1671
1672
1673 if (ti.lra_v) {
1674 *last_written = be32_to_cpu(ti.last_rec_address);
1675 } else {
1676
1677 *last_written = be32_to_cpu(ti.track_start) +
1678 be32_to_cpu(ti.track_size);
1679 if (ti.free_blocks)
1680 *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
1681 }
1682 return 0;
1683}
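
/*
 * Program the drive's write parameters mode page from pd->settings.
 */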
1688static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
1689{
1690 struct packet_command cgc;
1691 struct request_sense sense;
1692 write_param_page *wp;
1693 char buffer[128];
1694 int ret, size;
1695
1696
1697 if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
1698 return 0;
1699
1700 memset(buffer, 0, sizeof(buffer));
1701 init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
1702 cgc.sense = &sense;
1703 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
1704 pkt_dump_sense(&cgc);
1705 return ret;
1706 }
1707
1708 size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
1709 pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
1710 if (size > sizeof(buffer))
1711 size = sizeof(buffer);
1712
1713
1714
1715
1716 init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
1717 cgc.sense = &sense;
1718 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
1719 pkt_dump_sense(&cgc);
1720 return ret;
1721 }
1722
1723
1724
1725
1726 wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
1727
1728 wp->fp = pd->settings.fp;
1729 wp->track_mode = pd->settings.track_mode;
1730 wp->write_type = pd->settings.write_type;
1731 wp->data_block_type = pd->settings.block_mode;
1732
1733 wp->multi_session = 0;
1734
1735#ifdef PACKET_USE_LS
1736 wp->link_size = 7;
1737 wp->ls_v = 1;
1738#endif
1739
1740 if (wp->data_block_type == PACKET_BLOCK_MODE1) {
1741 wp->session_format = 0;
1742 wp->subhdr2 = 0x20;
1743 } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
1744 wp->session_format = 0x20;
1745 wp->subhdr2 = 8;
1746#if 0
1747 wp->mcn[0] = 0x80;
1748 memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
1749#endif
1750 } else {
1751
1752
1753
1754 printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type);
1755 return 1;
1756 }
1757 wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
1758
1759 cgc.buflen = cgc.cmd[8] = size;
1760 if ((ret = pkt_mode_select(pd, &cgc))) {
1761 pkt_dump_sense(&cgc);
1762 return ret;
1763 }
1764
1765 pkt_print_settings(pd);
1766 return 0;
1767}
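
/*
 * Return 1 if the given track can be used for packet writing, 0 if not.
 */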
1772static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
1773{
1774 switch (pd->mmc3_profile) {
1775 case 0x1a:
1776 case 0x12:
1777
1778 return 1;
1779 default:
1780 break;
1781 }
1782
1783 if (!ti->packet || !ti->fp)
1784 return 0;
1785
1786
1787
1788
1789 if (ti->rt == 0 && ti->blank == 0)
1790 return 1;
1791
1792 if (ti->rt == 0 && ti->blank == 1)
1793 return 1;
1794
1795 if (ti->rt == 1 && ti->blank == 0)
1796 return 1;
1797
1798 printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1799 return 0;
1800}
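
/*
 * Return 1 if the inserted disc can be used for packet writing, 0 if not.
 */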
1805static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
1806{
1807 switch (pd->mmc3_profile) {
1808 case 0x0a:
1809 case 0xffff:
1810 break;
1811 case 0x1a:
1812 case 0x13:
1813 case 0x12:
1814 return 1;
1815 default:
1816 VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile);
1817 return 0;
1818 }
1819
1820
1821
1822
1823
1824 if (di->disc_type == 0xff) {
1825 printk(DRIVER_NAME": Unknown disc. No track?\n");
1826 return 0;
1827 }
1828
1829 if (di->disc_type != 0x20 && di->disc_type != 0) {
1830 printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type);
1831 return 0;
1832 }
1833
1834 if (di->erasable == 0) {
1835 printk(DRIVER_NAME": Disc not erasable\n");
1836 return 0;
1837 }
1838
1839 if (di->border_status == PACKET_SESSION_RESERVED) {
1840 printk(DRIVER_NAME": Can't write to last track (reserved)\n");
1841 return 0;
1842 }
1843
1844 return 1;
1845}
1846
1847static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
1848{
1849 struct packet_command cgc;
1850 unsigned char buf[12];
1851 disc_information di;
1852 track_information ti;
1853 int ret, track;
1854
1855 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1856 cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
1857 cgc.cmd[8] = 8;
1858 ret = pkt_generic_packet(pd, &cgc);
1859 pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
1860
1861 memset(&di, 0, sizeof(disc_information));
1862 memset(&ti, 0, sizeof(track_information));
1863
1864 if ((ret = pkt_get_disc_info(pd, &di))) {
 printk(DRIVER_NAME": failed get_disc\n");
1866 return ret;
1867 }
1868
1869 if (!pkt_writable_disc(pd, &di))
1870 return -EROFS;
1871
1872 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1873
1874 track = 1;
1875 if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
1876 printk(DRIVER_NAME": failed get_track\n");
1877 return ret;
1878 }
1879
1880 if (!pkt_writable_track(pd, &ti)) {
1881 printk(DRIVER_NAME": can't write to this track\n");
1882 return -EROFS;
1883 }
1884
1885
1886
1887
1888
1889 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1890 if (pd->settings.size == 0) {
1891 printk(DRIVER_NAME": detected zero packet size!\n");
1892 return -ENXIO;
1893 }
1894 if (pd->settings.size > PACKET_MAX_SECTORS) {
1895 printk(DRIVER_NAME": packet size is too big\n");
1896 return -EROFS;
1897 }
1898 pd->settings.fp = ti.fp;
1899 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1900
1901 if (ti.nwa_v) {
1902 pd->nwa = be32_to_cpu(ti.next_writable);
1903 set_bit(PACKET_NWA_VALID, &pd->flags);
1904 }
1905
1906
1907
1908
1909
1910
1911 if (ti.lra_v) {
1912 pd->lra = be32_to_cpu(ti.last_rec_address);
1913 set_bit(PACKET_LRA_VALID, &pd->flags);
1914 } else {
1915 pd->lra = 0xffffffff;
1916 set_bit(PACKET_LRA_VALID, &pd->flags);
1917 }
1918
1919
1920
1921
1922 pd->settings.link_loss = 7;
1923 pd->settings.write_type = 0;
1924 pd->settings.track_mode = ti.track_mode;
1925
1926
1927
1928
1929 switch (ti.data_mode) {
1930 case PACKET_MODE1:
1931 pd->settings.block_mode = PACKET_BLOCK_MODE1;
1932 break;
1933 case PACKET_MODE2:
1934 pd->settings.block_mode = PACKET_BLOCK_MODE2;
1935 break;
1936 default:
1937 printk(DRIVER_NAME": unknown data mode\n");
1938 return -EROFS;
1939 }
1940 return 0;
1941}
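
/*
 * Enable or disable the drive's write cache.
 */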
1946static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
1947 int set)
1948{
1949 struct packet_command cgc;
1950 struct request_sense sense;
1951 unsigned char buf[64];
1952 int ret;
1953
1954 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1955 cgc.sense = &sense;
1956 cgc.buflen = pd->mode_offset + 12;
1957
1958
1959
1960
1961 cgc.quiet = 1;
1962
1963 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
1964 return ret;
1965
1966 buf[pd->mode_offset + 10] |= (!!set << 2);
1967
1968 cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1969 ret = pkt_mode_select(pd, &cgc);
1970 if (ret) {
1971 printk(DRIVER_NAME": write caching control failed\n");
1972 pkt_dump_sense(&cgc);
1973 } else if (!ret && set)
1974 printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
1975 return ret;
1976}
1977
1978static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
1979{
1980 struct packet_command cgc;
1981
1982 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1983 cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
1984 cgc.cmd[4] = lockflag ? 1 : 0;
1985 return pkt_generic_packet(pd, &cgc);
1986}
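
/*
 * Read the drive's maximum write speed (in kB/s) from the capabilities
 * mode page.
 */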
1991static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
1992 unsigned *write_speed)
1993{
1994 struct packet_command cgc;
1995 struct request_sense sense;
1996 unsigned char buf[256+18];
1997 unsigned char *cap_buf;
1998 int ret, offset;
1999
2000 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
2001 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
2002 cgc.sense = &sense;
2003
2004 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
2005 if (ret) {
2006 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
2007 sizeof(struct mode_page_header);
2008 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
2009 if (ret) {
2010 pkt_dump_sense(&cgc);
2011 return ret;
2012 }
2013 }
2014
2015 offset = 20;
2016 if (cap_buf[1] >= 28)
2017 offset = 28;
2018 if (cap_buf[1] >= 30) {
2019
2020
2021
2022
2023 int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
2024 if (num_spdb > 0)
2025 offset = 34;
2026 }
2027
2028 *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
2029 return 0;
2030}
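
/*
 * ATIP speed-field to CD speed multiplier tables for normal, high-speed
 * and ultra-speed CD-RW media (0 means reserved/unknown).
 */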
2034static char clv_to_speed[16] = {
2035
2036 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2037};
2038
2039static char hs_clv_to_speed[16] = {
2040
2041 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2042};
2043
2044static char us_clv_to_speed[16] = {
2045
2046 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
2047};
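
/*
 * Read the maximum media write speed from the ATIP and convert it to a
 * CD speed multiplier.
 */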
2052static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
2053 unsigned *speed)
2054{
2055 struct packet_command cgc;
2056 struct request_sense sense;
2057 unsigned char buf[64];
2058 unsigned int size, st, sp;
2059 int ret;
2060
2061 init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
2062 cgc.sense = &sense;
2063 cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
2064 cgc.cmd[1] = 2;
2065 cgc.cmd[2] = 4;
2066 cgc.cmd[8] = 2;
2067 ret = pkt_generic_packet(pd, &cgc);
2068 if (ret) {
2069 pkt_dump_sense(&cgc);
2070 return ret;
2071 }
2072 size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
2073 if (size > sizeof(buf))
2074 size = sizeof(buf);
2075
2076 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
2077 cgc.sense = &sense;
2078 cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
2079 cgc.cmd[1] = 2;
2080 cgc.cmd[2] = 4;
2081 cgc.cmd[8] = size;
2082 ret = pkt_generic_packet(pd, &cgc);
2083 if (ret) {
2084 pkt_dump_sense(&cgc);
2085 return ret;
2086 }
2087
2088 if (!(buf[6] & 0x40)) {
2089 printk(DRIVER_NAME": Disc type is not CD-RW\n");
2090 return 1;
2091 }
2092 if (!(buf[6] & 0x4)) {
 printk(DRIVER_NAME": A1 values on media are not valid, maybe not CD-RW?\n");
2094 return 1;
2095 }
2096
2097 st = (buf[6] >> 3) & 0x7;
2098
2099 sp = buf[16] & 0xf;
2100
2101
2102 switch (st) {
2103 case 0:
2104 *speed = clv_to_speed[sp];
2105 break;
2106 case 1:
2107 *speed = hs_clv_to_speed[sp];
2108 break;
2109 case 2:
2110 *speed = us_clv_to_speed[sp];
2111 break;
2112 default:
2113 printk(DRIVER_NAME": Unknown disc sub-type %d\n",st);
2114 return 1;
2115 }
2116 if (*speed) {
2117 printk(DRIVER_NAME": Max. media speed: %d\n",*speed);
2118 return 0;
2119 } else {
2120 printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n",sp,st);
2121 return 1;
2122 }
2123}
2124
2125static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
2126{
2127 struct packet_command cgc;
2128 struct request_sense sense;
2129 int ret;
2130
2131 VPRINTK(DRIVER_NAME": Performing OPC\n");
2132
2133 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
2134 cgc.sense = &sense;
2135 cgc.timeout = 60*HZ;
2136 cgc.cmd[0] = GPCMD_SEND_OPC;
2137 cgc.cmd[1] = 1;
2138 if ((ret = pkt_generic_packet(pd, &cgc)))
2139 pkt_dump_sense(&cgc);
2140 return ret;
2141}
2142
2143static int pkt_open_write(struct pktcdvd_device *pd)
2144{
2145 int ret;
2146 unsigned int write_speed, media_write_speed, read_speed;
2147
2148 if ((ret = pkt_probe_settings(pd))) {
2149 VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name);
2150 return ret;
2151 }
2152
2153 if ((ret = pkt_set_write_settings(pd))) {
2154 DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name);
2155 return -EIO;
2156 }
2157
2158 pkt_write_caching(pd, USE_WCACHING);
2159
2160 if ((ret = pkt_get_max_speed(pd, &write_speed)))
2161 write_speed = 16 * 177;
2162 switch (pd->mmc3_profile) {
2163 case 0x13:
2164 case 0x1a:
2165 case 0x12:
2166 DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed);
2167 break;
2168 default:
2169 if ((ret = pkt_media_speed(pd, &media_write_speed)))
2170 media_write_speed = 16;
2171 write_speed = min(write_speed, media_write_speed * 177);
2172 DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176);
2173 break;
2174 }
2175 read_speed = write_speed;
2176
2177 if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
2178 DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name);
2179 return -EIO;
2180 }
2181 pd->write_speed = write_speed;
2182 pd->read_speed = read_speed;
2183
2184 if ((ret = pkt_perform_opc(pd))) {
2185 DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name);
2186 }
2187
2188 return 0;
2189}
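
/*
 * Open the underlying drive exclusively and prepare it for reading or
 * packet writing; called on the first open of the pktcdvd device.
 */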
2194static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
2195{
2196 int ret;
2197 long lba;
2198 struct request_queue *q;
2199
2200
2201
2202
2203
2204
2205 bdget(pd->bdev->bd_dev);
2206 if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
2207 goto out;
2208
2209 if ((ret = pkt_get_last_written(pd, &lba))) {
2210 printk(DRIVER_NAME": pkt_get_last_written failed\n");
2211 goto out_putdev;
2212 }
2213
2214 set_capacity(pd->disk, lba << 2);
2215 set_capacity(pd->bdev->bd_disk, lba << 2);
2216 bd_set_size(pd->bdev, (loff_t)lba << 11);
2217
2218 q = bdev_get_queue(pd->bdev);
2219 if (write) {
2220 if ((ret = pkt_open_write(pd)))
2221 goto out_putdev;
2222
2223
2224
2225
2226 spin_lock_irq(q->queue_lock);
2227 blk_queue_max_hw_sectors(q, pd->settings.size);
2228 spin_unlock_irq(q->queue_lock);
2229 set_bit(PACKET_WRITABLE, &pd->flags);
2230 } else {
2231 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2232 clear_bit(PACKET_WRITABLE, &pd->flags);
2233 }
2234
2235 if ((ret = pkt_set_segment_merging(pd, q)))
2236 goto out_putdev;
2237
2238 if (write) {
2239 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
2240 printk(DRIVER_NAME": not enough memory for buffers\n");
2241 ret = -ENOMEM;
2242 goto out_putdev;
2243 }
2244 printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
2245 }
2246
2247 return 0;
2248
2249out_putdev:
2250 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
2251out:
2252 return ret;
2253}
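
/*
 * Counterpart of pkt_open_dev(): flush the drive cache if needed, unlock
 * the door and release the underlying drive.
 */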
2259static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2260{
2261 if (flush && pkt_flush_cache(pd))
2262 DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name);
2263
2264 pkt_lock_door(pd, 0);
2265
2266 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2267 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
2268
2269 pkt_shrink_pktlist(pd);
2270}
2271
2272static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
2273{
2274 if (dev_minor >= MAX_WRITERS)
2275 return NULL;
2276 return pkt_devs[dev_minor];
2277}
2278
2279static int pkt_open(struct block_device *bdev, fmode_t mode)
2280{
2281 struct pktcdvd_device *pd = NULL;
2282 int ret;
2283
2284 VPRINTK(DRIVER_NAME": entering open\n");
2285
2286 mutex_lock(&pktcdvd_mutex);
2287 mutex_lock(&ctl_mutex);
2288 pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
2289 if (!pd) {
2290 ret = -ENODEV;
2291 goto out;
2292 }
2293 BUG_ON(pd->refcnt < 0);
2294
2295 pd->refcnt++;
2296 if (pd->refcnt > 1) {
2297 if ((mode & FMODE_WRITE) &&
2298 !test_bit(PACKET_WRITABLE, &pd->flags)) {
2299 ret = -EBUSY;
2300 goto out_dec;
2301 }
2302 } else {
2303 ret = pkt_open_dev(pd, mode & FMODE_WRITE);
2304 if (ret)
2305 goto out_dec;
2306
2307
2308
2309
2310 set_blocksize(bdev, CD_FRAMESIZE);
2311 }
2312
2313 mutex_unlock(&ctl_mutex);
2314 mutex_unlock(&pktcdvd_mutex);
2315 return 0;
2316
2317out_dec:
2318 pd->refcnt--;
2319out:
2320 VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
2321 mutex_unlock(&ctl_mutex);
2322 mutex_unlock(&pktcdvd_mutex);
2323 return ret;
2324}
2325
2326static void pkt_close(struct gendisk *disk, fmode_t mode)
2327{
2328 struct pktcdvd_device *pd = disk->private_data;
2329
2330 mutex_lock(&pktcdvd_mutex);
2331 mutex_lock(&ctl_mutex);
2332 pd->refcnt--;
2333 BUG_ON(pd->refcnt < 0);
2334 if (pd->refcnt == 0) {
2335 int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2336 pkt_release_dev(pd, flush);
2337 }
2338 mutex_unlock(&ctl_mutex);
2339 mutex_unlock(&pktcdvd_mutex);
2340}
2341
2342
2343static void pkt_end_io_read_cloned(struct bio *bio, int err)
2344{
2345 struct packet_stacked_data *psd = bio->bi_private;
2346 struct pktcdvd_device *pd = psd->pd;
2347
2348 bio_put(bio);
2349 bio_endio(psd->bio, err);
2350 mempool_free(psd, psd_pool);
2351 pkt_bio_finished(pd);
2352}
2353
2354static void pkt_make_request(struct request_queue *q, struct bio *bio)
2355{
2356 struct pktcdvd_device *pd;
2357 char b[BDEVNAME_SIZE];
2358 sector_t zone;
2359 struct packet_data *pkt;
2360 int was_empty, blocked_bio;
2361 struct pkt_rb_node *node;
2362
2363 pd = q->queuedata;
2364 if (!pd) {
2365 printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
2366 goto end_io;
2367 }
2368
2369
2370
2371
2372 if (bio_data_dir(bio) == READ) {
2373 struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
2374 struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
2375
2376 psd->pd = pd;
2377 psd->bio = bio;
2378 cloned_bio->bi_bdev = pd->bdev;
2379 cloned_bio->bi_private = psd;
2380 cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2381 pd->stats.secs_r += bio_sectors(bio);
2382 pkt_queue_bio(pd, cloned_bio);
2383 return;
2384 }
2385
2386 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2387 printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n",
2388 pd->name, (unsigned long long)bio->bi_sector);
2389 goto end_io;
2390 }
2391
2392 if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
2393 printk(DRIVER_NAME": wrong bio size\n");
2394 goto end_io;
2395 }
2396
2397 blk_queue_bounce(q, &bio);
2398
2399 zone = ZONE(bio->bi_sector, pd);
2400 VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
2401 (unsigned long long)bio->bi_sector,
2402 (unsigned long long)bio_end_sector(bio));
2403
	/* Check if we have to split the bio at a packet (zone) boundary. */
2405 {
2406 struct bio_pair *bp;
2407 sector_t last_zone;
2408 int first_sectors;
2409
2410 last_zone = ZONE(bio_end_sector(bio) - 1, pd);
2411 if (last_zone != zone) {
2412 BUG_ON(last_zone != zone + pd->settings.size);
2413 first_sectors = last_zone - bio->bi_sector;
2414 bp = bio_split(bio, first_sectors);
2415 BUG_ON(!bp);
2416 pkt_make_request(q, &bp->bio1);
2417 pkt_make_request(q, &bp->bio2);
2418 bio_pair_release(bp);
2419 return;
2420 }
2421 }
2422
	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
2427 spin_lock(&pd->cdrw.active_list_lock);
2428 blocked_bio = 0;
2429 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2430 if (pkt->sector == zone) {
2431 spin_lock(&pkt->lock);
2432 if ((pkt->state == PACKET_WAITING_STATE) ||
2433 (pkt->state == PACKET_READ_WAIT_STATE)) {
2434 bio_list_add(&pkt->orig_bios, bio);
2435 pkt->write_size += bio->bi_size / CD_FRAMESIZE;
2436 if ((pkt->write_size >= pkt->frames) &&
2437 (pkt->state == PACKET_WAITING_STATE)) {
2438 atomic_inc(&pkt->run_sm);
2439 wake_up(&pd->wqueue);
2440 }
2441 spin_unlock(&pkt->lock);
2442 spin_unlock(&pd->cdrw.active_list_lock);
2443 return;
2444 } else {
2445 blocked_bio = 1;
2446 }
2447 spin_unlock(&pkt->lock);
2448 }
2449 }
2450 spin_unlock(&pd->cdrw.active_list_lock);
2451
	/*
	 * Test if there is enough room left in the bio work queue
	 * (queue size >= congestion on mark).
	 * If not, wait until the queue size drops below the congestion
	 * off mark.
	 */
2457 spin_lock(&pd->lock);
2458 if (pd->write_congestion_on > 0
2459 && pd->bio_queue_size >= pd->write_congestion_on) {
2460 set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
2461 do {
2462 spin_unlock(&pd->lock);
2463 congestion_wait(BLK_RW_ASYNC, HZ);
2464 spin_lock(&pd->lock);
2465 } while(pd->bio_queue_size > pd->write_congestion_off);
2466 }
2467 spin_unlock(&pd->lock);
2468
	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
2472 node = mempool_alloc(pd->rb_pool, GFP_NOIO);
2473 node->bio = bio;
2474 spin_lock(&pd->lock);
2475 BUG_ON(pd->bio_queue_size < 0);
2476 was_empty = (pd->bio_queue_size == 0);
2477 pkt_rbtree_insert(pd, node);
2478 spin_unlock(&pd->lock);
2479
	/*
	 * Wake up the worker thread.
	 */
2483 atomic_set(&pd->scan_queue, 1);
2484 if (was_empty) {
		/* This wake_up is required for correct operation */
2486 wake_up(&pd->wqueue);
2487 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
2492 wake_up(&pd->wqueue);
2493 }
2494 return;
2495end_io:
2496 bio_io_error(bio);
2497}
2498
2499
2500
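/*
 * merge_bvec callback: only allow a bio to grow as far as the end of the
 * current packet (zone), but always allow at least PAGE_SIZE; bios that
 * still cross a zone boundary are split in pkt_make_request().
 */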
2501static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
2502 struct bio_vec *bvec)
2503{
2504 struct pktcdvd_device *pd = q->queuedata;
2505 sector_t zone = ZONE(bmd->bi_sector, pd);
2506 int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
2507 int remaining = (pd->settings.size << 9) - used;
2508 int remaining2;
2509
	/*
	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
	 * boundary, pkt_make_request() will split it.
	 */
2514 remaining2 = PAGE_SIZE - bmd->bi_size;
2515 remaining = max(remaining, remaining2);
2516
2517 BUG_ON(remaining < 0);
2518 return remaining;
2519}
2520
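/*
 * Initialize the request queue of the pktcdvd gendisk: bio based via
 * pkt_make_request(), CD_FRAMESIZE (2 kB) logical blocks, and at most
 * PACKET_MAX_SECTORS per request.
 */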
2521static void pkt_init_queue(struct pktcdvd_device *pd)
2522{
2523 struct request_queue *q = pd->disk->queue;
2524
2525 blk_queue_make_request(q, pkt_make_request);
2526 blk_queue_logical_block_size(q, CD_FRAMESIZE);
2527 blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
2528 blk_queue_merge_bvec(q, pkt_merge_bvec);
2529 q->queuedata = pd;
2530}
2531
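/*
 * Dump per-device settings, statistics and queue state to the
 * /proc/driver/pktcdvd/<name> status file.
 */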
2532static int pkt_seq_show(struct seq_file *m, void *p)
2533{
2534 struct pktcdvd_device *pd = m->private;
2535 char *msg;
2536 char bdev_buf[BDEVNAME_SIZE];
2537 int states[PACKET_NUM_STATES];
2538
2539 seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
2540 bdevname(pd->bdev, bdev_buf));
2541
2542 seq_printf(m, "\nSettings:\n");
2543 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
2544
2545 if (pd->settings.write_type == 0)
2546 msg = "Packet";
2547 else
2548 msg = "Unknown";
2549 seq_printf(m, "\twrite type:\t\t%s\n", msg);
2550
2551 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
2552 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
2553
2554 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
2555
2556 if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
2557 msg = "Mode 1";
2558 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
2559 msg = "Mode 2";
2560 else
2561 msg = "Unknown";
2562 seq_printf(m, "\tblock mode:\t\t%s\n", msg);
2563
2564 seq_printf(m, "\nStatistics:\n");
2565 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
2566 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
2567 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
2568 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
2569 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
2570
2571 seq_printf(m, "\nMisc:\n");
2572 seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
2573 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
2574 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
2575 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
2576 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
2577 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
2578
2579 seq_printf(m, "\nQueue state:\n");
2580 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
2581 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
2582 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
2583
2584 pkt_count_states(pd, states);
2585 seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
2586 states[0], states[1], states[2], states[3], states[4], states[5]);
2587
2588 seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
2589 pd->write_congestion_off,
2590 pd->write_congestion_on);
2591 return 0;
2592}
2593
2594static int pkt_seq_open(struct inode *inode, struct file *file)
2595{
2596 return single_open(file, pkt_seq_show, PDE_DATA(inode));
2597}
2598
2599static const struct file_operations pkt_proc_fops = {
2600 .open = pkt_seq_open,
2601 .read = seq_read,
2602 .llseek = seq_lseek,
2603 .release = single_release
2604};
2605
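/*
 * Attach pktcdvd device @pd to the CD-ROM device @dev: reject recursive
 * or duplicate mappings, take a reference on the block device, set up
 * the queue and start the per-device writer thread (kcdrwd).
 */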
2606static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2607{
2608 int i;
2609 int ret = 0;
2610 char b[BDEVNAME_SIZE];
2611 struct block_device *bdev;
2612
2613 if (pd->pkt_dev == dev) {
2614 printk(DRIVER_NAME": Recursive setup not allowed\n");
2615 return -EBUSY;
2616 }
2617 for (i = 0; i < MAX_WRITERS; i++) {
2618 struct pktcdvd_device *pd2 = pkt_devs[i];
2619 if (!pd2)
2620 continue;
2621 if (pd2->bdev->bd_dev == dev) {
2622 printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b));
2623 return -EBUSY;
2624 }
2625 if (pd2->pkt_dev == dev) {
2626 printk(DRIVER_NAME": Can't chain pktcdvd devices\n");
2627 return -EBUSY;
2628 }
2629 }
2630
2631 bdev = bdget(dev);
2632 if (!bdev)
2633 return -ENOMEM;
2634 ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
2635 if (ret)
2636 return ret;
2637
	/* This is safe, since we have a reference from open(). */
2639 __module_get(THIS_MODULE);
2640
2641 pd->bdev = bdev;
2642 set_blocksize(bdev, CD_FRAMESIZE);
2643
2644 pkt_init_queue(pd);
2645
2646 atomic_set(&pd->cdrw.pending_bios, 0);
2647 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
2648 if (IS_ERR(pd->cdrw.thread)) {
2649 printk(DRIVER_NAME": can't start kernel thread\n");
2650 ret = -ENOMEM;
2651 goto out_mem;
2652 }
2653
2654 proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
2655 DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
2656 return 0;
2657
2658out_mem:
2659 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
	/* This is safe: open() is still holding a reference. */
2661 module_put(THIS_MODULE);
2662 return ret;
2663}
2664
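/*
 * ioctl handler for the pktcdvd device: a small set of CDROM/SCSI
 * ioctls is forwarded to the underlying drive (needed e.g. for UDF
 * tools); everything else returns -ENOTTY.
 */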
2665static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
2666{
2667 struct pktcdvd_device *pd = bdev->bd_disk->private_data;
2668 int ret;
2669
2670 VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
2671 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2672
2673 mutex_lock(&pktcdvd_mutex);
2674 switch (cmd) {
2675 case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it here or else the eject command fails.
		 */
2680 if (pd->refcnt == 1)
2681 pkt_lock_door(pd, 0);
		/* fall through */

	/*
	 * Forward selected CDROM ioctls to the CD-ROM driver, needed for UDF.
	 */
2686 case CDROMMULTISESSION:
2687 case CDROMREADTOCENTRY:
2688 case CDROM_LAST_WRITTEN:
2689 case CDROM_SEND_PACKET:
2690 case SCSI_IOCTL_SEND_COMMAND:
2691 ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
2692 break;
2693
2694 default:
2695 VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
2696 ret = -ENOTTY;
2697 }
2698 mutex_unlock(&pktcdvd_mutex);
2699
2700 return ret;
2701}
2702
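/*
 * pktcdvd generates no media events of its own; forward the check to
 * the attached CD-ROM driver, if there is one.
 */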
2703static unsigned int pkt_check_events(struct gendisk *disk,
2704 unsigned int clearing)
2705{
2706 struct pktcdvd_device *pd = disk->private_data;
2707 struct gendisk *attached_disk;
2708
2709 if (!pd)
2710 return 0;
2711 if (!pd->bdev)
2712 return 0;
2713 attached_disk = pd->bdev->bd_disk;
2714 if (!attached_disk || !attached_disk->fops->check_events)
2715 return 0;
2716 return attached_disk->fops->check_events(attached_disk, clearing);
2717}
2718
2719static const struct block_device_operations pktcdvd_ops = {
2720 .owner = THIS_MODULE,
2721 .open = pkt_open,
2722 .release = pkt_close,
2723 .ioctl = pkt_ioctl,
2724 .check_events = pkt_check_events,
2725};
2726
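/*
 * Put the device nodes under /dev/pktcdvd/, e.g. /dev/pktcdvd/pktcdvd0.
 */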
2727static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
2728{
2729 return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
2730}
2731
/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
2735static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
2736{
2737 int idx;
2738 int ret = -ENOMEM;
2739 struct pktcdvd_device *pd;
2740 struct gendisk *disk;
2741
2742 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2743
2744 for (idx = 0; idx < MAX_WRITERS; idx++)
2745 if (!pkt_devs[idx])
2746 break;
2747 if (idx == MAX_WRITERS) {
2748 printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS);
2749 ret = -EBUSY;
2750 goto out_mutex;
2751 }
2752
2753 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
2754 if (!pd)
2755 goto out_mutex;
2756
2757 pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
2758 sizeof(struct pkt_rb_node));
2759 if (!pd->rb_pool)
2760 goto out_mem;
2761
2762 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
2763 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
2764 spin_lock_init(&pd->cdrw.active_list_lock);
2765
2766 spin_lock_init(&pd->lock);
2767 spin_lock_init(&pd->iosched.lock);
2768 bio_list_init(&pd->iosched.read_queue);
2769 bio_list_init(&pd->iosched.write_queue);
2770 sprintf(pd->name, DRIVER_NAME"%d", idx);
2771 init_waitqueue_head(&pd->wqueue);
2772 pd->bio_queue = RB_ROOT;
2773
2774 pd->write_congestion_on = write_congestion_on;
2775 pd->write_congestion_off = write_congestion_off;
2776
2777 disk = alloc_disk(1);
2778 if (!disk)
2779 goto out_mem;
2780 pd->disk = disk;
2781 disk->major = pktdev_major;
2782 disk->first_minor = idx;
2783 disk->fops = &pktcdvd_ops;
2784 disk->flags = GENHD_FL_REMOVABLE;
2785 strcpy(disk->disk_name, pd->name);
2786 disk->devnode = pktcdvd_devnode;
2787 disk->private_data = pd;
2788 disk->queue = blk_alloc_queue(GFP_KERNEL);
2789 if (!disk->queue)
2790 goto out_mem2;
2791
2792 pd->pkt_dev = MKDEV(pktdev_major, idx);
2793 ret = pkt_new_dev(pd, dev);
2794 if (ret)
2795 goto out_new_dev;
2796
	/* Inherit events of the host CD-ROM device. */
2798 disk->events = pd->bdev->bd_disk->events;
2799 disk->async_events = pd->bdev->bd_disk->async_events;
2800
2801 add_disk(disk);
2802
2803 pkt_sysfs_dev_new(pd);
2804 pkt_debugfs_dev_new(pd);
2805
2806 pkt_devs[idx] = pd;
2807 if (pkt_dev)
2808 *pkt_dev = pd->pkt_dev;
2809
2810 mutex_unlock(&ctl_mutex);
2811 return 0;
2812
2813out_new_dev:
2814 blk_cleanup_queue(disk->queue);
2815out_mem2:
2816 put_disk(disk);
2817out_mem:
2818 if (pd->rb_pool)
2819 mempool_destroy(pd->rb_pool);
2820 kfree(pd);
2821out_mutex:
2822 mutex_unlock(&ctl_mutex);
2823 printk(DRIVER_NAME": setup of pktcdvd device failed\n");
2824 return ret;
2825}
2826
/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
2830static int pkt_remove_dev(dev_t pkt_dev)
2831{
2832 struct pktcdvd_device *pd;
2833 int idx;
2834 int ret = 0;
2835
2836 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2837
2838 for (idx = 0; idx < MAX_WRITERS; idx++) {
2839 pd = pkt_devs[idx];
2840 if (pd && (pd->pkt_dev == pkt_dev))
2841 break;
2842 }
2843 if (idx == MAX_WRITERS) {
2844 DPRINTK(DRIVER_NAME": dev not setup\n");
2845 ret = -ENXIO;
2846 goto out;
2847 }
2848
2849 if (pd->refcnt > 0) {
2850 ret = -EBUSY;
2851 goto out;
2852 }
2853 if (!IS_ERR(pd->cdrw.thread))
2854 kthread_stop(pd->cdrw.thread);
2855
2856 pkt_devs[idx] = NULL;
2857
2858 pkt_debugfs_dev_remove(pd);
2859 pkt_sysfs_dev_remove(pd);
2860
2861 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
2862
2863 remove_proc_entry(pd->name, pkt_proc);
2864 DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
2865
2866 del_gendisk(pd->disk);
2867 blk_cleanup_queue(pd->disk->queue);
2868 put_disk(pd->disk);
2869
2870 mempool_destroy(pd->rb_pool);
2871 kfree(pd);
2872
	/* This is safe: open() is still holding a reference. */
2874 module_put(THIS_MODULE);
2875
2876out:
2877 mutex_unlock(&ctl_mutex);
2878 return ret;
2879}
2880
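/*
 * Fill in a PKT_CTRL_CMD_STATUS reply: the CD-ROM and pktcdvd device
 * numbers for the writer at the requested index (zero if the slot is
 * unused), plus the total number of writer slots.
 */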
2881static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
2882{
2883 struct pktcdvd_device *pd;
2884
2885 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2886
2887 pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
2888 if (pd) {
2889 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
2890 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
2891 } else {
2892 ctrl_cmd->dev = 0;
2893 ctrl_cmd->pkt_dev = 0;
2894 }
2895 ctrl_cmd->num_devices = MAX_WRITERS;
2896
2897 mutex_unlock(&ctl_mutex);
2898}
2899
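/*
 * ioctl handler for the /dev/pktcdvd/control misc device.  Userspace
 * (typically pktsetup(8)) passes a struct pkt_ctrl_command to set up,
 * tear down or query a mapping.  A setup call looks roughly like this
 * (illustration only, names are made up):
 *
 *	struct pkt_ctrl_command c = { .command = PKT_CTRL_CMD_SETUP };
 *	c.dev = encoded_cdrom_devt;
 *	ioctl(control_fd, PACKET_CTRL_CMD, &c);
 *
 * On success c.pkt_dev identifies the new pktcdvd device.  SETUP and
 * TEARDOWN require CAP_SYS_ADMIN.
 */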
2900static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2901{
2902 void __user *argp = (void __user *)arg;
2903 struct pkt_ctrl_command ctrl_cmd;
2904 int ret = 0;
2905 dev_t pkt_dev = 0;
2906
2907 if (cmd != PACKET_CTRL_CMD)
2908 return -ENOTTY;
2909
2910 if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
2911 return -EFAULT;
2912
2913 switch (ctrl_cmd.command) {
2914 case PKT_CTRL_CMD_SETUP:
2915 if (!capable(CAP_SYS_ADMIN))
2916 return -EPERM;
2917 ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
2918 ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
2919 break;
2920 case PKT_CTRL_CMD_TEARDOWN:
2921 if (!capable(CAP_SYS_ADMIN))
2922 return -EPERM;
2923 ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
2924 break;
2925 case PKT_CTRL_CMD_STATUS:
2926 pkt_get_status(&ctrl_cmd);
2927 break;
2928 default:
2929 return -ENOTTY;
2930 }
2931
2932 if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
2933 return -EFAULT;
2934 return ret;
2935}
2936
2937#ifdef CONFIG_COMPAT
2938static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2939{
2940 return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2941}
2942#endif
2943
2944static const struct file_operations pkt_ctl_fops = {
2945 .open = nonseekable_open,
2946 .unlocked_ioctl = pkt_ctl_ioctl,
2947#ifdef CONFIG_COMPAT
2948 .compat_ioctl = pkt_ctl_compat_ioctl,
2949#endif
2950 .owner = THIS_MODULE,
2951 .llseek = no_llseek,
2952};
2953
2954static struct miscdevice pkt_misc = {
2955 .minor = MISC_DYNAMIC_MINOR,
2956 .name = DRIVER_NAME,
2957 .nodename = "pktcdvd/control",
2958 .fops = &pkt_ctl_fops
2959};
2960
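/*
 * Module init: create the mempool for stacked read bios, register the
 * block major, sysfs class, debugfs root, the control misc device and
 * the /proc/driver/pktcdvd directory.
 */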
2961static int __init pkt_init(void)
2962{
2963 int ret;
2964
2965 mutex_init(&ctl_mutex);
2966
2967 psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
2968 sizeof(struct packet_stacked_data));
2969 if (!psd_pool)
2970 return -ENOMEM;
2971
2972 ret = register_blkdev(pktdev_major, DRIVER_NAME);
2973 if (ret < 0) {
2974 printk(DRIVER_NAME": Unable to register block device\n");
2975 goto out2;
2976 }
2977 if (!pktdev_major)
2978 pktdev_major = ret;
2979
2980 ret = pkt_sysfs_init();
2981 if (ret)
2982 goto out;
2983
2984 pkt_debugfs_init();
2985
2986 ret = misc_register(&pkt_misc);
2987 if (ret) {
2988 printk(DRIVER_NAME": Unable to register misc device\n");
2989 goto out_misc;
2990 }
2991
2992 pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
2993
2994 return 0;
2995
2996out_misc:
2997 pkt_debugfs_cleanup();
2998 pkt_sysfs_cleanup();
2999out:
3000 unregister_blkdev(pktdev_major, DRIVER_NAME);
3001out2:
3002 mempool_destroy(psd_pool);
3003 return ret;
3004}
3005
3006static void __exit pkt_exit(void)
3007{
3008 remove_proc_entry("driver/"DRIVER_NAME, NULL);
3009 misc_deregister(&pkt_misc);
3010
3011 pkt_debugfs_cleanup();
3012 pkt_sysfs_cleanup();
3013
3014 unregister_blkdev(pktdev_major, DRIVER_NAME);
3015 mempool_destroy(psd_pool);
3016}
3017
3018MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
3019MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
3020MODULE_LICENSE("GPL");
3021
3022module_init(pkt_init);
3023module_exit(pkt_exit);
3024