1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#undef PARPORT_DEBUG_SHARING
19
20#include <linux/module.h>
21#include <linux/string.h>
22#include <linux/threads.h>
23#include <linux/parport.h>
24#include <linux/delay.h>
25#include <linux/errno.h>
26#include <linux/interrupt.h>
27#include <linux/ioport.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/sched.h>
31#include <linux/kmod.h>
32#include <linux/device.h>
33
34#include <linux/spinlock.h>
35#include <linux/mutex.h>
36#include <asm/irq.h>
37
38#undef PARPORT_PARANOID
39
#define PARPORT_DEFAULT_TIMESLICE (HZ/5)

/* Default quantum (jiffies) a device may hold a shared port before a
 * waiting device is given a chance; copied into each new pardevice. */
unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
/* Default spin time used when initialising each new port's ->spintime. */
int parport_default_spintime = DEFAULT_SPIN_TIME;

/* Ports that have been announced to drivers; guarded by parportlist_lock. */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* All allocated ports (announced or not), kept sorted by number so the
 * lowest free number can be found; guarded by full_list_lock. */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* Legacy (non-device-model) drivers registered with us. */
static LIST_HEAD(drivers);

/* Serialises driver and port registration against unregistration. */
static DEFINE_MUTEX(registration_lock);
55
56
57static void dead_write_lines(struct parport *p, unsigned char b){}
58static unsigned char dead_read_lines(struct parport *p) { return 0; }
59static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
60 unsigned char c) { return 0; }
61static void dead_onearg(struct parport *p){}
62static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
63static void dead_state(struct parport *p, struct parport_state *s) { }
64static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
65{ return 0; }
66static size_t dead_read(struct parport *p, void *b, size_t l, int f)
67{ return 0; }
/* Operations table installed by parport_remove_port(); every hook is a
 * do-nothing stub from the dead_* family above. */
static struct parport_operations dead_ops = {
	.write_data = dead_write_lines,
	.read_data = dead_read_lines,

	.write_control = dead_write_lines,
	.read_control = dead_read_lines,
	.frob_control = dead_frob_lines,

	.read_status = dead_read_lines,

	.enable_irq = dead_onearg,
	.disable_irq = dead_onearg,

	.data_forward = dead_onearg,
	.data_reverse = dead_onearg,

	.init_state = dead_initstate,
	.save_state = dead_state,
	.restore_state = dead_state,

	.epp_write_data = dead_write,
	.epp_read_data = dead_read,
	.epp_write_addr = dead_write,
	.epp_read_addr = dead_read,

	.ecp_write_data = dead_write,
	.ecp_read_data = dead_read,
	.ecp_write_addr = dead_write,

	.compat_write_data = dead_write,
	.nibble_read_data = dead_read,
	.byte_read_data = dead_read,

	.owner = NULL,
};
103
/* Device type shared by all port devices on the parport bus; used by
 * is_parport() to tell ports apart from pardevices. */
static struct device_type parport_device_type = {
	.name = "parport",
};
107
108static int is_parport(struct device *dev)
109{
110 return dev->type == &parport_device_type;
111}
112
113static int parport_probe(struct device *dev)
114{
115 struct parport_driver *drv;
116
117 if (is_parport(dev))
118 return -ENODEV;
119
120 drv = to_parport_driver(dev->driver);
121 if (!drv->probe) {
122
123 struct pardevice *par_dev = to_pardevice(dev);
124
125 if (strcmp(par_dev->name, drv->name))
126 return -ENODEV;
127 return 0;
128 }
129
130 return drv->probe(to_pardevice(dev));
131}
132
/* The parport bus; both ports and pardevices live on it. */
static struct bus_type parport_bus_type = {
	.name = "parport",
	.probe = parport_probe,
};
137
138int parport_bus_init(void)
139{
140 return bus_register(&parport_bus_type);
141}
142
143void parport_bus_exit(void)
144{
145 bus_unregister(&parport_bus_type);
146}
147
148
149
150
151
152
153
154static int driver_check(struct device_driver *dev_drv, void *_port)
155{
156 struct parport *port = _port;
157 struct parport_driver *drv = to_parport_driver(dev_drv);
158
159 if (drv->match_port)
160 drv->match_port(port);
161 return 0;
162}
163
164
/* Call attach(port) for each registered driver.
 * Caller holds registration_lock (see parport_announce_port). */
static void attach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */
	struct parport_driver *drv;

	/* Legacy drivers first: their ->attach is mandatory. */
	list_for_each_entry(drv, &drivers, list)
		drv->attach(port);

	/*
	 * Then device-model drivers: walk the bus and let each one
	 * ->match_port() the new port (driver_check skips drivers
	 * without a match_port callback).
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
}
180
181static int driver_detach(struct device_driver *_drv, void *_port)
182{
183 struct parport *port = _port;
184 struct parport_driver *drv = to_parport_driver(_drv);
185
186 if (drv->detach)
187 drv->detach(port);
188 return 0;
189}
190
191
/* Call detach(port) for each registered driver.
 * Caller holds registration_lock (see parport_remove_port). */
static void detach_driver_chain(struct parport *port)
{
	struct parport_driver *drv;

	/* Legacy drivers: ->detach is called unconditionally here —
	 * NOTE(review): unlike driver_detach() below there is no NULL
	 * check, so legacy drivers are presumably required to supply a
	 * detach callback; confirm against parport.h. */
	list_for_each_entry(drv, &drivers, list)
		drv->detach(port);

	/*
	 * Device-model drivers: walk the bus and give each one a chance
	 * to detach (driver_detach skips drivers without ->detach).
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}
206
207
/* Ask kmod to load the lowlevel port driver.  "parport_lowlevel" is a
 * module alias the administrator can point at the correct hardware
 * driver (e.g. via modprobe configuration). */
static void get_lowlevel_driver(void)
{
	request_module("parport_lowlevel");
}
216
217
218
219
220
221
222
223static int port_check(struct device *dev, void *dev_drv)
224{
225 struct parport_driver *drv = dev_drv;
226
227
228 if (is_parport(dev))
229 drv->match_port(to_parport_dev(dev));
230 return 0;
231}
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266int __parport_register_driver(struct parport_driver *drv, struct module *owner,
267 const char *mod_name)
268{
269 if (list_empty(&portlist))
270 get_lowlevel_driver();
271
272 if (drv->devmodel) {
273
274 int ret;
275
276
277 drv->driver.name = drv->name;
278 drv->driver.bus = &parport_bus_type;
279 drv->driver.owner = owner;
280 drv->driver.mod_name = mod_name;
281 ret = driver_register(&drv->driver);
282 if (ret)
283 return ret;
284
285 mutex_lock(®istration_lock);
286 if (drv->match_port)
287 bus_for_each_dev(&parport_bus_type, NULL, drv,
288 port_check);
289 mutex_unlock(®istration_lock);
290 } else {
291 struct parport *port;
292
293 drv->devmodel = false;
294
295 mutex_lock(®istration_lock);
296 list_for_each_entry(port, &portlist, list)
297 drv->attach(port);
298 list_add(&drv->list, &drivers);
299 mutex_unlock(®istration_lock);
300 }
301
302 return 0;
303}
304EXPORT_SYMBOL(__parport_register_driver);
305
306static int port_detach(struct device *dev, void *_drv)
307{
308 struct parport_driver *drv = _drv;
309
310 if (is_parport(dev) && drv->detach)
311 drv->detach(to_parport_dev(dev));
312
313 return 0;
314}
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
/**
 * parport_unregister_driver - deregister a parallel port device driver
 * @drv: driver previously registered with __parport_register_driver()
 *
 * For device-model drivers this detaches the driver from every port on
 * the bus and unregisters it from the driver core; for legacy drivers
 * it removes the driver from the internal list and calls ->detach() on
 * every announced port.  All under registration_lock so no port can
 * appear or vanish mid-walk.
 */
void parport_unregister_driver(struct parport_driver *drv)
{
	struct parport *port;

	mutex_lock(&registration_lock);
	if (drv->devmodel) {
		bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
		driver_unregister(&drv->driver);
	} else {
		list_del_init(&drv->list);
		list_for_each_entry(port, &portlist, list)
			drv->detach(port);
	}
	mutex_unlock(&registration_lock);
}
348EXPORT_SYMBOL(parport_unregister_driver);
349
350static void free_port(struct device *dev)
351{
352 int d;
353 struct parport *port = to_parport_dev(dev);
354
355 spin_lock(&full_list_lock);
356 list_del(&port->full_list);
357 spin_unlock(&full_list_lock);
358 for (d = 0; d < 5; d++) {
359 kfree(port->probe_info[d].class_name);
360 kfree(port->probe_info[d].mfr);
361 kfree(port->probe_info[d].model);
362 kfree(port->probe_info[d].cmdset);
363 kfree(port->probe_info[d].description);
364 }
365
366 kfree(port->name);
367 kfree(port);
368}
369
370
371
372
373
374
375
376
377
378struct parport *parport_get_port(struct parport *port)
379{
380 struct device *dev = get_device(&port->bus_dev);
381
382 return to_parport_dev(dev);
383}
384EXPORT_SYMBOL(parport_get_port);
385
/* Remove the port's device from the driver core; the struct itself is
 * freed by free_port() once the last reference goes away. */
void parport_del_port(struct parport *port)
{
	device_unregister(&port->bus_dev);
}
390EXPORT_SYMBOL(parport_del_port);
391
392
393
394
395
396
397
398
399
400
/**
 * parport_put_port - drop a reference taken with parport_get_port()
 * @port: the port
 *
 * May free the port (via free_port) if this was the last reference.
 */
void parport_put_port(struct parport *port)
{
	put_device(&port->bus_dev);
}
405EXPORT_SYMBOL(parport_put_port);
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436struct parport *parport_register_port(unsigned long base, int irq, int dma,
437 struct parport_operations *ops)
438{
439 struct list_head *l;
440 struct parport *tmp;
441 int num;
442 int device;
443 char *name;
444 int ret;
445
446 tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
447 if (!tmp)
448 return NULL;
449
450
451 tmp->base = base;
452 tmp->irq = irq;
453 tmp->dma = dma;
454 tmp->muxport = tmp->daisy = tmp->muxsel = -1;
455 tmp->modes = 0;
456 INIT_LIST_HEAD(&tmp->list);
457 tmp->devices = tmp->cad = NULL;
458 tmp->flags = 0;
459 tmp->ops = ops;
460 tmp->physport = tmp;
461 memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
462 rwlock_init(&tmp->cad_lock);
463 spin_lock_init(&tmp->waitlist_lock);
464 spin_lock_init(&tmp->pardevice_lock);
465 tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
466 tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
467 sema_init(&tmp->ieee1284.irq, 0);
468 tmp->spintime = parport_default_spintime;
469 atomic_set(&tmp->ref_count, 1);
470 INIT_LIST_HEAD(&tmp->full_list);
471
472 name = kmalloc(15, GFP_KERNEL);
473 if (!name) {
474 kfree(tmp);
475 return NULL;
476 }
477
478
479 spin_lock(&full_list_lock);
480 for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
481 struct parport *p = list_entry(l, struct parport, full_list);
482 if (p->number != num)
483 break;
484 }
485 tmp->portnum = tmp->number = num;
486 list_add_tail(&tmp->full_list, l);
487 spin_unlock(&full_list_lock);
488
489
490
491
492 sprintf(name, "parport%d", tmp->portnum = tmp->number);
493 tmp->name = name;
494 tmp->bus_dev.bus = &parport_bus_type;
495 tmp->bus_dev.release = free_port;
496 dev_set_name(&tmp->bus_dev, name);
497 tmp->bus_dev.type = &parport_device_type;
498
499 for (device = 0; device < 5; device++)
500
501 tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
502
503 tmp->waithead = tmp->waittail = NULL;
504
505 ret = device_register(&tmp->bus_dev);
506 if (ret) {
507 put_device(&tmp->bus_dev);
508 return NULL;
509 }
510
511 return tmp;
512}
513EXPORT_SYMBOL(parport_register_port);
514
515
516
517
518
519
520
521
522
523
524
525
526
/**
 * parport_announce_port - tell device drivers about a parallel port
 * @port: parallel port to announce
 *
 * After a port driver has registered a parallel port with
 * parport_register_port() and finished initialising it, call this to
 * put the port on the public list and notify all drivers.
 */
void parport_announce_port(struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		printk(KERN_WARNING "%s: fix this legacy no-device port driver!\n",
		       port->name);

	parport_proc_register(port);
	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* slaves[0..1] — presumably mux sub-ports, announced alongside
	 * the master (TODO confirm against the slaves[] declaration). */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived.  Done under
	 * registration_lock so driver [un]registration cannot race. */
	attach_driver_chain(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
560EXPORT_SYMBOL(parport_announce_port);
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
/**
 * parport_remove_port - deregister a parallel port
 * @port: parallel port to deregister
 *
 * Undoes parport_announce_port(): detaches all drivers, substitutes
 * dead_ops so late callers through a stale ops pointer do nothing, and
 * takes the port (and any slave ports) off the public list.  The port
 * structure itself lives on until its last reference is dropped.
 */
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain(port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* Neutralise the operations vector before delisting. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop the references held on the slave ports; may free them. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
622EXPORT_SYMBOL(parport_remove_port);
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
/**
 * parport_register_device - register a device on a parallel port (legacy)
 * @port: port the device talks to
 * @name: device name (string must outlive the device; not copied)
 * @pf: preemption callback, or NULL
 * @kf: kick (wakeup) callback, or NULL
 * @irq_func: interrupt callback, or NULL
 * @flags: PARPORT_DEV_* flags
 * @handle: opaque cookie passed back to the callbacks
 *
 * Pre-device-model registration path.  Allocates a pardevice, links it
 * onto the physical port's device list and sets up its state area.
 * Returns the new device or NULL on any failure.
 */
struct pardevice *
parport_register_device(struct parport *port, const char *name,
			int (*pf)(void *), void (*kf)(void *),
			void (*irq_func)(void *),
			int flags, void *handle)
{
	struct pardevice *tmp;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		printk(KERN_DEBUG "%s: no more devices allowed\n",
		       port->name);
		return NULL;
	}

	/* A lurking device must be preemptable/wakeable, or it could
	 * never be asked to yield or re-claim the port. */
	if (flags & PARPORT_DEV_LURK) {
		if (!pf || !kf) {
			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
			return NULL;
		}
	}

	if (flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/*
	 * We up our own module reference count, and that of the target
	 * parport, to ensure that neither of us gets unloaded while we
	 * aren't ready.
	 */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
	if (!tmp)
		goto out;

	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
	if (!tmp->state)
		goto out_free_pardevice;

	tmp->name = name;
	tmp->port = port;
	tmp->daisy = -1;
	tmp->preempt = pf;
	tmp->wakeup = kf;
	tmp->private = handle;
	tmp->flags = flags;
	tmp->irq_func = irq_func;
	tmp->waiting = 0;
	tmp->timeout = 5 * HZ;
	tmp->devmodel = false;

	/* Chain this onto the list */
	tmp->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don' t need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	/* Re-check exclusivity under the lock: the unlocked check above
	 * could have raced with another registration. */
	if (flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			printk(KERN_DEBUG
			       "%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			goto out_free_all;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	tmp->next = port->physport->devices;
	wmb(); /*
		* Make sure that tmp->next is written before it's
		* added to the list; see comments marked 'no locking
		* required'
		*/
	if (port->physport->devices)
		port->physport->devices->prev = tmp;
	port->physport->devices = tmp;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&tmp->wait_q);
	tmp->timeslice = parport_default_timeslice;
	tmp->waitnext = tmp->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(tmp, tmp->state);
	/* Only the first registered device gets the /proc entry. */
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = tmp;
		parport_device_proc_register(tmp);
	}
	return tmp;

 out_free_all:
	kfree(tmp->state);
 out_free_pardevice:
	kfree(tmp);
 out:
	parport_put_port(port);
	module_put(port->ops->owner);

	return NULL;
}
815EXPORT_SYMBOL(parport_register_device);
816
/* Release callback for a device-model pardevice's struct device.
 * Frees the kstrdup'd name and the structure itself.  Note that
 * par_dev->state is NOT freed here — callers on error/unregister
 * paths are responsible for it. */
static void free_pardevice(struct device *dev)
{
	struct pardevice *par_dev = to_pardevice(dev);

	kfree(par_dev->name);
	kfree(par_dev);
}
824
825struct pardevice *
826parport_register_dev_model(struct parport *port, const char *name,
827 const struct pardev_cb *par_dev_cb, int id)
828{
829 struct pardevice *par_dev;
830 int ret;
831 char *devname;
832
833 if (port->physport->flags & PARPORT_FLAG_EXCL) {
834
835 pr_err("%s: no more devices allowed\n", port->name);
836 return NULL;
837 }
838
839 if (par_dev_cb->flags & PARPORT_DEV_LURK) {
840 if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
841 pr_info("%s: refused to register lurking device (%s) without callbacks\n",
842 port->name, name);
843 return NULL;
844 }
845 }
846
847 if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
848 if (port->physport->devices) {
849
850
851
852
853
854
855 pr_err("%s: cannot grant exclusive access for device %s\n",
856 port->name, name);
857 return NULL;
858 }
859 }
860
861 if (!try_module_get(port->ops->owner))
862 return NULL;
863
864 parport_get_port(port);
865
866 par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
867 if (!par_dev)
868 goto err_put_port;
869
870 par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
871 if (!par_dev->state)
872 goto err_put_par_dev;
873
874 devname = kstrdup(name, GFP_KERNEL);
875 if (!devname)
876 goto err_free_par_dev;
877
878 par_dev->name = devname;
879 par_dev->port = port;
880 par_dev->daisy = -1;
881 par_dev->preempt = par_dev_cb->preempt;
882 par_dev->wakeup = par_dev_cb->wakeup;
883 par_dev->private = par_dev_cb->private;
884 par_dev->flags = par_dev_cb->flags;
885 par_dev->irq_func = par_dev_cb->irq_func;
886 par_dev->waiting = 0;
887 par_dev->timeout = 5 * HZ;
888
889 par_dev->dev.parent = &port->bus_dev;
890 par_dev->dev.bus = &parport_bus_type;
891 ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
892 if (ret)
893 goto err_free_devname;
894 par_dev->dev.release = free_pardevice;
895 par_dev->devmodel = true;
896 ret = device_register(&par_dev->dev);
897 if (ret) {
898 put_device(&par_dev->dev);
899 goto err_put_port;
900 }
901
902
903 par_dev->prev = NULL;
904
905
906
907
908 spin_lock(&port->physport->pardevice_lock);
909
910 if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
911 if (port->physport->devices) {
912 spin_unlock(&port->physport->pardevice_lock);
913 pr_debug("%s: cannot grant exclusive access for device %s\n",
914 port->name, name);
915 device_unregister(&par_dev->dev);
916 goto err_put_port;
917 }
918 port->flags |= PARPORT_FLAG_EXCL;
919 }
920
921 par_dev->next = port->physport->devices;
922 wmb();
923
924
925
926
927 if (port->physport->devices)
928 port->physport->devices->prev = par_dev;
929 port->physport->devices = par_dev;
930 spin_unlock(&port->physport->pardevice_lock);
931
932 init_waitqueue_head(&par_dev->wait_q);
933 par_dev->timeslice = parport_default_timeslice;
934 par_dev->waitnext = NULL;
935 par_dev->waitprev = NULL;
936
937
938
939
940
941 port->ops->init_state(par_dev, par_dev->state);
942 port->proc_device = par_dev;
943 parport_device_proc_register(par_dev);
944
945 return par_dev;
946
947err_free_devname:
948 kfree(devname);
949err_free_par_dev:
950 kfree(par_dev->state);
951err_put_par_dev:
952 if (!par_dev->devmodel)
953 kfree(par_dev);
954err_put_port:
955 parport_put_port(port);
956 module_put(port->ops->owner);
957
958 return NULL;
959}
960EXPORT_SYMBOL(parport_register_dev_model);
961
962
963
964
965
966
967
968
/**
 * parport_unregister_device - deregister a device on a parallel port
 * @dev: pointer to structure representing device
 *
 * Releases the port if the device still holds it, unlinks the device
 * from the port's device list and wait list, frees its state, and
 * drops the module and port references taken at registration.
 */
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (!dev) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	port = dev->port->physport;

	/* If this device owns the /proc entry, hand it back. */
	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	/* A well-behaved driver releases before unregistering; cope
	 * with the ones that don't. */
	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release(dev);
	}

	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	/* An exclusive device leaving re-opens the port for others. */
	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/*
	 * Make sure we haven't left any pointers around in the wait
	 * list.
	 */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	/* Device-model devices are freed by free_pardevice() when the
	 * last reference drops; legacy ones are freed here. */
	if (dev->devmodel)
		device_unregister(&dev->dev);
	else
		kfree(dev);

	module_put(port->ops->owner);
	parport_put_port(port);
}
1033EXPORT_SYMBOL(parport_unregister_device);
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047struct parport *parport_find_number(int number)
1048{
1049 struct parport *port, *result = NULL;
1050
1051 if (list_empty(&portlist))
1052 get_lowlevel_driver();
1053
1054 spin_lock(&parportlist_lock);
1055 list_for_each_entry(port, &portlist, list) {
1056 if (port->number == number) {
1057 result = parport_get_port(port);
1058 break;
1059 }
1060 }
1061 spin_unlock(&parportlist_lock);
1062 return result;
1063}
1064EXPORT_SYMBOL(parport_find_number);
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078struct parport *parport_find_base(unsigned long base)
1079{
1080 struct parport *port, *result = NULL;
1081
1082 if (list_empty(&portlist))
1083 get_lowlevel_driver();
1084
1085 spin_lock(&parportlist_lock);
1086 list_for_each_entry(port, &portlist, list) {
1087 if (port->base == base) {
1088 result = parport_get_port(port);
1089 break;
1090 }
1091 }
1092 spin_unlock(&parportlist_lock);
1093 return result;
1094}
1095EXPORT_SYMBOL(parport_find_base);
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
/**
 * parport_claim - claim access to a parallel port device
 * @dev: device claiming the port
 *
 * Tries to claim the port for @dev, preempting the current owner if
 * it is willing.  Returns 0 on success; -EAGAIN if the port is busy,
 * in which case the device is queued to be told when the port becomes
 * free (provided it asked to wait or has a wakeup callback).
 */
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name,dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave(&port->cad_lock, flags);
	oldcad = port->cad;
	if (oldcad) {
		if (oldcad->preempt) {
			/* Non-zero from ->preempt means "not now". */
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		/* The preempt callback may itself have released the
		 * port (and someone else may have claimed it). */
		if (port->cad != oldcad) {
			/*
			 * I think we'll actually deadlock rather than
			 * get here, but just in case..
			 */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting.
	 * Bit 0 of ->waiting = "on the wait list"; unlink if set. */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again.  */
		spin_lock_irq(&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq(&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select(port, dev->daisy,
					  IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/*
	 * If this is the first time we tried to claim the port, register an
	 * interest.  This is only allowed for devices sleeping in
	 * parport_claim_or_block(), or those with a wakeup function.
	 */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock(&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock(&port->waitlist_lock);
	}
	write_unlock_irqrestore(&port->cad_lock, flags);
	return -EAGAIN;
}
1213EXPORT_SYMBOL(parport_claim);
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
/**
 * parport_claim_or_block - claim access to a parallel port device
 * @dev: device claiming the port
 *
 * Like parport_claim(), but sleeps (interruptibly) until the port is
 * available if it is currently busy.  Returns 0 if claimed without
 * sleeping, 1 if it had to sleep, or -EINTR on signal.
 */
int parport_claim_or_block(struct pardevice *dev)
{
	int r;

	/*
	 * Signal to parport_claim() that we can wait even without a
	 * wakeup function.  (Bit 1 of ->waiting.)
	 */
	dev->waiting = 2;

	/* Try to claim the port.  If this fails, we need to sleep.  */
	r = parport_claim(dev);
	if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
#endif
		/*
		 * FIXME!!! Use the proper locking for dev->waiting,
		 * and make this use the "wait_event_interruptible()"
		 * interfaces. The cli/sti that used to be here
		 * did nothing.
		 *
		 * See also parport_release()
		 */

		/*
		 * If dev->waiting is clear now, an interrupt
		 * gave us the port and we would deadlock if we slept.
		 */
		if (dev->waiting) {
			wait_event_interruptible(dev->wait_q,
						 !dev->waiting);
			if (signal_pending(current))
				return -EINTR;
			r = 1;
		} else {
			r = 0;
#ifdef PARPORT_DEBUG_SHARING
			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
			       dev->name);
#endif
		}

#ifdef PARPORT_DEBUG_SHARING
		if (dev->port->physport->cad != dev)
			printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
			       dev->name, dev->port->physport->cad ?
			       dev->port->physport->cad->name:"nobody");
#endif
	}
	dev->waiting = 0;
	return r;
}
1278EXPORT_SYMBOL(parport_claim_or_block);
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
/**
 * parport_release - give up access to a parallel port device
 * @dev: device releasing the port
 *
 * Drops ownership of the port, saves the device's hardware state, and
 * hands the port to the first waiting device that will take it (or
 * pokes every registered device's wakeup callback as a last resort).
 */
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore(&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport when not owner\n",
		       port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all(port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers */
	port->ops->save_state(port, dev->state);

	/*
	 * If anybody is waiting, find out who's been there longest and
	 * then wake them up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) {
			/* Sleeper in parport_claim_or_block(): claim on
			 * its behalf and wake it. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			pd->wakeup(pd->private);
			if (dev->port->cad)
				return;
		} else {
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/*
	 * Nobody was waiting, so walk the list to see if anyone is
	 * interested in being woken up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
1354EXPORT_SYMBOL(parport_release);
1355
1356irqreturn_t parport_irq_handler(int irq, void *dev_id)
1357{
1358 struct parport *port = dev_id;
1359
1360 parport_generic_irq(port);
1361
1362 return IRQ_HANDLED;
1363}
1364EXPORT_SYMBOL(parport_irq_handler);
1365
1366MODULE_LICENSE("GPL");
1367