1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#undef PARPORT_DEBUG_SHARING
19
20#include <linux/module.h>
21#include <linux/string.h>
22#include <linux/threads.h>
23#include <linux/parport.h>
24#include <linux/delay.h>
25#include <linux/errno.h>
26#include <linux/interrupt.h>
27#include <linux/ioport.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/sched/signal.h>
31#include <linux/kmod.h>
32#include <linux/device.h>
33
34#include <linux/spinlock.h>
35#include <linux/mutex.h>
36#include <asm/irq.h>
37
38#undef PARPORT_PARANOID
39
40#define PARPORT_DEFAULT_TIMESLICE (HZ/5)
41
/* Module-wide tunables (exported; also set from module parameters elsewhere). */
unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;

/* All announced ports, guarded by parportlist_lock. */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* Every registered port (announced or not), used for number allocation;
   guarded by full_list_lock. */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* Legacy (non device-model) drivers. */
static LIST_HEAD(drivers);

/* Serialises driver and port (de)registration against each other. */
static DEFINE_MUTEX(registration_lock);
55
56
/* Stub operations installed on a removed port (see parport_remove_port)
 * so that stragglers still holding a reference hit harmless no-ops
 * instead of calling into an unloaded low-level driver. */
static void dead_write_lines(struct parport *p, unsigned char b){}
static unsigned char dead_read_lines(struct parport *p) { return 0; }
static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg(struct parport *p){}
static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
static void dead_state(struct parport *p, struct parport_state *s) { }
static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read(struct parport *p, void *b, size_t l, int f)
{ return 0; }
/* Replacement ops vector for a dead port: every hook is a no-op stub. */
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
103
/* Device type used to distinguish port devices from child pardevices
 * on the shared parport bus. */
static struct device_type parport_device_type = {
	.name = "parport",
};
107
108static int is_parport(struct device *dev)
109{
110 return dev->type == &parport_device_type;
111}
112
113static int parport_probe(struct device *dev)
114{
115 struct parport_driver *drv;
116
117 if (is_parport(dev))
118 return -ENODEV;
119
120 drv = to_parport_driver(dev->driver);
121 if (!drv->probe) {
122
123 struct pardevice *par_dev = to_pardevice(dev);
124
125 if (strcmp(par_dev->name, drv->name))
126 return -ENODEV;
127 return 0;
128 }
129
130 return drv->probe(to_pardevice(dev));
131}
132
/* The parport bus: hosts both port devices and their child pardevices. */
static struct bus_type parport_bus_type = {
	.name = "parport",
	.probe = parport_probe,
};
137
/* Register the parport bus with the driver core; called at module init. */
int parport_bus_init(void)
{
	return bus_register(&parport_bus_type);
}

/* Tear down the parport bus; called at module exit. */
void parport_bus_exit(void)
{
	bus_unregister(&parport_bus_type);
}
147
148
149
150
151
152
153
154static int driver_check(struct device_driver *dev_drv, void *_port)
155{
156 struct parport *port = _port;
157 struct parport_driver *drv = to_parport_driver(dev_drv);
158
159 if (drv->match_port)
160 drv->match_port(port);
161 return 0;
162}
163
164
/* Call attach(port) for each registered parport driver. */
static void attach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */
	struct parport_driver *drv;

	/* Legacy drivers: ->attach is mandatory for them, so it is
	 * called unconditionally. */
	list_for_each_entry(drv, &drivers, list)
		drv->attach(port);

	/*
	 * Device-model drivers: offer the new port to each driver's
	 * optional ->match_port hook (driver_check skips drivers
	 * without one).
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
}
180
181static int driver_detach(struct device_driver *_drv, void *_port)
182{
183 struct parport *port = _port;
184 struct parport_driver *drv = to_parport_driver(_drv);
185
186 if (drv->detach)
187 drv->detach(port);
188 return 0;
189}
190
191
/* Call detach(port) for each registered parport driver. */
static void detach_driver_chain(struct parport *port)
{
	struct parport_driver *drv;

	/* Legacy drivers: ->detach is mandatory for them, so it is
	 * called unconditionally. */
	list_for_each_entry(drv, &drivers, list)
		drv->detach(port);

	/*
	 * Device-model drivers: notify each one through its optional
	 * ->detach hook (driver_detach skips drivers without one).
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}
206
207
/* Ask kmod for "parport_lowlevel".  Aliasing that to a device driver
 * (such as parport_pc) will cause ports to be discovered when no
 * port is yet known. */
static void get_lowlevel_driver(void)
{
	/*
	 * There is no actual module called this: you should set
	 * up an alias for modutils.
	 */
	request_module("parport_lowlevel");
}
216
217
218
219
220
221
222
/*
 * bus_for_each_dev() callback used when a new device-model driver
 * registers: offer each existing port to the driver's ->match_port.
 * NOTE(review): ->match_port is called unconditionally here — the
 * caller (__parport_register_driver) only iterates when it is non-NULL;
 * keep that guarantee if adding new callers.
 */
static int port_check(struct device *dev, void *dev_drv)
{
	struct parport_driver *drv = dev_drv;

	/* only send ports, do not send other devices connected to bus */
	if (is_parport(dev))
		drv->match_port(to_parport_dev(dev));
	return 0;
}
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266int __parport_register_driver(struct parport_driver *drv, struct module *owner,
267 const char *mod_name)
268{
269 if (list_empty(&portlist))
270 get_lowlevel_driver();
271
272 if (drv->devmodel) {
273
274 int ret;
275
276
277 drv->driver.name = drv->name;
278 drv->driver.bus = &parport_bus_type;
279 drv->driver.owner = owner;
280 drv->driver.mod_name = mod_name;
281 ret = driver_register(&drv->driver);
282 if (ret)
283 return ret;
284
285 mutex_lock(®istration_lock);
286 if (drv->match_port)
287 bus_for_each_dev(&parport_bus_type, NULL, drv,
288 port_check);
289 mutex_unlock(®istration_lock);
290 } else {
291 struct parport *port;
292
293 drv->devmodel = false;
294
295 mutex_lock(®istration_lock);
296 list_for_each_entry(port, &portlist, list)
297 drv->attach(port);
298 list_add(&drv->list, &drivers);
299 mutex_unlock(®istration_lock);
300 }
301
302 return 0;
303}
304EXPORT_SYMBOL(__parport_register_driver);
305
306static int port_detach(struct device *dev, void *_drv)
307{
308 struct parport_driver *drv = _drv;
309
310 if (is_parport(dev) && drv->detach)
311 drv->detach(to_parport_dev(dev));
312
313 return 0;
314}
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
/*
 * parport_unregister_driver - deregister a parallel port device driver
 * @drv: structure previously passed to parport_register_driver()
 *
 * For device-model drivers, every port device is offered to ->detach
 * (if present) and the driver is removed from the bus.  For legacy
 * drivers, the driver is unlinked and its mandatory ->detach is called
 * for each known port.  Serialised against port (de)registration by
 * registration_lock.
 */
void parport_unregister_driver(struct parport_driver *drv)
{
	struct parport *port;

	mutex_lock(&registration_lock);
	if (drv->devmodel) {
		bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
		driver_unregister(&drv->driver);
	} else {
		list_del_init(&drv->list);
		list_for_each_entry(port, &portlist, list)
			drv->detach(port);
	}
	mutex_unlock(&registration_lock);
}
348EXPORT_SYMBOL(parport_unregister_driver);
349
/*
 * Device release callback for a port: runs when the last reference to
 * port->bus_dev is dropped.  Unlinks the port from the full list and
 * frees the probe info strings, the name, and the port itself.
 */
static void free_port(struct device *dev)
{
	int d;
	struct parport *port = to_parport_dev(dev);

	spin_lock(&full_list_lock);
	list_del(&port->full_list);
	spin_unlock(&full_list_lock);
	/* 5 entries: the port itself plus four possible daisy-chain
	 * devices (matches the probe_info array initialisation in
	 * parport_register_port). */
	for (d = 0; d < 5; d++) {
		kfree(port->probe_info[d].class_name);
		kfree(port->probe_info[d].mfr);
		kfree(port->probe_info[d].model);
		kfree(port->probe_info[d].cmdset);
		kfree(port->probe_info[d].description);
	}

	kfree(port->name);
	kfree(port);
}
369
370
371
372
373
374
375
376
377
/*
 * parport_get_port - increment a port's reference count
 * @port: the port
 *
 * Takes a reference on the underlying struct device so the port cannot
 * be freed until parport_put_port() is called.  Returns @port.
 */
struct parport *parport_get_port(struct parport *port)
{
	struct device *dev = get_device(&port->bus_dev);

	return to_parport_dev(dev);
}
384EXPORT_SYMBOL(parport_get_port);
385
/*
 * parport_del_port - remove the port's device from the bus
 * @port: the port
 *
 * Drops the device-core reference; the port memory is only freed once
 * all other references taken via parport_get_port() are released.
 */
void parport_del_port(struct parport *port)
{
	device_unregister(&port->bus_dev);
}
390EXPORT_SYMBOL(parport_del_port);
391
392
393
394
395
396
397
398
399
400
/*
 * parport_put_port - decrement a port's reference count
 * @port: the port
 *
 * Counterpart of parport_get_port(); when the last reference goes away
 * the device core invokes free_port().
 */
void parport_put_port(struct parport *port)
{
	put_device(&port->bus_dev);
}
405EXPORT_SYMBOL(parport_put_port);
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436struct parport *parport_register_port(unsigned long base, int irq, int dma,
437 struct parport_operations *ops)
438{
439 struct list_head *l;
440 struct parport *tmp;
441 int num;
442 int device;
443 char *name;
444 int ret;
445
446 tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
447 if (!tmp)
448 return NULL;
449
450
451 tmp->base = base;
452 tmp->irq = irq;
453 tmp->dma = dma;
454 tmp->muxport = tmp->daisy = tmp->muxsel = -1;
455 tmp->modes = 0;
456 INIT_LIST_HEAD(&tmp->list);
457 tmp->devices = tmp->cad = NULL;
458 tmp->flags = 0;
459 tmp->ops = ops;
460 tmp->physport = tmp;
461 memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
462 rwlock_init(&tmp->cad_lock);
463 spin_lock_init(&tmp->waitlist_lock);
464 spin_lock_init(&tmp->pardevice_lock);
465 tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
466 tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
467 sema_init(&tmp->ieee1284.irq, 0);
468 tmp->spintime = parport_default_spintime;
469 atomic_set(&tmp->ref_count, 1);
470 INIT_LIST_HEAD(&tmp->full_list);
471
472 name = kmalloc(15, GFP_KERNEL);
473 if (!name) {
474 kfree(tmp);
475 return NULL;
476 }
477
478
479 spin_lock(&full_list_lock);
480 for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
481 struct parport *p = list_entry(l, struct parport, full_list);
482 if (p->number != num)
483 break;
484 }
485 tmp->portnum = tmp->number = num;
486 list_add_tail(&tmp->full_list, l);
487 spin_unlock(&full_list_lock);
488
489
490
491
492 sprintf(name, "parport%d", tmp->portnum = tmp->number);
493 tmp->name = name;
494 tmp->bus_dev.bus = &parport_bus_type;
495 tmp->bus_dev.release = free_port;
496 dev_set_name(&tmp->bus_dev, name);
497 tmp->bus_dev.type = &parport_device_type;
498
499 for (device = 0; device < 5; device++)
500
501 tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
502
503 tmp->waithead = tmp->waittail = NULL;
504
505 ret = device_register(&tmp->bus_dev);
506 if (ret) {
507 put_device(&tmp->bus_dev);
508 return NULL;
509 }
510
511 return tmp;
512}
513EXPORT_SYMBOL(parport_register_port);
514
515
516
517
518
519
520
521
522
523
524
525
526
/*
 * parport_announce_port - tell device drivers about a parallel port
 * @port: port to announce
 *
 * Makes the port (and any IEEE 1284.3 mux slaves discovered by
 * parport_daisy_init) visible on the global port list, then offers it
 * to every registered driver.  Must be called by the low-level driver
 * after parport_register_port() once the port is ready for use.
 */
void parport_announce_port(struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		printk(KERN_WARNING "%s: fix this legacy no-device port driver!\n",
		       port->name);

	parport_proc_register(port);
	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* slaves[0..1] correspond to mux ports 1 and 2, if present. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) have arrived. */
	attach_driver_chain(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
560EXPORT_SYMBOL(parport_announce_port);
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
/*
 * parport_remove_port - deregister a parallel port
 * @port: port to deregister
 *
 * Reverse of parport_announce_port(): drivers are detached, the port
 * (and mux slaves) are unlinked from the global list, and the ops
 * vector is replaced with no-op stubs so late callers are harmless.
 * The port memory itself is freed only when its last reference is
 * dropped via parport_put_port().
 */
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain(port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* From here on any calls through port->ops hit the dead stubs. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop the references the slaves held on behalf of this port. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
622EXPORT_SYMBOL(parport_remove_port);
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
/*
 * parport_register_device - register a device on a parallel port
 * (legacy, non device-model interface; see parport_register_dev_model
 * for the device-model variant)
 * @port: port the device is attached to
 * @name: device name (NOT copied — must outlive the pardevice)
 * @pf: preemption callback; returns non-zero to refuse preemption
 * @kf: kick/wakeup callback, called when the port becomes available
 * @irq_func: interrupt callback
 * @flags: PARPORT_DEV_* flags (LURK requires both pf and kf;
 *	   EXCL demands sole access to the port)
 * @handle: opaque pointer passed back to the callbacks
 *
 * Allocates a struct pardevice and its saved-state buffer, links it
 * onto the physical port's device list under pardevice_lock, and
 * initialises the state via port->ops->init_state().  Takes a
 * reference on the port and on the low-level driver module; both are
 * dropped by parport_unregister_device().  Returns the new device or
 * NULL on failure.
 */
struct pardevice *
parport_register_device(struct parport *port, const char *name,
			int (*pf)(void *), void (*kf)(void *),
			void (*irq_func)(void *),
			int flags, void *handle)
{
	struct pardevice *tmp;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is already registered. */
		printk(KERN_DEBUG "%s: no more devices allowed\n",
			port->name);
		return NULL;
	}

	if (flags & PARPORT_DEV_LURK) {
		if (!pf || !kf) {
			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
			return NULL;
		}
	}

	if (flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/*
	 * We up our own module reference count, and that of the target
	 * parport's low-level driver, to ensure that neither of us gets
	 * unloaded while we sleep.
	 */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
	if (!tmp)
		goto out;

	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
	if (!tmp->state)
		goto out_free_pardevice;

	tmp->name = name;
	tmp->port = port;
	tmp->daisy = -1;
	tmp->preempt = pf;
	tmp->wakeup = kf;
	tmp->private = handle;
	tmp->flags = flags;
	tmp->irq_func = irq_func;
	tmp->waiting = 0;
	tmp->timeout = 5 * HZ;
	tmp->devmodel = false;

	/* Chain this onto the list */
	tmp->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don't need
	 * to clear irq on the local CPU.
	 */
	spin_lock(&port->physport->pardevice_lock);

	/* Re-check EXCL under the lock: another device may have been
	 * registered since the unlocked check above. */
	if (flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			printk(KERN_DEBUG
				"%s: cannot grant exclusive access for device %s\n",
				port->name, name);
			goto out_free_all;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	tmp->next = port->physport->devices;
	wmb(); /* Make sure that tmp->next is written before it's
		  added to the list; see comments marked 'no locking
		  required' */
	if (port->physport->devices)
		port->physport->devices->prev = tmp;
	port->physport->devices = tmp;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&tmp->wait_q);
	tmp->timeslice = parport_default_timeslice;
	tmp->waitnext = tmp->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields.
	 */
	port->ops->init_state(tmp, tmp->state);
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = tmp;
		parport_device_proc_register(tmp);
	}
	return tmp;

 out_free_all:
	kfree(tmp->state);
 out_free_pardevice:
	kfree(tmp);
 out:
	parport_put_port(port);
	module_put(port->ops->owner);

	return NULL;
}
815EXPORT_SYMBOL(parport_register_device);
816
817static void free_pardevice(struct device *dev)
818{
819 struct pardevice *par_dev = to_pardevice(dev);
820
821 kfree(par_dev->name);
822 kfree(par_dev);
823}
824
825struct pardevice *
826parport_register_dev_model(struct parport *port, const char *name,
827 const struct pardev_cb *par_dev_cb, int id)
828{
829 struct pardevice *par_dev;
830 int ret;
831 char *devname;
832
833 if (port->physport->flags & PARPORT_FLAG_EXCL) {
834
835 pr_err("%s: no more devices allowed\n", port->name);
836 return NULL;
837 }
838
839 if (par_dev_cb->flags & PARPORT_DEV_LURK) {
840 if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
841 pr_info("%s: refused to register lurking device (%s) without callbacks\n",
842 port->name, name);
843 return NULL;
844 }
845 }
846
847 if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
848 if (port->physport->devices) {
849
850
851
852
853
854
855 pr_err("%s: cannot grant exclusive access for device %s\n",
856 port->name, name);
857 return NULL;
858 }
859 }
860
861 if (!try_module_get(port->ops->owner))
862 return NULL;
863
864 parport_get_port(port);
865
866 par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
867 if (!par_dev)
868 goto err_put_port;
869
870 par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
871 if (!par_dev->state)
872 goto err_put_par_dev;
873
874 devname = kstrdup(name, GFP_KERNEL);
875 if (!devname)
876 goto err_free_par_dev;
877
878 par_dev->name = devname;
879 par_dev->port = port;
880 par_dev->daisy = -1;
881 par_dev->preempt = par_dev_cb->preempt;
882 par_dev->wakeup = par_dev_cb->wakeup;
883 par_dev->private = par_dev_cb->private;
884 par_dev->flags = par_dev_cb->flags;
885 par_dev->irq_func = par_dev_cb->irq_func;
886 par_dev->waiting = 0;
887 par_dev->timeout = 5 * HZ;
888
889 par_dev->dev.parent = &port->bus_dev;
890 par_dev->dev.bus = &parport_bus_type;
891 ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
892 if (ret)
893 goto err_free_devname;
894 par_dev->dev.release = free_pardevice;
895 par_dev->devmodel = true;
896 ret = device_register(&par_dev->dev);
897 if (ret) {
898 put_device(&par_dev->dev);
899 goto err_put_port;
900 }
901
902
903 par_dev->prev = NULL;
904
905
906
907
908 spin_lock(&port->physport->pardevice_lock);
909
910 if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
911 if (port->physport->devices) {
912 spin_unlock(&port->physport->pardevice_lock);
913 pr_debug("%s: cannot grant exclusive access for device %s\n",
914 port->name, name);
915 device_unregister(&par_dev->dev);
916 goto err_put_port;
917 }
918 port->flags |= PARPORT_FLAG_EXCL;
919 }
920
921 par_dev->next = port->physport->devices;
922 wmb();
923
924
925
926
927 if (port->physport->devices)
928 port->physport->devices->prev = par_dev;
929 port->physport->devices = par_dev;
930 spin_unlock(&port->physport->pardevice_lock);
931
932 init_waitqueue_head(&par_dev->wait_q);
933 par_dev->timeslice = parport_default_timeslice;
934 par_dev->waitnext = NULL;
935 par_dev->waitprev = NULL;
936
937
938
939
940
941 port->ops->init_state(par_dev, par_dev->state);
942 if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
943 port->proc_device = par_dev;
944 parport_device_proc_register(par_dev);
945 }
946
947 return par_dev;
948
949err_free_devname:
950 kfree(devname);
951err_free_par_dev:
952 kfree(par_dev->state);
953err_put_par_dev:
954 if (!par_dev->devmodel)
955 kfree(par_dev);
956err_put_port:
957 parport_put_port(port);
958 module_put(port->ops->owner);
959
960 return NULL;
961}
962EXPORT_SYMBOL(parport_register_dev_model);
963
964
965
966
967
968
969
970
/*
 * parport_unregister_device - deregister a device on a parallel port
 * @dev: pointer to structure representing device
 *
 * Unlinks the device from the port's device list and wait list,
 * releases the port if the device forgot to, frees (or unregisters)
 * the pardevice, and drops the module and port references taken at
 * registration time.  This function should not be called if the
 * device is still claimed by an active user.
 */
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (!dev) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	port = dev->port->physport;

	/* Tear down the /proc entry if this device owned it. */
	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release(dev);
	}

	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	/* An exclusive device leaving re-opens the port to others. */
	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/*
	 * Make sure we haven't left any pointers around in the wait
	 * list.
	 */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	if (dev->devmodel)
		device_unregister(&dev->dev);
	else
		kfree(dev);

	module_put(port->ops->owner);
	parport_put_port(port);
}
1035EXPORT_SYMBOL(parport_unregister_device);
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049struct parport *parport_find_number(int number)
1050{
1051 struct parport *port, *result = NULL;
1052
1053 if (list_empty(&portlist))
1054 get_lowlevel_driver();
1055
1056 spin_lock(&parportlist_lock);
1057 list_for_each_entry(port, &portlist, list) {
1058 if (port->number == number) {
1059 result = parport_get_port(port);
1060 break;
1061 }
1062 }
1063 spin_unlock(&parportlist_lock);
1064 return result;
1065}
1066EXPORT_SYMBOL(parport_find_number);
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080struct parport *parport_find_base(unsigned long base)
1081{
1082 struct parport *port, *result = NULL;
1083
1084 if (list_empty(&portlist))
1085 get_lowlevel_driver();
1086
1087 spin_lock(&parportlist_lock);
1088 list_for_each_entry(port, &portlist, list) {
1089 if (port->base == base) {
1090 result = parport_get_port(port);
1091 break;
1092 }
1093 }
1094 spin_unlock(&parportlist_lock);
1095 return result;
1096}
1097EXPORT_SYMBOL(parport_find_base);
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
/*
 * parport_claim - claim access to a parallel port device
 * @dev: device making the claim
 *
 * Tries to claim the port for @dev, preempting the current owner via
 * its ->preempt callback if necessary.  Returns 0 on success.  If the
 * port cannot be claimed, @dev is queued on the port's wait list (when
 * it has a wakeup callback or is in a claim_or_block cycle) and
 * -EAGAIN is returned.
 */
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name,dev->name);
		return 0;
	}

	/* Preempt any current "hand-off" device. */
	write_lock_irqsave(&port->cad_lock, flags);
	oldcad = port->cad;
	if (oldcad) {
		if (oldcad->preempt) {
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/*
			 * I think we'll actually deadlock rather than
			 * get here, but just in case..
			 */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting. */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again. */
		spin_lock_irq(&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq(&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices. */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select(port, dev->daisy,
					  IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/*
	 * If this is the first time we tried to claim the port, register an
	 * interest.  This is only allowed for devices sleeping in
	 * parport_claim_or_block(), or those with a wakeup function.
	 */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock(&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock(&port->waitlist_lock);
	}
	write_unlock_irqrestore(&port->cad_lock, flags);
	return -EAGAIN;
}
1215EXPORT_SYMBOL(parport_claim);
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
/*
 * parport_claim_or_block - claim access to a parallel port device,
 * sleeping if necessary
 * @dev: device making the claim
 *
 * Like parport_claim(), but sleeps (interruptibly) until the port is
 * handed over if it is busy.  Returns 0 if the port was claimed
 * immediately, 1 if it was claimed after sleeping, or -EINTR if a
 * signal interrupted the wait.
 */
int parport_claim_or_block(struct pardevice *dev)
{
	int r;

	/*
	 * Signal to parport_claim() that we can wait even without a
	 * wakeup function.
	 */
	dev->waiting = 2;

	/* Try to claim the port.  If this fails, we need to sleep. */
	r = parport_claim(dev);
	if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
#endif
		/*
		 * FIXME!!! Use the proper locking for dev->waiting,
		 * and make this use the "wait_event_interruptible()"
		 * interfaces. The cli/sti that used to be here
		 * did nothing.
		 *
		 * See also parport_release()
		 */

		/*
		 * If dev->waiting is clear now, an interrupt
		 * gave us the port and we would deadlock if we slept.
		 */
		if (dev->waiting) {
			wait_event_interruptible(dev->wait_q,
						 !dev->waiting);
			if (signal_pending(current))
				return -EINTR;
			r = 1;
		} else {
			r = 0;
#ifdef PARPORT_DEBUG_SHARING
			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
			       dev->name);
#endif
		}

#ifdef PARPORT_DEBUG_SHARING
		if (dev->port->physport->cad != dev)
			printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
			       dev->name, dev->port->physport->cad ?
			       dev->port->physport->cad->name:"nobody");
#endif
	}
	dev->waiting = 0;
	return r;
}
1280EXPORT_SYMBOL(parport_claim_or_block);
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
/*
 * parport_release - give up access to a parallel port device
 * @dev: device releasing the port
 *
 * Saves the device's port state, clears the current-owner pointer and
 * then hands the port to the next interested device: first anyone
 * sleeping in parport_claim_or_block() on the wait list, then wait-list
 * devices with a wakeup callback, and finally any other device with a
 * wakeup callback.
 */
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore(&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport when not owner\n",
		       port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all(port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers */
	port->ops->save_state(port, dev->state);

	/*
	 * If anybody is waiting, find out who's been there longest and
	 * then wake them up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) {	/* sleeping in claim_or_block */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			pd->wakeup(pd->private);
			if (dev->port->cad)	/* wakeup claimed the port */
				return;
		} else {
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/*
	 * Nobody was waiting, so walk the list to see if anyone is
	 * interested in being woken up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
1356EXPORT_SYMBOL(parport_release);
1357
1358irqreturn_t parport_irq_handler(int irq, void *dev_id)
1359{
1360 struct parport *port = dev_id;
1361
1362 parport_generic_irq(port);
1363
1364 return IRQ_HANDLED;
1365}
1366EXPORT_SYMBOL(parport_irq_handler);
1367
1368MODULE_LICENSE("GPL");
1369