1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#undef PARPORT_DEBUG_SHARING
19
20#include <linux/module.h>
21#include <linux/string.h>
22#include <linux/threads.h>
23#include <linux/parport.h>
24#include <linux/delay.h>
25#include <linux/errno.h>
26#include <linux/interrupt.h>
27#include <linux/ioport.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/sched/signal.h>
31#include <linux/kmod.h>
32#include <linux/device.h>
33
34#include <linux/spinlock.h>
35#include <linux/mutex.h>
36#include <asm/irq.h>
37
38#undef PARPORT_PARANOID
39
#define PARPORT_DEFAULT_TIMESLICE (HZ/5)

/* Tunables exported to the rest of parport (adjustable via sysctl). */
unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;

/* Ports that have been announced and are visible to device drivers. */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* All allocated ports, whether announced or not; used for port numbering. */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* Legacy (non-device-model) parport drivers. */
static LIST_HEAD(drivers);

/* Serialises driver registration against port attach/detach. */
static DEFINE_MUTEX(registration_lock);
55
56
/*
 * What a port's ops are replaced with when the port is removed
 * (parport_remove_port) but devices may still hold references to it:
 * every operation becomes a harmless no-op returning zero.
 */
static void dead_write_lines(struct parport *p, unsigned char b){}
static unsigned char dead_read_lines(struct parport *p) { return 0; }
static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg(struct parport *p){}
static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
static void dead_state(struct parport *p, struct parport_state *s) { }
static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read(struct parport *p, void *b, size_t l, int f)
{ return 0; }
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
103
/* Device type used to tell port devices apart from pardevices on the bus. */
static struct device_type parport_device_type = {
	.name = "parport",
};
107
108static int is_parport(struct device *dev)
109{
110 return dev->type == &parport_device_type;
111}
112
113static int parport_probe(struct device *dev)
114{
115 struct parport_driver *drv;
116
117 if (is_parport(dev))
118 return -ENODEV;
119
120 drv = to_parport_driver(dev->driver);
121 if (!drv->probe) {
122
123 struct pardevice *par_dev = to_pardevice(dev);
124
125 if (strcmp(par_dev->name, drv->name))
126 return -ENODEV;
127 return 0;
128 }
129
130 return drv->probe(to_pardevice(dev));
131}
132
/* The parport bus: both ports and pardevices live on it. */
static struct bus_type parport_bus_type = {
	.name = "parport",
	.probe = parport_probe,
};
137
/* Register the parport bus with the driver core; called at module init. */
int parport_bus_init(void)
{
	return bus_register(&parport_bus_type);
}
142
/* Unregister the parport bus; called at module exit. */
void parport_bus_exit(void)
{
	bus_unregister(&parport_bus_type);
}
147
148
149
150
151
152
153
154static int driver_check(struct device_driver *dev_drv, void *_port)
155{
156 struct parport *port = _port;
157 struct parport_driver *drv = to_parport_driver(dev_drv);
158
159 if (drv->match_port)
160 drv->match_port(port);
161 return 0;
162}
163
164
/* Call attach(port) for each registered driver. */
static void attach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */
	struct parport_driver *drv;

	list_for_each_entry(drv, &drivers, list)
		drv->attach(port);

	/*
	 * call the driver_check function of the drivers registered in
	 * new device model
	 */

	bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
}
180
181static int driver_detach(struct device_driver *_drv, void *_port)
182{
183 struct parport *port = _port;
184 struct parport_driver *drv = to_parport_driver(_drv);
185
186 if (drv->detach)
187 drv->detach(port);
188 return 0;
189}
190
191
/* Call detach(port) for each registered driver. */
static void detach_driver_chain(struct parport *port)
{
	struct parport_driver *drv;

	/* caller has exclusive registration_lock */
	list_for_each_entry(drv, &drivers, list)
		drv->detach(port);

	/*
	 * call the detach function of the drivers registered in
	 * new device model
	 */

	bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}
206
207
/* Ask kmod for there to be a driver for the port, if none is loaded yet. */
static void get_lowlevel_driver(void)
{
	/*
	 * There is no actual module called this: you should set
	 * up an alias for modutils.
	 */
	request_module("parport_lowlevel");
}
216
217
218
219
220
221
222
/*
 * iterates through all the devices connected to the bus and sends the device
 * details to the match_port callback of the driver, so that the driver can
 * know about the ports that are present and choose the one it wants.
 */
static int port_check(struct device *dev, void *dev_drv)
{
	struct parport_driver *drv = dev_drv;

	/* only send ports, do not send other devices connected to bus */
	/* NOTE(review): drv->match_port is called without a NULL check;
	 * the only caller (__parport_register_driver) checks it first. */
	if (is_parport(dev))
		drv->match_port(to_parport_dev(dev));
	return 0;
}
232
233
234
235
236
237
/*
 * Returns 1 if dev is a parallel port, stopping the bus_for_each_dev()
 * walk; used to detect whether any port is registered at all.
 */
static int port_detect(struct device *dev, void *dev_drv)
{
	return is_parport(dev) ? 1 : 0;
}
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
/**
 *	__parport_register_driver - register a parallel port device driver
 *	@drv: structure describing the driver
 *	@owner: module owning @drv
 *	@mod_name: textual module name
 *
 *	This can be called by a parallel port device driver in order
 *	to receive notifications about ports being found in the
 *	system, as well as ports no longer available.  If no port is
 *	registered yet, the lowlevel driver is requested via kmod.
 *
 *	Returns 0 on success.  The driver's match_port() callback may
 *	already have been invoked by the time this returns.
 **/
int __parport_register_driver(struct parport_driver *drv, struct module *owner,
			      const char *mod_name)
{
	/* using device model */
	int ret;

	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &parport_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;
	ret = driver_register(&drv->driver);
	if (ret)
		return ret;

	/*
	 * check if bus has any parallel port registered; if
	 * none is found then load the lowlevel driver.
	 */
	ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
			       port_detect);
	if (!ret)
		get_lowlevel_driver();

	mutex_lock(&registration_lock);
	if (drv->match_port)
		bus_for_each_dev(&parport_bus_type, NULL, drv,
				 port_check);
	mutex_unlock(&registration_lock);

	return 0;
}
EXPORT_SYMBOL(__parport_register_driver);
311
312static int port_detach(struct device *dev, void *_drv)
313{
314 struct parport_driver *drv = _drv;
315
316 if (is_parport(dev) && drv->detach)
317 drv->detach(to_parport_dev(dev));
318
319 return 0;
320}
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
/**
 *	parport_unregister_driver - deregister a parallel port device driver
 *	@drv: structure describing the driver that was given to
 *	      parport_register_driver()
 *
 *	This should be called by a parallel port device driver that
 *	has registered itself using parport_register_driver() when it
 *	is about to be unloaded.
 *
 *	When it returns, the driver's detach() routine will have been
 *	called for every port it was attached to; all such calls are
 *	guaranteed to have finished by the time this function returns.
 **/
void parport_unregister_driver(struct parport_driver *drv)
{
	mutex_lock(&registration_lock);
	bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
	driver_unregister(&drv->driver);
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_unregister_driver);
347
/* Release function for a port's bus_dev: runs when the last reference
 * obtained via parport_get_port() is dropped. */
static void free_port(struct device *dev)
{
	int d;
	struct parport *port = to_parport_dev(dev);

	spin_lock(&full_list_lock);
	list_del(&port->full_list);
	spin_unlock(&full_list_lock);
	/* 5 probe_info slots, matching the memset in parport_register_port;
	 * presumably the port itself plus mux/daisy devices — TODO confirm. */
	for (d = 0; d < 5; d++) {
		kfree(port->probe_info[d].class_name);
		kfree(port->probe_info[d].mfr);
		kfree(port->probe_info[d].model);
		kfree(port->probe_info[d].cmdset);
		kfree(port->probe_info[d].description);
	}

	kfree(port->name);
	kfree(port);
}
367
368
369
370
371
372
373
374
375
376struct parport *parport_get_port(struct parport *port)
377{
378 struct device *dev = get_device(&port->bus_dev);
379
380 return to_parport_dev(dev);
381}
382EXPORT_SYMBOL(parport_get_port);
383
/* Undo parport_register_port(): unregister the bus device.  free_port()
 * runs once the last reference to the port is dropped. */
void parport_del_port(struct parport *port)
{
	device_unregister(&port->bus_dev);
}
EXPORT_SYMBOL(parport_del_port);
389
390
391
392
393
394
395
396
397
398
/**
 *	parport_put_port - decrement a port's reference count
 *	@port: the port
 *
 *	This should be called once for each call to parport_get_port(),
 *	once the port is no longer needed.  When the reference count
 *	reaches zero (port is no longer used), free_port is called.
 **/
void parport_put_port(struct parport *port)
{
	put_device(&port->bus_dev);
}
EXPORT_SYMBOL(parport_put_port);
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434struct parport *parport_register_port(unsigned long base, int irq, int dma,
435 struct parport_operations *ops)
436{
437 struct list_head *l;
438 struct parport *tmp;
439 int num;
440 int device;
441 char *name;
442 int ret;
443
444 tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
445 if (!tmp)
446 return NULL;
447
448
449 tmp->base = base;
450 tmp->irq = irq;
451 tmp->dma = dma;
452 tmp->muxport = tmp->daisy = tmp->muxsel = -1;
453 tmp->modes = 0;
454 INIT_LIST_HEAD(&tmp->list);
455 tmp->devices = tmp->cad = NULL;
456 tmp->flags = 0;
457 tmp->ops = ops;
458 tmp->physport = tmp;
459 memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
460 rwlock_init(&tmp->cad_lock);
461 spin_lock_init(&tmp->waitlist_lock);
462 spin_lock_init(&tmp->pardevice_lock);
463 tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
464 tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
465 sema_init(&tmp->ieee1284.irq, 0);
466 tmp->spintime = parport_default_spintime;
467 atomic_set(&tmp->ref_count, 1);
468 INIT_LIST_HEAD(&tmp->full_list);
469
470 name = kmalloc(15, GFP_KERNEL);
471 if (!name) {
472 kfree(tmp);
473 return NULL;
474 }
475
476
477 spin_lock(&full_list_lock);
478 for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
479 struct parport *p = list_entry(l, struct parport, full_list);
480 if (p->number != num)
481 break;
482 }
483 tmp->portnum = tmp->number = num;
484 list_add_tail(&tmp->full_list, l);
485 spin_unlock(&full_list_lock);
486
487
488
489
490 sprintf(name, "parport%d", tmp->portnum = tmp->number);
491 tmp->name = name;
492 tmp->bus_dev.bus = &parport_bus_type;
493 tmp->bus_dev.release = free_port;
494 dev_set_name(&tmp->bus_dev, name);
495 tmp->bus_dev.type = &parport_device_type;
496
497 for (device = 0; device < 5; device++)
498
499 tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
500
501 tmp->waithead = tmp->waittail = NULL;
502
503 ret = device_register(&tmp->bus_dev);
504 if (ret) {
505 put_device(&tmp->bus_dev);
506 return NULL;
507 }
508
509 return tmp;
510}
511EXPORT_SYMBOL(parport_register_port);
512
513
514
515
516
517
518
519
520
521
522
523
524
/**
 *	parport_announce_port - tell device drivers about a parallel port
 *	@port: parallel port to announce
 *
 *	After a port driver has registered a parallel port with
 *	parport_register_port, and performed any necessary
 *	initialisation or adjustments, it should call
 *	parport_announce_port() in order to notify all device drivers
 *	that have called parport_register_driver().  Their attach()
 *	functions will be called, with @port as the parameter.
 **/
void parport_announce_port(struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		pr_warn("%s: fix this legacy no-device port driver!\n",
			port->name);

	parport_proc_register(port);
	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* slaves[0..1] are the mux/daisy sub-ports, if any. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived. */
	attach_driver_chain(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_announce_port);
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
/**
 *	parport_remove_port - deregister a parallel port
 *	@port: parallel port to deregister
 *
 *	When a parallel port driver is forcibly unloaded, or a
 *	parallel port becomes inaccessible, the port driver must call
 *	this function in order to deal with device drivers that still
 *	want to use it.
 *
 *	The parport structure associated with the port has its
 *	operations structure replaced with one containing 'null'
 *	operations that return errors or just don't do anything.
 *
 *	Any drivers that have registered themselves using
 *	parport_register_driver() are notified that the port is no
 *	longer accessible by having their detach() routines called
 *	with @port as the parameter.
 **/
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain(port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* Existing device references keep working, but harmlessly. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
EXPORT_SYMBOL(parport_remove_port);
621
622static void free_pardevice(struct device *dev)
623{
624 struct pardevice *par_dev = to_pardevice(dev);
625
626 kfree(par_dev->name);
627 kfree(par_dev);
628}
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
/**
 *	parport_register_dev_model - register a device on a parallel port
 *	@port: port to which the device is attached
 *	@name: a name to refer to the device
 *	@par_dev_cb: struct containing callbacks
 *	@id: device number to be given to the device
 *
 *	This function, called by parallel port device drivers,
 *	declares that a device is connected to a port, and tells the
 *	system all it needs to know.
 *
 *	Returns a pointer to the pardevice, or %NULL on failure.
 **/
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
			   const struct pardev_cb *par_dev_cb, int id)
{
	struct pardevice *par_dev;
	int ret;
	char *devname;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		pr_err("%s: no more devices allowed\n", port->name);
		return NULL;
	}

	if (par_dev_cb->flags & PARPORT_DEV_LURK) {
		if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
			pr_info("%s: refused to register lurking device (%s) without callbacks\n",
				port->name, name);
			return NULL;
		}
	}

	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/*
	 * We up our own module reference count, and that of the port
	 * on which a device is to be registered, to ensure that
	 * neither of us gets unloaded while we sleep in (e.g.)
	 * parport_claim_or_block.
	 */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
	if (!par_dev)
		goto err_put_port;

	par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
	if (!par_dev->state)
		goto err_put_par_dev;

	devname = kstrdup(name, GFP_KERNEL);
	if (!devname)
		goto err_free_par_dev;

	par_dev->name = devname;
	par_dev->port = port;
	par_dev->daisy = -1;
	par_dev->preempt = par_dev_cb->preempt;
	par_dev->wakeup = par_dev_cb->wakeup;
	par_dev->private = par_dev_cb->private;
	par_dev->flags = par_dev_cb->flags;
	par_dev->irq_func = par_dev_cb->irq_func;
	par_dev->waiting = 0;
	par_dev->timeout = 5 * HZ;

	par_dev->dev.parent = &port->bus_dev;
	par_dev->dev.bus = &parport_bus_type;
	ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
	if (ret)
		goto err_free_devname;
	par_dev->dev.release = free_pardevice;
	par_dev->devmodel = true;
	ret = device_register(&par_dev->dev);
	if (ret) {
		/* from here on, name/par_dev are freed by free_pardevice */
		kfree(par_dev->state);
		put_device(&par_dev->dev);
		goto err_put_port;
	}

	/* Chain this onto the list */
	par_dev->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don't need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			pr_debug("%s: cannot grant exclusive access for device %s\n",
				 port->name, name);
			kfree(par_dev->state);
			device_unregister(&par_dev->dev);
			goto err_put_port;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	par_dev->next = port->physport->devices;
	wmb();	/*
		 * Make sure that par_dev->next is written before it's
		 * added to the list; lockless readers walk this chain.
		 */
	if (port->physport->devices)
		port->physport->devices->prev = par_dev;
	port->physport->devices = par_dev;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&par_dev->wait_q);
	par_dev->timeslice = parport_default_timeslice;
	par_dev->waitnext = NULL;
	par_dev->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(par_dev, par_dev->state);
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = par_dev;
		parport_device_proc_register(par_dev);
	}

	return par_dev;

err_free_devname:
	kfree(devname);
err_free_par_dev:
	kfree(par_dev->state);
err_put_par_dev:
	/* before device_register(): par_dev is still ours to free */
	if (!par_dev->devmodel)
		kfree(par_dev);
err_put_port:
	parport_put_port(port);
	module_put(port->ops->owner);

	return NULL;
}
EXPORT_SYMBOL(parport_register_dev_model);
832
833
834
835
836
837
838
839
/**
 *	parport_unregister_device - deregister a device on a parallel port
 *	@dev: pointer to structure representing device
 *
 *	This undoes the effect of parport_register_dev_model().
 **/
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (!dev) {
		pr_err("%s: passed NULL\n", __func__);
		return;
	}
#endif

	port = dev->port->physport;

	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release(dev);
	}

	spin_lock(&port->pardevice_lock);
	/* unlink from the port's device chain */
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/*
	 * Make sure we haven't left any pointers around in the wait
	 * list.
	 */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	device_unregister(&dev->dev);

	module_put(port->ops->owner);
	parport_put_port(port);
}
EXPORT_SYMBOL(parport_unregister_device);
902
903
904
905
906
907
908
909
910
911
912
913
914
915struct parport *parport_find_number(int number)
916{
917 struct parport *port, *result = NULL;
918
919 if (list_empty(&portlist))
920 get_lowlevel_driver();
921
922 spin_lock(&parportlist_lock);
923 list_for_each_entry(port, &portlist, list) {
924 if (port->number == number) {
925 result = parport_get_port(port);
926 break;
927 }
928 }
929 spin_unlock(&parportlist_lock);
930 return result;
931}
932EXPORT_SYMBOL(parport_find_number);
933
934
935
936
937
938
939
940
941
942
943
944
945
946struct parport *parport_find_base(unsigned long base)
947{
948 struct parport *port, *result = NULL;
949
950 if (list_empty(&portlist))
951 get_lowlevel_driver();
952
953 spin_lock(&parportlist_lock);
954 list_for_each_entry(port, &portlist, list) {
955 if (port->base == base) {
956 result = parport_get_port(port);
957 break;
958 }
959 }
960 spin_unlock(&parportlist_lock);
961 return result;
962}
963EXPORT_SYMBOL(parport_find_base);
964
965
966
967
968
969
970
971
972
973
974
975
976
/**
 *	parport_claim - claim access to a parallel port device
 *	@dev: pointer to structure representing a device on the port
 *
 *	This function will not block and so can be used from interrupt
 *	context.  If parport_claim() succeeds in claiming access to
 *	the port it returns zero and the port is available to use.  It
 *	may fail (returning non-zero) if the port is in use by another
 *	driver and that driver is not willing to relinquish control of
 *	the port.
 **/
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		pr_info("%s: %s already owner\n", dev->port->name, dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave(&port->cad_lock, flags);
	oldcad = port->cad;
	if (oldcad) {
		if (oldcad->preempt) {
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/*
			 * I think we'll actually deadlock rather than
			 * get here, but just in case..
			 */
			pr_warn("%s: %s released port when preempted!\n",
				port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting. */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again. */
		/* NOTE(review): spin_unlock_irq() below unconditionally
		 * re-enables IRQs although cad_lock was taken with
		 * irqsave; longstanding behaviour — confirm before
		 * changing. */
		spin_lock_irq(&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq(&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select(port, dev->daisy,
					  IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/*
	 * If this is the first time we tried to claim the port, register an
	 * interest.  This is only allowed for devices sleeping in
	 * parport_claim_or_block(), or those with a wakeup function.
	 */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock(&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock(&port->waitlist_lock);
	}
	write_unlock_irqrestore(&port->cad_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(parport_claim);
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
/**
 *	parport_claim_or_block - claim access to a parallel port device
 *	@dev: pointer to structure representing a device on the port
 *
 *	This behaves like parport_claim(), but will block if necessary
 *	to wait for the port to be free.  A return value of 1
 *	indicates that it slept; 0 means that it succeeded without
 *	needing to sleep.  A negative error code indicates failure.
 **/
int parport_claim_or_block(struct pardevice *dev)
{
	int r;

	/*
	 * Signal to parport_claim() that we can wait even without a
	 * wakeup function.
	 */
	dev->waiting = 2;

	/* Try to claim the port.  If this fails, we need to sleep. */
	r = parport_claim(dev);
	if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n",
		       dev->name);
#endif
		/*
		 * FIXME!!! Use the proper locking for dev->waiting,
		 * and make this use the "wait_event_interruptible()"
		 * interfaces. The cli/sti that used to be here
		 * did nothing.
		 *
		 * See also parport_release()
		 */

		/*
		 * If dev->waiting is clear now, an interrupt
		 * gave us the port and we would deadlock if we slept.
		 */
		if (dev->waiting) {
			wait_event_interruptible(dev->wait_q,
						 !dev->waiting);
			if (signal_pending(current))
				return -EINTR;
			r = 1;
		} else {
			r = 0;
#ifdef PARPORT_DEBUG_SHARING
			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
			       dev->name);
#endif
		}

#ifdef PARPORT_DEBUG_SHARING
		if (dev->port->physport->cad != dev)
			printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
			       dev->name, dev->port->physport->cad ?
			       dev->port->physport->cad->name : "nobody");
#endif
	}
	dev->waiting = 0;
	return r;
}
EXPORT_SYMBOL(parport_claim_or_block);
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
/**
 *	parport_release - give up access to a parallel port device
 *	@dev: pointer to structure representing parallel port device
 *
 *	This function cannot fail, but it should not be called without
 *	the port claimed.  Similarly, if the port is already claimed
 *	you should not try claiming it again.
 **/
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore(&port->cad_lock, flags);
		pr_warn("%s: %s tried to release parport when not owner\n",
			port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all(port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers */
	port->ops->save_state(port, dev->state);

	/*
	 * If anybody is waiting, find out who's been there longest and
	 * then wake them up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			pr_err("%s: don't know how to wake %s\n",
			       port->name, pd->name);
		}
	}

	/*
	 * Nobody was waiting, so walk the list to see if anyone is
	 * interested in being woken up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
EXPORT_SYMBOL(parport_release);
1223
1224irqreturn_t parport_irq_handler(int irq, void *dev_id)
1225{
1226 struct parport *port = dev_id;
1227
1228 parport_generic_irq(port);
1229
1230 return IRQ_HANDLED;
1231}
1232EXPORT_SYMBOL(parport_irq_handler);
1233
1234MODULE_LICENSE("GPL");
1235