1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#undef PARPORT_DEBUG_SHARING
19
20#include <linux/module.h>
21#include <linux/string.h>
22#include <linux/threads.h>
23#include <linux/parport.h>
24#include <linux/delay.h>
25#include <linux/errno.h>
26#include <linux/interrupt.h>
27#include <linux/ioport.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/sched/signal.h>
31#include <linux/kmod.h>
32#include <linux/device.h>
33
34#include <linux/spinlock.h>
35#include <linux/mutex.h>
36#include <asm/irq.h>
37
38#undef PARPORT_PARANOID
39
40#define PARPORT_DEFAULT_TIMESLICE (HZ/5)
41
/* Module-wide defaults copied into each new port/device at registration. */
unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;

/* Ports that have been announced to drivers; guarded by parportlist_lock. */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* Every allocated port (announced or not); used to pick free port numbers. */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* Legacy (non-device-model) drivers; walked under registration_lock. */
static LIST_HEAD(drivers);

/* Serialises driver/port registration and the attach/detach callbacks. */
static DEFINE_MUTEX(registration_lock);
55
56
57static void dead_write_lines(struct parport *p, unsigned char b){}
58static unsigned char dead_read_lines(struct parport *p) { return 0; }
59static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
60 unsigned char c) { return 0; }
61static void dead_onearg(struct parport *p){}
62static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
63static void dead_state(struct parport *p, struct parport_state *s) { }
64static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
65{ return 0; }
66static size_t dead_read(struct parport *p, void *b, size_t l, int f)
67{ return 0; }
/*
 * Operations vector installed on a port by parport_remove_port(): every
 * entry is a harmless stub that does nothing or returns 0/NULL-ish
 * values, so drivers still holding the port cannot call into a
 * low-level driver that has gone away.
 */
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
103
/* Device type tagging port devices (vs. pardevices) on the parport bus. */
static struct device_type parport_device_type = {
	.name = "parport",
};
107
108static int is_parport(struct device *dev)
109{
110 return dev->type == &parport_device_type;
111}
112
113static int parport_probe(struct device *dev)
114{
115 struct parport_driver *drv;
116
117 if (is_parport(dev))
118 return -ENODEV;
119
120 drv = to_parport_driver(dev->driver);
121 if (!drv->probe) {
122
123 struct pardevice *par_dev = to_pardevice(dev);
124
125 if (strcmp(par_dev->name, drv->name))
126 return -ENODEV;
127 return 0;
128 }
129
130 return drv->probe(to_pardevice(dev));
131}
132
/* The parport bus: carries both port devices and attached pardevices. */
static struct bus_type parport_bus_type = {
	.name = "parport",
	.probe = parport_probe,
};
137
/* Register the parport bus with the driver core; 0 or negative errno. */
int parport_bus_init(void)
{
	return bus_register(&parport_bus_type);
}
142
/* Tear down the parport bus; counterpart of parport_bus_init(). */
void parport_bus_exit(void)
{
	bus_unregister(&parport_bus_type);
}
147
148
149
150
151
152
153
154static int driver_check(struct device_driver *dev_drv, void *_port)
155{
156 struct parport *port = _port;
157 struct parport_driver *drv = to_parport_driver(dev_drv);
158
159 if (drv->match_port)
160 drv->match_port(port);
161 return 0;
162}
163
164
/* Call attach(port) for each registered driver. */
static void attach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */
	struct parport_driver *drv;

	list_for_each_entry(drv, &drivers, list)
		drv->attach(port);

	/*
	 * call the driver_check function of the drivers registered in
	 * new device model
	 */

	bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
}
180
181static int driver_detach(struct device_driver *_drv, void *_port)
182{
183 struct parport *port = _port;
184 struct parport_driver *drv = to_parport_driver(_drv);
185
186 if (drv->detach)
187 drv->detach(port);
188 return 0;
189}
190
191
/* Call detach(port) for each registered driver. */
static void detach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */
	struct parport_driver *drv;

	/* Legacy drivers are required to supply detach() on this path. */
	list_for_each_entry(drv, &drivers, list)
		drv->detach(port);

	/*
	 * call the driver_detach function of the drivers registered in
	 * new device model
	 */

	bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}
206
207
/* Ask kmod for a low-level parport driver. */
static void get_lowlevel_driver(void)
{
	/*
	 * There is no actual module called this: you should set
	 * up an alias for modutils.
	 */
	request_module("parport_lowlevel");
}
216
217
218
219
220
221
222
223static int port_check(struct device *dev, void *dev_drv)
224{
225 struct parport_driver *drv = dev_drv;
226
227
228 if (is_parport(dev))
229 drv->match_port(to_parport_dev(dev));
230 return 0;
231}
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266int __parport_register_driver(struct parport_driver *drv, struct module *owner,
267 const char *mod_name)
268{
269 if (list_empty(&portlist))
270 get_lowlevel_driver();
271
272 if (drv->devmodel) {
273
274 int ret;
275
276
277 drv->driver.name = drv->name;
278 drv->driver.bus = &parport_bus_type;
279 drv->driver.owner = owner;
280 drv->driver.mod_name = mod_name;
281 ret = driver_register(&drv->driver);
282 if (ret)
283 return ret;
284
285 mutex_lock(®istration_lock);
286 if (drv->match_port)
287 bus_for_each_dev(&parport_bus_type, NULL, drv,
288 port_check);
289 mutex_unlock(®istration_lock);
290 } else {
291 struct parport *port;
292
293 drv->devmodel = false;
294
295 mutex_lock(®istration_lock);
296 list_for_each_entry(port, &portlist, list)
297 drv->attach(port);
298 list_add(&drv->list, &drivers);
299 mutex_unlock(®istration_lock);
300 }
301
302 return 0;
303}
304EXPORT_SYMBOL(__parport_register_driver);
305
306static int port_detach(struct device *dev, void *_drv)
307{
308 struct parport_driver *drv = _drv;
309
310 if (is_parport(dev) && drv->detach)
311 drv->detach(to_parport_dev(dev));
312
313 return 0;
314}
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
/**
 *	parport_unregister_driver - deregister a parallel port device driver
 *	@drv: structure describing the driver that was given to
 *	      parport_register_driver()
 *
 *	This should be called by a parallel port device driver that
 *	has registered itself using parport_register_driver() when it
 *	is about to be unloaded.
 *
 *	When it returns, the driver's attach() routine will no longer be
 *	called, and for each port that attach() was called for, the
 *	detach() routine will have been called.
 **/
void parport_unregister_driver(struct parport_driver *drv)
{
	struct parport *port;

	mutex_lock(&registration_lock);
	if (drv->devmodel) {
		/* Device-model driver: detach from each port device. */
		bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
		driver_unregister(&drv->driver);
	} else {
		/* Legacy driver: drop from the list, then detach ports. */
		list_del_init(&drv->list);
		list_for_each_entry(port, &portlist, list)
			drv->detach(port);
	}
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_unregister_driver);
349
350static void free_port(struct device *dev)
351{
352 int d;
353 struct parport *port = to_parport_dev(dev);
354
355 spin_lock(&full_list_lock);
356 list_del(&port->full_list);
357 spin_unlock(&full_list_lock);
358 for (d = 0; d < 5; d++) {
359 kfree(port->probe_info[d].class_name);
360 kfree(port->probe_info[d].mfr);
361 kfree(port->probe_info[d].model);
362 kfree(port->probe_info[d].cmdset);
363 kfree(port->probe_info[d].description);
364 }
365
366 kfree(port->name);
367 kfree(port);
368}
369
370
371
372
373
374
375
376
377
378struct parport *parport_get_port(struct parport *port)
379{
380 struct device *dev = get_device(&port->bus_dev);
381
382 return to_parport_dev(dev);
383}
384EXPORT_SYMBOL(parport_get_port);
385
/* Unregister the port's struct device; the final put triggers free_port(). */
void parport_del_port(struct parport *port)
{
	device_unregister(&port->bus_dev);
}
EXPORT_SYMBOL(parport_del_port);
391
392
393
394
395
396
397
398
399
400
/**
 *	parport_put_port - drop a reference to a parallel port
 *	@port: the port
 *
 *	Drops the driver-core reference taken by parport_get_port();
 *	when the last reference goes, free_port() runs.
 */
void parport_put_port(struct parport *port)
{
	put_device(&port->bus_dev);
}
EXPORT_SYMBOL(parport_put_port);
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436struct parport *parport_register_port(unsigned long base, int irq, int dma,
437 struct parport_operations *ops)
438{
439 struct list_head *l;
440 struct parport *tmp;
441 int num;
442 int device;
443 char *name;
444 int ret;
445
446 tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
447 if (!tmp)
448 return NULL;
449
450
451 tmp->base = base;
452 tmp->irq = irq;
453 tmp->dma = dma;
454 tmp->muxport = tmp->daisy = tmp->muxsel = -1;
455 tmp->modes = 0;
456 INIT_LIST_HEAD(&tmp->list);
457 tmp->devices = tmp->cad = NULL;
458 tmp->flags = 0;
459 tmp->ops = ops;
460 tmp->physport = tmp;
461 memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
462 rwlock_init(&tmp->cad_lock);
463 spin_lock_init(&tmp->waitlist_lock);
464 spin_lock_init(&tmp->pardevice_lock);
465 tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
466 tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
467 sema_init(&tmp->ieee1284.irq, 0);
468 tmp->spintime = parport_default_spintime;
469 atomic_set(&tmp->ref_count, 1);
470 INIT_LIST_HEAD(&tmp->full_list);
471
472 name = kmalloc(15, GFP_KERNEL);
473 if (!name) {
474 kfree(tmp);
475 return NULL;
476 }
477
478
479 spin_lock(&full_list_lock);
480 for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
481 struct parport *p = list_entry(l, struct parport, full_list);
482 if (p->number != num)
483 break;
484 }
485 tmp->portnum = tmp->number = num;
486 list_add_tail(&tmp->full_list, l);
487 spin_unlock(&full_list_lock);
488
489
490
491
492 sprintf(name, "parport%d", tmp->portnum = tmp->number);
493 tmp->name = name;
494 tmp->bus_dev.bus = &parport_bus_type;
495 tmp->bus_dev.release = free_port;
496 dev_set_name(&tmp->bus_dev, name);
497 tmp->bus_dev.type = &parport_device_type;
498
499 for (device = 0; device < 5; device++)
500
501 tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
502
503 tmp->waithead = tmp->waittail = NULL;
504
505 ret = device_register(&tmp->bus_dev);
506 if (ret) {
507 put_device(&tmp->bus_dev);
508 return NULL;
509 }
510
511 return tmp;
512}
513EXPORT_SYMBOL(parport_register_port);
514
515
516
517
518
519
520
521
522
523
524
525
526
/**
 *	parport_announce_port - tell device drivers about a parallel port
 *	@port: parallel port to announce
 *
 *	After a port driver has registered a parallel port with
 *	parport_register_port, and performed any necessary
 *	initialisation or adjustments, it should call
 *	parport_announce_port() in order to notify all device drivers
 *	that have called parport_register_driver().  Their attach()
 *	functions will be called, with @port as the parameter.
 **/
void parport_announce_port(struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		printk(KERN_WARNING "%s: fix this legacy no-device port driver!\n",
		       port->name);

	parport_proc_register(port);
	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* Mux slaves (slots 1 and 2) become visible with the master. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived. */
	attach_driver_chain(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_announce_port);
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
/**
 *	parport_remove_port - deregister a parallel port
 *	@port: parallel port to deregister
 *
 *	When a parallel port driver is forcibly unloaded, or a
 *	parallel port becomes inaccessible, the port driver must call
 *	this function in order to deal with device drivers that still
 *	want to use it.
 *
 *	The parport structure associated with the port has its
 *	operations structure replaced with one containing 'null'
 *	operations that return errors or just don't do anything.
 *
 *	Any drivers that have registered themselves using
 *	parport_register_driver() are notified that the port is no
 *	longer accessible by having their detach() routines called
 *	with @port as the parameter.
 **/
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain(port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* From now on, stale users hit harmless stubs. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop our reference to each mux slave port. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
EXPORT_SYMBOL(parport_remove_port);
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
/**
 *	parport_register_device - register a device on a parallel port
 *	@port: port to which the device is attached
 *	@name: a name to refer to the device
 *	@pf: preemption callback
 *	@kf: kick callback (wake-up)
 *	@irq_func: interrupt handler
 *	@flags: registration flags (PARPORT_DEV_LURK, PARPORT_DEV_EXCL, ...)
 *	@handle: data passed to the callback functions
 *
 *	Allocates a pardevice and links it onto @port's device list
 *	(legacy, non-device-model path).  Returns the new device, or
 *	%NULL if the port refuses more devices, a callback requirement
 *	is unmet, or allocation fails.
 **/
struct pardevice *
parport_register_device(struct parport *port, const char *name,
			int (*pf)(void *), void (*kf)(void *),
			void (*irq_func)(void *),
			int flags, void *handle)
{
	struct pardevice *tmp;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		printk(KERN_DEBUG "%s: no more devices allowed\n",
		       port->name);
		return NULL;
	}

	if (flags & PARPORT_DEV_LURK) {
		if (!pf || !kf) {
			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
			return NULL;
		}
	}

	if (flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/*
	 * We up our own module reference count, and that of the port
	 * on which a device is to be registered, to ensure that
	 * neither of us gets unloaded while we sleep in (e.g.)
	 * kmalloc.
	 */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
	if (!tmp)
		goto out;

	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
	if (!tmp->state)
		goto out_free_pardevice;

	tmp->name = name;
	tmp->port = port;
	tmp->daisy = -1;
	tmp->preempt = pf;
	tmp->wakeup = kf;
	tmp->private = handle;
	tmp->flags = flags;
	tmp->irq_func = irq_func;
	tmp->waiting = 0;
	tmp->timeout = 5 * HZ;
	tmp->devmodel = false;

	/* Chain this onto the list */
	tmp->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don't
	 * need to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	/* Re-check exclusivity now that we hold the lock. */
	if (flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			printk(KERN_DEBUG
			       "%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			goto out_free_all;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	tmp->next = port->physport->devices;
	wmb();	/*
		 * Make sure that tmp->next is written before it's
		 * added to the list.
		 */
	if (port->physport->devices)
		port->physport->devices->prev = tmp;
	port->physport->devices = tmp;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&tmp->wait_q);
	tmp->timeslice = parport_default_timeslice;
	tmp->waitnext = tmp->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(tmp, tmp->state);
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = tmp;
		parport_device_proc_register(tmp);
	}
	return tmp;

 out_free_all:
	kfree(tmp->state);
 out_free_pardevice:
	kfree(tmp);
 out:
	parport_put_port(port);
	module_put(port->ops->owner);

	return NULL;
}
EXPORT_SYMBOL(parport_register_device);
816
817static void free_pardevice(struct device *dev)
818{
819 struct pardevice *par_dev = to_pardevice(dev);
820
821 kfree(par_dev->name);
822 kfree(par_dev);
823}
824
/**
 *	parport_register_dev_model - register a device on a parallel port
 *	@port: port to which the device is attached
 *	@name: a name to refer to the device
 *	@par_dev_cb: struct containing the callbacks
 *	@id: device number to be given to the device
 *
 *	Device-model counterpart of parport_register_device(): besides
 *	linking the device onto @port's list it registers a struct
 *	device on the parport bus.  Returns the new device or %NULL.
 **/
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
			   const struct pardev_cb *par_dev_cb, int id)
{
	struct pardevice *par_dev;
	int ret;
	char *devname;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		pr_err("%s: no more devices allowed\n", port->name);
		return NULL;
	}

	if (par_dev_cb->flags & PARPORT_DEV_LURK) {
		if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
			pr_info("%s: refused to register lurking device (%s) without callbacks\n",
				port->name, name);
			return NULL;
		}
	}

	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/* Keep the port's module and the port itself pinned while we sleep. */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
	if (!par_dev)
		goto err_put_port;

	par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
	if (!par_dev->state)
		goto err_put_par_dev;

	devname = kstrdup(name, GFP_KERNEL);
	if (!devname)
		goto err_free_par_dev;

	par_dev->name = devname;
	par_dev->port = port;
	par_dev->daisy = -1;
	par_dev->preempt = par_dev_cb->preempt;
	par_dev->wakeup = par_dev_cb->wakeup;
	par_dev->private = par_dev_cb->private;
	par_dev->flags = par_dev_cb->flags;
	par_dev->irq_func = par_dev_cb->irq_func;
	par_dev->waiting = 0;
	par_dev->timeout = 5 * HZ;

	par_dev->dev.parent = &port->bus_dev;
	par_dev->dev.bus = &parport_bus_type;
	ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
	if (ret)
		goto err_free_devname;
	par_dev->dev.release = free_pardevice;
	par_dev->devmodel = true;
	ret = device_register(&par_dev->dev);
	if (ret) {
		/* put_device() runs free_pardevice(): name + par_dev. */
		kfree(par_dev->state);
		put_device(&par_dev->dev);
		goto err_put_port;
	}

	/* Chain this onto the list */
	par_dev->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don't
	 * need to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	/* Re-check exclusivity now that we hold the lock. */
	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			pr_debug("%s: cannot grant exclusive access for device %s\n",
				 port->name, name);
			kfree(par_dev->state);
			device_unregister(&par_dev->dev);
			goto err_put_port;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	par_dev->next = port->physport->devices;
	wmb();	/*
		 * Make sure that par_dev->next is written before it's
		 * added to the list.
		 */
	if (port->physport->devices)
		port->physport->devices->prev = par_dev;
	port->physport->devices = par_dev;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&par_dev->wait_q);
	par_dev->timeslice = parport_default_timeslice;
	par_dev->waitnext = NULL;
	par_dev->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(par_dev, par_dev->state);
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = par_dev;
		parport_device_proc_register(par_dev);
	}

	return par_dev;

err_free_devname:
	kfree(devname);
err_free_par_dev:
	kfree(par_dev->state);
err_put_par_dev:
	if (!par_dev->devmodel)
		kfree(par_dev);
err_put_port:
	parport_put_port(port);
	module_put(port->ops->owner);

	return NULL;
}
EXPORT_SYMBOL(parport_register_dev_model);
965
966
967
968
969
970
971
972
/**
 *	parport_unregister_device - deregister a device on a parallel port
 *	@dev: pointer to structure representing the device
 *
 *	This undoes the effect of parport_register_device() /
 *	parport_register_dev_model(): the device is unlinked from the
 *	port's lists, forced to release the port if it still owns it,
 *	and freed.
 **/
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (!dev) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	port = dev->port->physport;

	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release(dev);
	}

	/* Unlink from the port's device list. */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/*
	 * Make sure we haven't left any pointers around in the wait
	 * list.
	 */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	/* Device-model devices are freed by free_pardevice() on last put. */
	if (dev->devmodel)
		device_unregister(&dev->dev);
	else
		kfree(dev);

	module_put(port->ops->owner);
	parport_put_port(port);
}
EXPORT_SYMBOL(parport_unregister_device);
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051struct parport *parport_find_number(int number)
1052{
1053 struct parport *port, *result = NULL;
1054
1055 if (list_empty(&portlist))
1056 get_lowlevel_driver();
1057
1058 spin_lock(&parportlist_lock);
1059 list_for_each_entry(port, &portlist, list) {
1060 if (port->number == number) {
1061 result = parport_get_port(port);
1062 break;
1063 }
1064 }
1065 spin_unlock(&parportlist_lock);
1066 return result;
1067}
1068EXPORT_SYMBOL(parport_find_number);
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082struct parport *parport_find_base(unsigned long base)
1083{
1084 struct parport *port, *result = NULL;
1085
1086 if (list_empty(&portlist))
1087 get_lowlevel_driver();
1088
1089 spin_lock(&parportlist_lock);
1090 list_for_each_entry(port, &portlist, list) {
1091 if (port->base == base) {
1092 result = parport_get_port(port);
1093 break;
1094 }
1095 }
1096 spin_unlock(&parportlist_lock);
1097 return result;
1098}
1099EXPORT_SYMBOL(parport_find_base);
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
/**
 *	parport_claim - claim access to a parallel port device
 *	@dev: pointer to structure representing a device on the port
 *
 *	This function will not block and so can be used from interrupt
 *	context.  If parport_claim() succeeds in claiming access to
 *	the port it returns zero and the port is available to use.  It
 *	may fail (returning non-zero) if the port is in use by another
 *	driver and that driver is not willing to relinquish control of
 *	the port.
 **/
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name,dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave(&port->cad_lock, flags);
	oldcad = port->cad;
	if (oldcad) {
		if (oldcad->preempt) {
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/*
			 * I think we'll actually deadlock rather than
			 * get here, but just in case..
			 */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting. */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again. */
		spin_lock_irq(&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq(&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select(port, dev->daisy,
					  IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/*
	 * If this is the first time we tried to claim the port, register
	 * an interest.  This is only allowed for devices sleeping in
	 * parport_claim_or_block(), or those with a wakeup function.
	 */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock(&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock(&port->waitlist_lock);
	}
	write_unlock_irqrestore(&port->cad_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(parport_claim);
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
/**
 *	parport_claim_or_block - claim access to a parallel port device
 *	@dev: pointer to structure representing a device on the port
 *
 *	This behaves like parport_claim(), but may block if necessary
 *	to wait for the port to be free.  Returns 1 if it slept, 0 if
 *	it succeeded immediately, or a negative error code on failure.
 **/
int parport_claim_or_block(struct pardevice *dev)
{
	int r;

	/*
	 * Signal to parport_claim() that we can wait even without a
	 * wakeup function.
	 */
	dev->waiting = 2;

	/* Try to claim the port.  If this fails, we need to sleep. */
	r = parport_claim(dev);
	if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
#endif
		/*
		 * FIXME!!! Use the proper locking for dev->waiting;
		 * the test below is racy against a release happening
		 * on another CPU between the claim and the check.
		 */

		/*
		 * If dev->waiting is clear now, an interrupt gave us
		 * the port and we would deadlock if we slept.
		 */
		if (dev->waiting) {
			wait_event_interruptible(dev->wait_q,
						 !dev->waiting);
			if (signal_pending(current))
				return -EINTR;
			r = 1;
		} else {
			r = 0;
#ifdef PARPORT_DEBUG_SHARING
			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
			       dev->name);
#endif
		}

#ifdef PARPORT_DEBUG_SHARING
		if (dev->port->physport->cad != dev)
			printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
			       dev->name, dev->port->physport->cad ?
			       dev->port->physport->cad->name:"nobody");
#endif
	}
	dev->waiting = 0;
	return r;
}
EXPORT_SYMBOL(parport_claim_or_block);
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
/**
 *	parport_release - give up access to a parallel port device
 *	@dev: pointer to structure representing the parallel port device
 *
 *	Relinquishes ownership of the port and hands it to the
 *	longest-waiting device, or kicks any device interested in
 *	taking over.  Logs a warning and does nothing if @dev is not
 *	the current owner.
 **/
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore(&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport when not owner\n",
		       port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all(port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers */
	port->ops->save_state(port, dev->state);

	/*
	 * If anybody is waiting, find out who's been there longest and
	 * then wake them up.  (NOTE(review): this walk takes no lock on
	 * the wait list — a known historical race in this code.)
	 */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/*
	 * Nobody was waiting, so walk the list to see if anyone is
	 * interested in being woken up.  Stop as soon as somebody
	 * claims the port.
	 */
	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
EXPORT_SYMBOL(parport_release);
1359
1360irqreturn_t parport_irq_handler(int irq, void *dev_id)
1361{
1362 struct parport *port = dev_id;
1363
1364 parport_generic_irq(port);
1365
1366 return IRQ_HANDLED;
1367}
1368EXPORT_SYMBOL(parport_irq_handler);
1369
1370MODULE_LICENSE("GPL");
1371