1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#undef PARPORT_DEBUG_SHARING
19
20#include <linux/module.h>
21#include <linux/string.h>
22#include <linux/threads.h>
23#include <linux/parport.h>
24#include <linux/delay.h>
25#include <linux/errno.h>
26#include <linux/interrupt.h>
27#include <linux/ioport.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/sched.h>
31#include <linux/kmod.h>
32
33#include <linux/spinlock.h>
34#include <linux/mutex.h>
35#include <asm/irq.h>
36
37#undef PARPORT_PARANOID
38
39#define PARPORT_DEFAULT_TIMESLICE (HZ/5)
40
41unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
42int parport_default_spintime = DEFAULT_SPIN_TIME;
43
44static LIST_HEAD(portlist);
45static DEFINE_SPINLOCK(parportlist_lock);
46
47
48static LIST_HEAD(all_ports);
49static DEFINE_SPINLOCK(full_list_lock);
50
51static LIST_HEAD(drivers);
52
53static DEFINE_MUTEX(registration_lock);
54
55
/*
 * Stub operations installed by parport_remove_port().  Once a port is
 * gone, any driver still holding a pointer to it hits these harmless
 * no-ops instead of jumping into unloaded low-level driver code.
 */
static void dead_write_lines (struct parport *p, unsigned char b){}
static unsigned char dead_read_lines (struct parport *p) { return 0; }
static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg (struct parport *p){}
static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
static void dead_state (struct parport *p, struct parport_state *s) { }
static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read (struct parport *p, void *b, size_t l, int f)
{ return 0; }
/* Replacement operations table for a removed port: every hook is a
   do-nothing stub from above. */
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
102
103
/* Call attach(port) for each registered driver.
   Caller must hold registration_lock (all callers here do). */
static void attach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */
	struct parport_driver *drv;
	list_for_each_entry(drv, &drivers, list)
		drv->attach(port);
}
111
112
/* Call detach(port) for each registered driver.
   Caller must hold registration_lock. */
static void detach_driver_chain(struct parport *port)
{
	struct parport_driver *drv;
	/* caller has exclusive registration_lock */
	list_for_each_entry(drv, &drivers, list)
		drv->detach (port);
}
120
121
/* Ask kmod for a low-level parport hardware driver when none has
   registered any ports yet. */
static void get_lowlevel_driver (void)
{
	/* There is no actual module called this: you should set
	 * up an alias for modutils (e.g. "alias parport_lowlevel
	 * parport_pc" in /etc/modprobe.conf). */
	request_module ("parport_lowlevel");
}
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
/*
 * parport_register_driver - register a parallel port device driver
 * @drv: structure describing the driver
 *
 * Adds @drv to the global driver chain and immediately calls
 * drv->attach() for every port already known.  If no ports exist yet,
 * an attempt is made to load a low-level driver first.
 *
 * Returns 0 (this interface currently cannot fail).
 */
int parport_register_driver (struct parport_driver *drv)
{
	struct parport *port;

	/* No ports known yet: try to pull in a hardware driver. */
	if (list_empty(&portlist))
		get_lowlevel_driver ();

	/* registration_lock serialises against port announce/remove. */
	mutex_lock(&registration_lock);
	list_for_each_entry(port, &portlist, list)
		drv->attach(port);
	list_add(&drv->list, &drivers);
	mutex_unlock(&registration_lock);

	return 0;
}
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
/*
 * parport_unregister_driver - deregister a parallel port device driver
 * @drv: structure describing the driver that was given to
 *       parport_register_driver()
 *
 * Removes @drv from the driver chain and calls drv->detach() for every
 * port currently registered.  After this returns, drv->attach() will
 * no longer be called for new ports.
 */
void parport_unregister_driver (struct parport_driver *drv)
{
	struct parport *port;

	mutex_lock(&registration_lock);
	list_del_init(&drv->list);
	list_for_each_entry(port, &portlist, list)
		drv->detach(port);
	mutex_unlock(&registration_lock);
}
197
198static void free_port (struct parport *port)
199{
200 int d;
201 spin_lock(&full_list_lock);
202 list_del(&port->full_list);
203 spin_unlock(&full_list_lock);
204 for (d = 0; d < 5; d++) {
205 kfree(port->probe_info[d].class_name);
206 kfree(port->probe_info[d].mfr);
207 kfree(port->probe_info[d].model);
208 kfree(port->probe_info[d].cmdset);
209 kfree(port->probe_info[d].description);
210 }
211
212 kfree(port->name);
213 kfree(port);
214}
215
216
217
218
219
220
221
222
223
224struct parport *parport_get_port (struct parport *port)
225{
226 atomic_inc (&port->ref_count);
227 return port;
228}
229
230
231
232
233
234
235
236
237
238void parport_put_port (struct parport *port)
239{
240 if (atomic_dec_and_test (&port->ref_count))
241
242 free_port (port);
243
244 return;
245}
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276struct parport *parport_register_port(unsigned long base, int irq, int dma,
277 struct parport_operations *ops)
278{
279 struct list_head *l;
280 struct parport *tmp;
281 int num;
282 int device;
283 char *name;
284
285 tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
286 if (!tmp) {
287 printk(KERN_WARNING "parport: memory squeeze\n");
288 return NULL;
289 }
290
291
292 memset(tmp, 0, sizeof(struct parport));
293 tmp->base = base;
294 tmp->irq = irq;
295 tmp->dma = dma;
296 tmp->muxport = tmp->daisy = tmp->muxsel = -1;
297 tmp->modes = 0;
298 INIT_LIST_HEAD(&tmp->list);
299 tmp->devices = tmp->cad = NULL;
300 tmp->flags = 0;
301 tmp->ops = ops;
302 tmp->physport = tmp;
303 memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
304 rwlock_init(&tmp->cad_lock);
305 spin_lock_init(&tmp->waitlist_lock);
306 spin_lock_init(&tmp->pardevice_lock);
307 tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
308 tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
309 sema_init(&tmp->ieee1284.irq, 0);
310 tmp->spintime = parport_default_spintime;
311 atomic_set (&tmp->ref_count, 1);
312 INIT_LIST_HEAD(&tmp->full_list);
313
314 name = kmalloc(15, GFP_KERNEL);
315 if (!name) {
316 printk(KERN_ERR "parport: memory squeeze\n");
317 kfree(tmp);
318 return NULL;
319 }
320
321
322 spin_lock(&full_list_lock);
323 for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
324 struct parport *p = list_entry(l, struct parport, full_list);
325 if (p->number != num)
326 break;
327 }
328 tmp->portnum = tmp->number = num;
329 list_add_tail(&tmp->full_list, l);
330 spin_unlock(&full_list_lock);
331
332
333
334
335 sprintf(name, "parport%d", tmp->portnum = tmp->number);
336 tmp->name = name;
337
338 for (device = 0; device < 5; device++)
339
340 tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
341
342 tmp->waithead = tmp->waittail = NULL;
343
344 return tmp;
345}
346
347
348
349
350
351
352
353
354
355
356
357
358
/*
 * parport_announce_port - tell device drivers about a parallel port
 * @port: parallel port to announce
 *
 * After registering a port with parport_register_port() and finishing
 * any initialisation, the low-level driver calls this to make the
 * port (and its mux slaves, if any) visible: the port goes onto the
 * global port list and every registered driver's attach() is called.
 */
void parport_announce_port (struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		printk(KERN_WARNING "%s: fix this legacy "
				"no-device port driver!\n",
				port->name);

	parport_proc_register(port);
	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* A muxed port can carry up to two slave ports (slaves[0..1]);
	   they are announced together with the master. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived.  The
	   registration_lock is still held so the driver chain cannot
	   change underneath us. */
	attach_driver_chain (port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
/*
 * parport_remove_port - deregister a parallel port
 * @port: parallel port to deregister
 *
 * Undoes parport_announce_port(): every registered driver's detach()
 * is called for the port (and its mux slaves), the port is taken off
 * the global list, and its operations are swapped for dead_ops so any
 * straggling caller hits harmless stubs instead of unloaded code.
 * Note this does not free the port; the caller's reference is still
 * outstanding until parport_put_port().
 */
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain (port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* From here on, late callers reach only the no-op stubs. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop the references held on the mux slaves. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
/*
 * parport_register_device - register a device on a parallel port
 * @port: port on which the device lives
 * @name: device name (stored by reference; must outlive the device)
 * @pf: preemption callback, or NULL
 * @kf: kick (wake-up) callback, or NULL
 * @irq_func: interrupt-handler callback, or NULL
 * @flags: PARPORT_DEV_* registration flags (e.g. EXCL, LURK)
 * @handle: driver-private cookie passed back to the callbacks
 *
 * Allocates a struct pardevice, links it onto the physical port's
 * device list, and takes a reference on both the port and its
 * low-level driver module; all of that is undone by
 * parport_unregister_device().  Returns NULL on failure.
 */
struct pardevice *
parport_register_device(struct parport *port, const char *name,
			int (*pf)(void *), void (*kf)(void *),
			void (*irq_func)(void *),
			int flags, void *handle)
{
	struct pardevice *tmp;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		printk (KERN_DEBUG "%s: no more devices allowed\n",
			port->name);
		return NULL;
	}

	/* A "lurking" device waits for the port to fall free; that only
	   works if it supplies both preempt and wakeup callbacks. */
	if (flags & PARPORT_DEV_LURK) {
		if (!pf || !kf) {
			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
			return NULL;
		}
	}

	/* We up our own module reference count, and that of the port
	   on which a device is to be registered, to ensure that
	   neither of us gets unloaded while we sleep in (e.g.)
	   kmalloc.  */
	if (!try_module_get(port->ops->owner)) {
		return NULL;
	}

	parport_get_port (port);

	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
	if (tmp == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
		goto out;
	}

	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
	if (tmp->state == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
		goto out_free_pardevice;
	}

	tmp->name = name;
	tmp->port = port;
	tmp->daisy = -1;	/* not a daisy-chain device */
	tmp->preempt = pf;
	tmp->wakeup = kf;
	tmp->private = handle;
	tmp->flags = flags;
	tmp->irq_func = irq_func;
	tmp->waiting = 0;
	tmp->timeout = 5 * HZ;

	/* Chain this onto the list */
	tmp->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don' t need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	if (flags & PARPORT_DEV_EXCL) {
		/* Exclusive access only possible while no other device
		   is registered on the physical port. */
		if (port->physport->devices) {
			spin_unlock (&port->physport->pardevice_lock);
			printk (KERN_DEBUG
				"%s: cannot grant exclusive access for "
				"device %s\n", port->name, name);
			goto out_free_all;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	tmp->next = port->physport->devices;
	wmb(); /* Make sure that tmp->next is written before it's
                  added to the list; see comments marked 'no locking
                  required' */
	if (port->physport->devices)
		port->physport->devices->prev = tmp;
	port->physport->devices = tmp;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&tmp->wait_q);
	tmp->timeslice = parport_default_timeslice;
	tmp->waitnext = tmp->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(tmp, tmp->state);
	/* First device on the port gets the /proc device entry. */
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = tmp;
		parport_device_proc_register(tmp);
	}
	return tmp;

 out_free_all:
	kfree(tmp->state);
 out_free_pardevice:
	kfree(tmp);
 out:
	parport_put_port (port);
	module_put(port->ops->owner);

	return NULL;
}
633
634
635
636
637
638
639
640
/*
 * parport_unregister_device - deregister a device on a parallel port
 * @dev: pointer to structure representing the device
 *
 * Undoes parport_register_device(): unlinks the device from the port,
 * cleans up the wait list, frees the device, and drops the references
 * taken on the port and its module.  If the device forgot to release
 * the port, it is released on its behalf (with a complaint).
 */
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (dev == NULL) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	port = dev->port->physport;

	/* If this device owned the /proc entry, give it up. */
	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release (dev);
	}

	/* Unlink from the port's device list. */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	/* An exclusive device going away re-opens the port to others. */
	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/* Make sure we haven't left any dangling pointers on the port's
	   wait list (the device may still be queued from a failed
	   parport_claim()). */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	kfree(dev);

	/* Drop the references taken in parport_register_device(). */
	module_put(port->ops->owner);
	parport_put_port (port);
}
700
701
702
703
704
705
706
707
708
709
710
711
712
713struct parport *parport_find_number (int number)
714{
715 struct parport *port, *result = NULL;
716
717 if (list_empty(&portlist))
718 get_lowlevel_driver ();
719
720 spin_lock (&parportlist_lock);
721 list_for_each_entry(port, &portlist, list) {
722 if (port->number == number) {
723 result = parport_get_port (port);
724 break;
725 }
726 }
727 spin_unlock (&parportlist_lock);
728 return result;
729}
730
731
732
733
734
735
736
737
738
739
740
741
742
743struct parport *parport_find_base (unsigned long base)
744{
745 struct parport *port, *result = NULL;
746
747 if (list_empty(&portlist))
748 get_lowlevel_driver ();
749
750 spin_lock (&parportlist_lock);
751 list_for_each_entry(port, &portlist, list) {
752 if (port->base == base) {
753 result = parport_get_port (port);
754 break;
755 }
756 }
757 spin_unlock (&parportlist_lock);
758 return result;
759}
760
761
762
763
764
765
766
767
768
769
770
771
772
/*
 * parport_claim - claim access to a parallel port device
 * @dev: device wanting the port
 *
 * Non-blocking (usable where sleeping is not allowed).  Returns 0 on
 * success with @dev as the port's current owner; returns -EAGAIN if
 * the current owner will not be preempted, in which case @dev is
 * queued on the port's wait list when it is entitled to wait (it is
 * sleeping in parport_claim_or_block(), or it has a wakeup callback).
 */
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name,dev->name);
		return 0;
	}

	/* Try to preempt the current owner via its preempt callback;
	   a non-zero return (or no callback at all) means it refuses. */
	write_lock_irqsave (&port->cad_lock, flags);
	if ((oldcad = port->cad) != NULL) {
		if (oldcad->preempt) {
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/* The preempt callback released the port itself
			   (it should not); tolerate it unless someone
			   else grabbed it meanwhile. */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer
	   waiting (bit 0 of dev->waiting tracks wait-list membership). */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again.  */
		spin_lock_irq (&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq (&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select (port, dev->daisy,
					   IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/* If this is the first time we tried to claim the port, register an
	   interest.  This is only allowed for devices sleeping in
	   parport_claim_or_block() (dev->waiting & 2), or those with a
	   wakeup function.  The cad_lock is still held for writing here. */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock (&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock (&port->waitlist_lock);
	}
	write_unlock_irqrestore (&port->cad_lock, flags);
	return -EAGAIN;
}
872
873
874
875
876
877
878
879
880
881
882
883int parport_claim_or_block(struct pardevice *dev)
884{
885 int r;
886
887
888
889 dev->waiting = 2;
890
891
892 r = parport_claim(dev);
893 if (r == -EAGAIN) {
894#ifdef PARPORT_DEBUG_SHARING
895 printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
896#endif
897
898
899
900
901
902
903
904
905
906
907
908 if (dev->waiting) {
909 interruptible_sleep_on (&dev->wait_q);
910 if (signal_pending (current)) {
911 return -EINTR;
912 }
913 r = 1;
914 } else {
915 r = 0;
916#ifdef PARPORT_DEBUG_SHARING
917 printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
918 dev->name);
919#endif
920 }
921
922#ifdef PARPORT_DEBUG_SHARING
923 if (dev->port->physport->cad != dev)
924 printk(KERN_DEBUG "%s: exiting parport_claim_or_block "
925 "but %s owns port!\n", dev->name,
926 dev->port->physport->cad ?
927 dev->port->physport->cad->name:"nobody");
928#endif
929 }
930 dev->waiting = 0;
931 return r;
932}
933
934
935
936
937
938
939
940
941
942
/*
 * parport_release - give up access to a parallel port device
 * @dev: device releasing the port
 *
 * Hands the port back: saves the device's state, clears the current
 * owner, then tries to pass the port to whoever has waited longest
 * (claiming it directly for sleepers in parport_claim_or_block(), or
 * calling wakeup callbacks otherwise).  Complains and returns if @dev
 * is not actually the current owner.
 */
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current owner before touching
	   anything. */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore (&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport "
		       "when not owner\n", port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all (port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers */
	port->ops->save_state(port, dev->state);

	/* If anybody is waiting, find out who's been there longest and
	   then wake them up.  NOTE(review): waithead is walked here
	   without waitlist_lock -- presumably relying on the wmb() in
	   parport_register_device(); confirm against concurrent
	   claim/unregister. */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block() */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			pd->wakeup(pd->private);
			/* Stop as soon as somebody took the port. */
			if (dev->port->cad)
				return;
		} else {
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/* Nobody was waiting, so walk the list to see if anyone is
	   interested in being woken up.  Stop once someone claims the
	   port (port->cad becomes non-NULL). */
	for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
1004
1005irqreturn_t parport_irq_handler(int irq, void *dev_id)
1006{
1007 struct parport *port = dev_id;
1008
1009 parport_generic_irq(port);
1010
1011 return IRQ_HANDLED;
1012}
1013
1014
1015
1016EXPORT_SYMBOL(parport_claim);
1017EXPORT_SYMBOL(parport_claim_or_block);
1018EXPORT_SYMBOL(parport_release);
1019EXPORT_SYMBOL(parport_register_port);
1020EXPORT_SYMBOL(parport_announce_port);
1021EXPORT_SYMBOL(parport_remove_port);
1022EXPORT_SYMBOL(parport_register_driver);
1023EXPORT_SYMBOL(parport_unregister_driver);
1024EXPORT_SYMBOL(parport_register_device);
1025EXPORT_SYMBOL(parport_unregister_device);
1026EXPORT_SYMBOL(parport_get_port);
1027EXPORT_SYMBOL(parport_put_port);
1028EXPORT_SYMBOL(parport_find_number);
1029EXPORT_SYMBOL(parport_find_base);
1030EXPORT_SYMBOL(parport_irq_handler);
1031
1032MODULE_LICENSE("GPL");
1033