1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#undef PARPORT_DEBUG_SHARING
19
20#include <linux/module.h>
21#include <linux/string.h>
22#include <linux/threads.h>
23#include <linux/parport.h>
24#include <linux/delay.h>
25#include <linux/errno.h>
26#include <linux/interrupt.h>
27#include <linux/ioport.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/sched.h>
31#include <linux/kmod.h>
32
33#include <linux/spinlock.h>
34#include <linux/mutex.h>
35#include <asm/irq.h>
36
37#undef PARPORT_PARANOID
38
/* Default length of a device's timeslice on the port, in jiffies. */
#define PARPORT_DEFAULT_TIMESLICE	(HZ/5)

unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime =  DEFAULT_SPIN_TIME;

/* Ports that have been announced to device drivers, and the lock
 * protecting additions to and removals from that list. */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* list of all allocated ports, sorted by ->number */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* All registered parport device drivers. */
static LIST_HEAD(drivers);

/* Serialises port announce/remove against driver register/unregister
 * so that attach/detach callbacks see a consistent world. */
static DEFINE_MUTEX(registration_lock);
54
55
/* What you can do to a port that's gone away: nothing.  When a port
 * is removed (parport_remove_port), its ops pointer is swapped for
 * this table of no-op stubs so that any device driver still holding a
 * stale struct parport reference cannot touch real hardware.  All
 * reads return 0; writes and state changes are silently ignored. */
static void dead_write_lines (struct parport *p, unsigned char b){}
static unsigned char dead_read_lines (struct parport *p) { return 0; }
static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg (struct parport *p){}
static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
static void dead_state (struct parport *p, struct parport_state *s) { }
static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read (struct parport *p, void *b, size_t l, int f)
{ return 0; }
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,

	.enable_irq	= dead_onearg,
	.disable_irq	= dead_onearg,

	.data_forward	= dead_onearg,
	.data_reverse	= dead_onearg,

	.init_state	= dead_initstate,
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,
	.nibble_read_data	= dead_read,
	.byte_read_data		= dead_read,

	/* No owning module: this table lives in the parport core itself. */
	.owner		= NULL,
};
102
103
104static void attach_driver_chain(struct parport *port)
105{
106
107 struct parport_driver *drv;
108 list_for_each_entry(drv, &drivers, list)
109 drv->attach(port);
110}
111
112
113static void detach_driver_chain(struct parport *port)
114{
115 struct parport_driver *drv;
116
117 list_for_each_entry(drv, &drivers, list)
118 drv->detach (port);
119}
120
121
/* Ask kmod to load a low-level port driver when a consumer registers
 * but no ports are known yet.  "parport_lowlevel" is not a real
 * module; it is expected to be a module alias set up in the modutils/
 * modprobe configuration. */
static void get_lowlevel_driver (void)
{
	request_module ("parport_lowlevel");
}
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
/**
 * parport_register_driver - register a parallel port device driver
 * @drv: structure describing the driver
 *
 * Adds @drv to the global driver list and calls @drv->attach for
 * every port already known, all under registration_lock so port
 * announce/remove cannot interleave.  Ports registered afterwards
 * will also be offered to the driver.
 *
 * Returns 0 (currently cannot fail).
 */
int parport_register_driver (struct parport_driver *drv)
{
	struct parport *port;

	/* No ports yet?  Try to pull in a low-level driver module. */
	if (list_empty(&portlist))
		get_lowlevel_driver ();

	mutex_lock(&registration_lock);
	/* Offer each existing port to the new driver. */
	list_for_each_entry(port, &portlist, list)
		drv->attach(port);
	list_add(&drv->list, &drivers);
	mutex_unlock(&registration_lock);

	return 0;
}
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187void parport_unregister_driver (struct parport_driver *drv)
188{
189 struct parport *port;
190
191 mutex_lock(®istration_lock);
192 list_del_init(&drv->list);
193 list_for_each_entry(port, &portlist, list)
194 drv->detach(port);
195 mutex_unlock(®istration_lock);
196}
197
198static void free_port (struct parport *port)
199{
200 int d;
201 spin_lock(&full_list_lock);
202 list_del(&port->full_list);
203 spin_unlock(&full_list_lock);
204 for (d = 0; d < 5; d++) {
205 kfree(port->probe_info[d].class_name);
206 kfree(port->probe_info[d].mfr);
207 kfree(port->probe_info[d].model);
208 kfree(port->probe_info[d].cmdset);
209 kfree(port->probe_info[d].description);
210 }
211
212 kfree(port->name);
213 kfree(port);
214}
215
216
217
218
219
220
221
222
223
/**
 * parport_get_port - increment a port's reference count
 * @port: the port
 *
 * This ensures that a struct parport pointer remains valid until the
 * matching parport_put_port() call.  Returns @port for convenience.
 */
struct parport *parport_get_port (struct parport *port)
{
	atomic_inc (&port->ref_count);
	return port;
}
229
230
231
232
233
234
235
236
237
238void parport_put_port (struct parport *port)
239{
240 if (atomic_dec_and_test (&port->ref_count))
241
242 free_port (port);
243
244 return;
245}
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276struct parport *parport_register_port(unsigned long base, int irq, int dma,
277 struct parport_operations *ops)
278{
279 struct list_head *l;
280 struct parport *tmp;
281 int num;
282 int device;
283 char *name;
284
285 tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
286 if (!tmp) {
287 printk(KERN_WARNING "parport: memory squeeze\n");
288 return NULL;
289 }
290
291
292 tmp->base = base;
293 tmp->irq = irq;
294 tmp->dma = dma;
295 tmp->muxport = tmp->daisy = tmp->muxsel = -1;
296 tmp->modes = 0;
297 INIT_LIST_HEAD(&tmp->list);
298 tmp->devices = tmp->cad = NULL;
299 tmp->flags = 0;
300 tmp->ops = ops;
301 tmp->physport = tmp;
302 memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
303 rwlock_init(&tmp->cad_lock);
304 spin_lock_init(&tmp->waitlist_lock);
305 spin_lock_init(&tmp->pardevice_lock);
306 tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
307 tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
308 sema_init(&tmp->ieee1284.irq, 0);
309 tmp->spintime = parport_default_spintime;
310 atomic_set (&tmp->ref_count, 1);
311 INIT_LIST_HEAD(&tmp->full_list);
312
313 name = kmalloc(15, GFP_KERNEL);
314 if (!name) {
315 printk(KERN_ERR "parport: memory squeeze\n");
316 kfree(tmp);
317 return NULL;
318 }
319
320
321 spin_lock(&full_list_lock);
322 for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
323 struct parport *p = list_entry(l, struct parport, full_list);
324 if (p->number != num)
325 break;
326 }
327 tmp->portnum = tmp->number = num;
328 list_add_tail(&tmp->full_list, l);
329 spin_unlock(&full_list_lock);
330
331
332
333
334 sprintf(name, "parport%d", tmp->portnum = tmp->number);
335 tmp->name = name;
336
337 for (device = 0; device < 5; device++)
338
339 tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
340
341 tmp->waithead = tmp->waittail = NULL;
342
343 return tmp;
344}
345
346
347
348
349
350
351
352
353
354
355
356
357
/**
 * parport_announce_port - tell device drivers about a parallel port
 * @port: parallel port to announce
 *
 * After a port driver has registered a parallel port with
 * parport_register_port() and finished its own initialisation, it
 * should call this function to notify all registered device drivers:
 * their attach() callbacks are invoked with @port (and with any mux
 * slave ports) as the parameter.
 */
void parport_announce_port (struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Probe for IEEE 1284.3 daisy-chained devices on the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		printk(KERN_WARNING "%s: fix this legacy "
				"no-device port driver!\n",
				port->name);

	parport_proc_register(port);
	/* Lock order: registration_lock outside parportlist_lock. */
	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* Slots 1 and 2 of slaves[] hold mux slave ports, if any. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) have arrived. */
	attach_driver_chain (port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
/**
 * parport_remove_port - deregister a parallel port
 * @port: parallel port to deregister
 *
 * Reverses parport_announce_port(): detaches all device drivers from
 * the port (and any mux slaves), removes it from the global port
 * list, and swaps its operations for no-op stubs so that stragglers
 * holding a stale reference cannot touch dead hardware.  The port
 * structure itself is only freed when its last reference is dropped
 * via parport_put_port().
 */
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word: drivers must forget this port. */
	detach_driver_chain (port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE 1284.3 daisy-chain topology of this port,
	 * and of each mux slave port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* From now on, any operation on the port is a harmless no-op. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop the references held on the mux slave ports. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
/**
 * parport_register_device - register a device on a parallel port
 * @port: port to which the device is attached
 * @name: a name to refer to the device
 * @pf: preemption callback: asked to give up the port when another
 *      device wants it; returning non-zero refuses
 * @kf: kick (wake-up) callback, called when the port may be claimed
 * @irq_func: function called on a port interrupt while this device
 *            owns the port
 * @flags: registration flags (PARPORT_DEV_LURK, PARPORT_DEV_EXCL)
 * @handle: opaque pointer passed to the callbacks
 *
 * Allocates a struct pardevice and links it onto the physical port's
 * device list.  PARPORT_DEV_LURK requires both @pf and @kf.
 * PARPORT_DEV_EXCL requests exclusive access and fails if any other
 * device is already registered on the port; once granted, further
 * registrations on the port are refused.
 *
 * Takes a reference on the port and on the port driver's module;
 * both are released by parport_unregister_device().
 *
 * Returns the new device, or %NULL on failure.
 */
struct pardevice *
parport_register_device(struct parport *port, const char *name,
			int (*pf)(void *), void (*kf)(void *),
			void (*irq_func)(void *),
			int flags, void *handle)
{
	struct pardevice *tmp;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is already registered. */
		printk (KERN_DEBUG "%s: no more devices allowed\n",
			port->name);
		return NULL;
	}

	if (flags & PARPORT_DEV_LURK) {
		if (!pf || !kf) {
			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
			return NULL;
		}
	}

	/* Pin the port driver's module for the lifetime of this
	 * device, so its ops remain callable. */
	if (!try_module_get(port->ops->owner)) {
		return NULL;
	}

	parport_get_port (port);

	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
	if (tmp == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
		goto out;
	}

	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
	if (tmp->state == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
		goto out_free_pardevice;
	}

	tmp->name = name;
	tmp->port = port;
	tmp->daisy = -1;
	tmp->preempt = pf;
	tmp->wakeup = kf;
	tmp->private = handle;
	tmp->flags = flags;
	tmp->irq_func = irq_func;
	tmp->waiting = 0;
	tmp->timeout = 5 * HZ;

	/* Chain this onto the head of the physical port's device list. */
	tmp->prev = NULL;

	spin_lock(&port->physport->pardevice_lock);

	if (flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/* Someone else is already here: refuse. */
			spin_unlock (&port->physport->pardevice_lock);
			printk (KERN_DEBUG
				"%s: cannot grant exclusive access for "
				"device %s\n", port->name, name);
			goto out_free_all;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	tmp->next = port->physport->devices;
	wmb();	/* ensure tmp->next is visible before tmp is reachable
		 * via the devices list (lock-free readers) */
	if (port->physport->devices)
		port->physport->devices->prev = tmp;
	port->physport->devices = tmp;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&tmp->wait_q);
	tmp->timeslice = parport_default_timeslice;
	tmp->waitnext = tmp->waitprev = NULL;

	/* Run init_state last: it may need other pardevice fields set
	 * above. */
	port->ops->init_state(tmp, tmp->state);
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		/* First device on this port gets the /proc entry. */
		port->proc_device = tmp;
		parport_device_proc_register(tmp);
	}
	return tmp;

 out_free_all:
	kfree(tmp->state);
 out_free_pardevice:
	kfree(tmp);
 out:
	parport_put_port (port);
	module_put(port->ops->owner);

	return NULL;
}
632
633
634
635
636
637
638
639
/**
 * parport_unregister_device - deregister a device on a parallel port
 * @dev: pointer to structure representing the device
 *
 * Undoes parport_register_device(): forcibly releases the port if
 * the device still owns it, unlinks the device from the port's
 * device and wait lists, frees its memory, and drops the references
 * on the port and the port driver's module.
 */
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (dev == NULL) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	/* All bookkeeping lives on the physical port. */
	port = dev->port->physport;

	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	if (port->cad == dev) {
		/* Driver forgot to release the port: do it for them. */
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release (dev);
	}

	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	/* An exclusive device going away re-opens the port. */
	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/* Make sure we haven't left any dangling pointers on the
	 * port's wait list. */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	kfree(dev);

	module_put(port->ops->owner);
	parport_put_port (port);
}
699
700
701
702
703
704
705
706
707
708
709
710
711
712struct parport *parport_find_number (int number)
713{
714 struct parport *port, *result = NULL;
715
716 if (list_empty(&portlist))
717 get_lowlevel_driver ();
718
719 spin_lock (&parportlist_lock);
720 list_for_each_entry(port, &portlist, list) {
721 if (port->number == number) {
722 result = parport_get_port (port);
723 break;
724 }
725 }
726 spin_unlock (&parportlist_lock);
727 return result;
728}
729
730
731
732
733
734
735
736
737
738
739
740
741
742struct parport *parport_find_base (unsigned long base)
743{
744 struct parport *port, *result = NULL;
745
746 if (list_empty(&portlist))
747 get_lowlevel_driver ();
748
749 spin_lock (&parportlist_lock);
750 list_for_each_entry(port, &portlist, list) {
751 if (port->base == base) {
752 result = parport_get_port (port);
753 break;
754 }
755 }
756 spin_unlock (&parportlist_lock);
757 return result;
758}
759
760
761
762
763
764
765
766
767
768
769
770
771
/**
 * parport_claim - claim access to a parallel port device
 * @dev: device claiming the port
 *
 * Tries to give @dev ownership of its port.  If another device owns
 * the port it is asked, via its preempt callback, to give the port
 * up; if it refuses (or has no preempt callback), -%EAGAIN is
 * returned and — for devices with a wakeup callback or those
 * sleeping in parport_claim_or_block() — @dev is queued on the
 * port's wait list.  On success the device's saved hardware state is
 * restored and 0 is returned.
 */
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name,dev->name);
		return 0;
	}

	/* Preempt any current device. */
	write_lock_irqsave (&port->cad_lock, flags);
	if ((oldcad = port->cad) != NULL) {
		if (oldcad->preempt) {
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/* The preempt callback released the port itself:
			 * warn, and only proceed if it is now free. */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Bit 0 of ->waiting means "on the wait list"; take ourselves
	 * off it now that we're getting the port. */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		spin_lock_irq (&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq (&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* This device now owns the port. */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, record which mux line is selected.
	 * NOTE(review): only the bookkeeping is done here; presumably
	 * the hardware mux switch happens elsewhere — confirm. */
	if (dev->port->muxport >= 0) {
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy-chain device, select it. */
	if (dev->daisy >= 0) {
		if (!parport_daisy_select (port, dev->daisy,
					   IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif

	/* Restore the new owner's saved hardware state. */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/* Register an interest in the port.  Only allowed for devices
	 * sleeping in parport_claim_or_block() (waiting & 2) or those
	 * with a wakeup callback — otherwise there would be no way to
	 * ever hand them the port.  cad_lock is still held here. */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock (&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* Add ourselves to the tail of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock (&port->waitlist_lock);
	}
	write_unlock_irqrestore (&port->cad_lock, flags);
	return -EAGAIN;
}
871
872
873
874
875
876
877
878
879
880
881
/**
 * parport_claim_or_block - claim access to a parallel port device,
 *			    sleeping until the port is free if needed
 * @dev: device claiming the port
 *
 * Like parport_claim(), but sleeps interruptibly until the port
 * becomes available.  Returns 0 if the port was claimed without
 * sleeping, 1 if it had to sleep, or -%EINTR if a signal arrived
 * while waiting.
 */
int parport_claim_or_block(struct pardevice *dev)
{
	int r;

	/* Bit 1 of ->waiting tells parport_claim() we may wait even
	 * without a wakeup callback. */
	dev->waiting = 2;

	/* Try to claim the port.  If this fails, we need to sleep.  */
	r = parport_claim(dev);
	if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
#endif
		/* Between parport_claim() failing and this check,
		 * parport_release() may already have handed us the
		 * port (it clears ->waiting when it does); sleeping
		 * then would deadlock, so only sleep while ->waiting
		 * is still set. */
		if (dev->waiting) {
			wait_event_interruptible(dev->wait_q,
						 !dev->waiting);
			if (signal_pending (current)) {
				return -EINTR;
			}
			r = 1;
		} else {
			r = 0;
#ifdef PARPORT_DEBUG_SHARING
			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
			       dev->name);
#endif
		}

#ifdef PARPORT_DEBUG_SHARING
		if (dev->port->physport->cad != dev)
			printk(KERN_DEBUG "%s: exiting parport_claim_or_block "
			       "but %s owns port!\n", dev->name,
			       dev->port->physport->cad ?
			       dev->port->physport->cad->name:"nobody");
#endif
	}
	dev->waiting = 0;
	return r;
}
933
934
935
936
937
938
939
940
941
942
/**
 * parport_release - give up access to a parallel port device
 * @dev: device releasing the port
 *
 * Gives up ownership of the port, saving the device's hardware state
 * for its next claim.  A waiter sleeping in parport_claim_or_block()
 * is handed the port directly and woken; otherwise each waiter's
 * wakeup callback is tried in turn, and failing that every other
 * registered device with a wakeup callback is notified.
 */
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current owner. */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore (&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport "
		       "when not owner\n", port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is a mux port, forget the selection. */
	if (dev->port->muxport >= 0) {
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all (port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save device's state so it can be restored on the next claim. */
	port->ops->save_state(port, dev->state);

	/* Hand the port to whoever has been waiting longest.
	 * NOTE(review): waithead is walked here without taking
	 * waitlist_lock — confirm this is safe against concurrent
	 * list changes. */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) {
			/* Sleeping in claim_or_block: give it the port
			 * directly and wake it. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			pd->wakeup(pd->private);
			/* If the wakeup claimed the port, we're done. */
			if (dev->port->cad)
				return;
		} else {
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/* Nobody on the wait list took it: offer the port to every
	 * other device that has a wakeup callback, until one claims. */
	for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
1004
1005irqreturn_t parport_irq_handler(int irq, void *dev_id)
1006{
1007 struct parport *port = dev_id;
1008
1009 parport_generic_irq(port);
1010
1011 return IRQ_HANDLED;
1012}
1013
1014
1015
1016EXPORT_SYMBOL(parport_claim);
1017EXPORT_SYMBOL(parport_claim_or_block);
1018EXPORT_SYMBOL(parport_release);
1019EXPORT_SYMBOL(parport_register_port);
1020EXPORT_SYMBOL(parport_announce_port);
1021EXPORT_SYMBOL(parport_remove_port);
1022EXPORT_SYMBOL(parport_register_driver);
1023EXPORT_SYMBOL(parport_unregister_driver);
1024EXPORT_SYMBOL(parport_register_device);
1025EXPORT_SYMBOL(parport_unregister_device);
1026EXPORT_SYMBOL(parport_get_port);
1027EXPORT_SYMBOL(parport_put_port);
1028EXPORT_SYMBOL(parport_find_number);
1029EXPORT_SYMBOL(parport_find_base);
1030EXPORT_SYMBOL(parport_irq_handler);
1031
1032MODULE_LICENSE("GPL");
1033