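/*
 * PCMCIA 16-bit resource management: allocation and release of I/O port
 * ranges and memory windows, IRQ setup, and access to a card's
 * configuration registers.
 */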
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

#include <asm/irq.h>

#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>

#include "cs_internal.h"
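/* Access speed to program into PCMCIA I/O windows (module parameter) */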
static int io_speed;
module_param(io_speed, int, 0444);
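/*
 * Thin wrappers around the socket's resource_ops backend: validate the
 * socket's memory ranges and find a memory region suitable for a window.
 */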
int pcmcia_validate_mem(struct pcmcia_socket *s)
{
        if (s->resource_ops->validate_mem)
                return s->resource_ops->validate_mem(s);

        return 0;
}

struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align,
                                        int low, struct pcmcia_socket *s)
{
        if (s->resource_ops->find_mem)
                return s->resource_ops->find_mem(base, num, align, low, s);
        return NULL;
}
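/*
 * release_io_space() - release I/O ports obtained via alloc_io_space()
 *
 * Decrements the in-use count of the socket I/O window containing @res and
 * frees the window once nothing references it any more.
 */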
static void release_io_space(struct pcmcia_socket *s, struct resource *res)
{
        resource_size_t num = resource_size(res);
        int i;

        dev_dbg(&s->dev, "release_io_space for %pR\n", res);

        for (i = 0; i < MAX_IO_WIN; i++) {
                if (!s->io[i].res)
                        continue;
                if ((s->io[i].res->start <= res->start) &&
                    (s->io[i].res->end >= res->end)) {
                        s->io[i].InUse -= num;
                        if (res->parent)
                                release_resource(res);
                        res->start = res->end = 0;
                        res->flags = IORESOURCE_IO;

                        if (s->io[i].InUse == 0) {
                                release_resource(s->io[i].res);
                                kfree(s->io[i].res);
                                s->io[i].res = NULL;
                        }
                }
        }
}
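/*
 * alloc_io_space() - allocate I/O ports for use by a PCMCIA device
 *
 * On entry, @res carries the requested base address in ->start and the
 * number of ports in ->end; @lines is the number of I/O address lines
 * decoded by the card and determines the alignment of the allocation.
 */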
static int alloc_io_space(struct pcmcia_socket *s, struct resource *res,
                        unsigned int lines)
{
        unsigned int align;
        unsigned int base = res->start;
        unsigned int num = res->end;
        int ret;

        res->flags |= IORESOURCE_IO;

        dev_dbg(&s->dev, "alloc_io_space request for %pR, %d lines\n",
                res, lines);

        align = base ? (lines ? 1<<lines : 0) : 1;
        if (align && (align < num)) {
                if (base) {
                        dev_dbg(&s->dev, "odd IO request\n");
                        align = 0;
                } else
                        while (align && (align < num))
                                align <<= 1;
        }
        if (base & ~(align-1)) {
                dev_dbg(&s->dev, "odd IO request\n");
                align = 0;
        }

        ret = s->resource_ops->find_io(s, res->flags, &base, num, align,
                                &res->parent);
        if (ret) {
                dev_dbg(&s->dev, "alloc_io_space request failed (%d)\n", ret);
                return -EINVAL;
        }

        res->start = base;
        res->end = res->start + num - 1;

        if (res->parent) {
                ret = request_resource(res->parent, res);
                if (ret) {
                        dev_warn(&s->dev,
                                "request_resource %pR failed: %d\n", res, ret);
                        res->parent = NULL;
                        release_io_space(s, res);
                }
        }
        dev_dbg(&s->dev, "alloc_io_space request result %d: %pR\n", ret, res);
        return ret;
}
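/*
 * pcmcia_access_config() - read or write a card configuration register
 *
 * Common helper for pcmcia_read_config_byte() and pcmcia_write_config_byte().
 * The device configuration must be locked, i.e. the device must have been
 * enabled with pcmcia_enable_device().
 */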
static int pcmcia_access_config(struct pcmcia_device *p_dev,
                                off_t where, u8 *val,
                                int (*accessf) (struct pcmcia_socket *s,
                                                int attr, unsigned int addr,
                                                unsigned int len, void *ptr))
{
        struct pcmcia_socket *s;
        config_t *c;
        int addr;
        int ret = 0;

        s = p_dev->socket;

        mutex_lock(&s->ops_mutex);
        c = p_dev->function_config;

        if (!(c->state & CONFIG_LOCKED)) {
                dev_dbg(&p_dev->dev, "Configuration isn't locked\n");
                mutex_unlock(&s->ops_mutex);
                return -EACCES;
        }

        addr = (p_dev->config_base + where) >> 1;

        ret = accessf(s, 1, addr, 1, val);

        mutex_unlock(&s->ops_mutex);

        return ret;
}
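/**
 * pcmcia_read_config_byte() - read a byte from a card configuration register
 * @p_dev: pcmcia device
 * @where: register offset
 * @val: destination for the value read
 */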
int pcmcia_read_config_byte(struct pcmcia_device *p_dev, off_t where, u8 *val)
{
        return pcmcia_access_config(p_dev, where, val, pcmcia_read_cis_mem);
}
EXPORT_SYMBOL(pcmcia_read_config_byte);
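/**
 * pcmcia_write_config_byte() - write a byte to a card configuration register
 * @p_dev: pcmcia device
 * @where: register offset
 * @val: value to write
 */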
int pcmcia_write_config_byte(struct pcmcia_device *p_dev, off_t where, u8 val)
{
        return pcmcia_access_config(p_dev, where, &val, pcmcia_write_cis_mem);
}
EXPORT_SYMBOL(pcmcia_write_config_byte);
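/**
 * pcmcia_map_mem_page() - modify the card offset of a memory window
 * @p_dev: pcmcia device
 * @res: memory window as returned by pcmcia_request_window()
 * @offset: card address the window should map to
 */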
int pcmcia_map_mem_page(struct pcmcia_device *p_dev, struct resource *res,
                        unsigned int offset)
{
        struct pcmcia_socket *s = p_dev->socket;
        unsigned int w;
        int ret;

        w = ((res->flags & IORESOURCE_BITS & WIN_FLAGS_REQ) >> 2) - 1;
        if (w >= MAX_WIN)
                return -EINVAL;

        mutex_lock(&s->ops_mutex);
        s->win[w].card_start = offset;
        ret = s->ops->set_mem_map(s, &s->win[w]);
        if (ret)
                dev_warn(&p_dev->dev, "failed to set_mem_map\n");
        mutex_unlock(&s->ops_mutex);
        return ret;
}
EXPORT_SYMBOL(pcmcia_map_mem_page);
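/**
 * pcmcia_fixup_iowidth() - reduce the I/O data path width to 8 bit
 * @p_dev: pcmcia device
 *
 * Re-programs all active I/O windows of the socket to use an 8-bit data
 * path. Only allowed while a card is present and the configuration is
 * locked.
 */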
int pcmcia_fixup_iowidth(struct pcmcia_device *p_dev)
{
        struct pcmcia_socket *s = p_dev->socket;
        pccard_io_map io_off = { 0, 0, 0, 0, 1 };
        pccard_io_map io_on;
        int i, ret = 0;

        mutex_lock(&s->ops_mutex);

        dev_dbg(&p_dev->dev, "fixup iowidth to 8bit\n");

        if (!(s->state & SOCKET_PRESENT) ||
            !(p_dev->function_config->state & CONFIG_LOCKED)) {
                dev_dbg(&p_dev->dev, "No card? Config not locked?\n");
                ret = -EACCES;
                goto unlock;
        }

        io_on.speed = io_speed;
        for (i = 0; i < MAX_IO_WIN; i++) {
                if (!s->io[i].res)
                        continue;
                io_off.map = i;
                io_on.map = i;

                io_on.flags = MAP_ACTIVE | IO_DATA_PATH_WIDTH_8;
                io_on.start = s->io[i].res->start;
                io_on.stop = s->io[i].res->end;

                s->ops->set_io_map(s, &io_off);
                mdelay(40);
                s->ops->set_io_map(s, &io_on);
        }
unlock:
        mutex_unlock(&s->ops_mutex);

        return ret;
}
EXPORT_SYMBOL(pcmcia_fixup_iowidth);
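/**
 * pcmcia_fixup_vpp() - set Vpp to a new voltage level
 * @p_dev: pcmcia device
 * @new_vpp: new Vpp value to program into the socket
 *
 * Only allowed while a card is present and the configuration is locked.
 */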
int pcmcia_fixup_vpp(struct pcmcia_device *p_dev, unsigned char new_vpp)
{
        struct pcmcia_socket *s = p_dev->socket;
        int ret = 0;

        mutex_lock(&s->ops_mutex);

        dev_dbg(&p_dev->dev, "fixup Vpp to %d\n", new_vpp);

        if (!(s->state & SOCKET_PRESENT) ||
            !(p_dev->function_config->state & CONFIG_LOCKED)) {
                dev_dbg(&p_dev->dev, "No card? Config not locked?\n");
                ret = -EACCES;
                goto unlock;
        }

        s->socket.Vpp = new_vpp;
        if (s->ops->set_socket(s, &s->socket)) {
                dev_warn(&p_dev->dev, "Unable to set VPP\n");
                ret = -EIO;
                goto unlock;
        }
        p_dev->vpp = new_vpp;

unlock:
        mutex_unlock(&s->ops_mutex);

        return ret;
}
EXPORT_SYMBOL(pcmcia_fixup_vpp);
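/*
 * pcmcia_release_configuration() - physically disable a device's configuration
 *
 * Drops the configuration lock. When the last lock on the socket goes away,
 * Vpp and the interrupt routing are shut down; I/O windows that are no
 * longer referenced are disabled as well.
 */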
int pcmcia_release_configuration(struct pcmcia_device *p_dev)
{
        pccard_io_map io = { 0, 0, 0, 0, 1 };
        struct pcmcia_socket *s = p_dev->socket;
        config_t *c;
        int i;

        mutex_lock(&s->ops_mutex);
        c = p_dev->function_config;
        if (p_dev->_locked) {
                p_dev->_locked = 0;
                if (--(s->lock_count) == 0) {
                        s->socket.flags = SS_OUTPUT_ENA;
                        s->socket.Vpp = 0;
                        s->socket.io_irq = 0;
                        s->ops->set_socket(s, &s->socket);
                }
        }
        if (c->state & CONFIG_LOCKED) {
                c->state &= ~CONFIG_LOCKED;
                if (c->state & CONFIG_IO_REQ)
                        for (i = 0; i < MAX_IO_WIN; i++) {
                                if (!s->io[i].res)
                                        continue;
                                s->io[i].Config--;
                                if (s->io[i].Config != 0)
                                        continue;
                                io.map = i;
                                s->ops->set_io_map(s, &io);
                        }
        }
        mutex_unlock(&s->ops_mutex);

        return 0;
}
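/*
 * pcmcia_release_io() - release the I/O ranges requested by a device
 *
 * Undoes pcmcia_request_io(). The configuration should have been released
 * beforehand, so the ranges are no longer in active use.
 */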
static int pcmcia_release_io(struct pcmcia_device *p_dev)
{
        struct pcmcia_socket *s = p_dev->socket;
        int ret = -EINVAL;
        config_t *c;

        mutex_lock(&s->ops_mutex);
        if (!p_dev->_io)
                goto out;

        c = p_dev->function_config;

        release_io_space(s, &c->io[0]);

        if (c->io[1].end)
                release_io_space(s, &c->io[1]);

        p_dev->_io = 0;
        c->state &= ~CONFIG_IO_REQ;

out:
        mutex_unlock(&s->ops_mutex);

        return ret;
}
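/**
 * pcmcia_release_window() - release a memory window
 * @p_dev: pcmcia device
 * @res: memory window as returned by pcmcia_request_window()
 *
 * Shuts down the window and releases the system memory backing it.
 */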
int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res)
{
        struct pcmcia_socket *s = p_dev->socket;
        pccard_mem_map *win;
        unsigned int w;

        dev_dbg(&p_dev->dev, "releasing window %pR\n", res);

        w = ((res->flags & IORESOURCE_BITS & WIN_FLAGS_REQ) >> 2) - 1;
        if (w >= MAX_WIN)
                return -EINVAL;

        mutex_lock(&s->ops_mutex);
        win = &s->win[w];

        if (!(p_dev->_win & CLIENT_WIN_REQ(w))) {
                dev_dbg(&p_dev->dev, "not releasing unknown window\n");
                mutex_unlock(&s->ops_mutex);
                return -EINVAL;
        }

        /* Shut down the memory window */
        win->flags &= ~MAP_ACTIVE;
        s->ops->set_mem_map(s, win);
        s->state &= ~SOCKET_WIN_REQ(w);

        /* Release the system memory backing the window */
        if (win->res) {
                release_resource(res);
                release_resource(win->res);
                kfree(win->res);
                win->res = NULL;
        }
        res->start = res->end = 0;
        res->flags = IORESOURCE_MEM;
        p_dev->_win &= ~CLIENT_WIN_REQ(w);
        mutex_unlock(&s->ops_mutex);

        return 0;
}
EXPORT_SYMBOL(pcmcia_release_window);
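/**
 * pcmcia_enable_device() - set up and activate a PCMCIA device
 * @p_dev: the associated PCMCIA device
 *
 * Programs the socket (Vpp, card type, interrupt routing), writes the
 * card's configuration registers, enables the requested I/O windows and
 * marks the configuration as locked.
 */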
int pcmcia_enable_device(struct pcmcia_device *p_dev)
{
        int i;
        unsigned int base;
        struct pcmcia_socket *s = p_dev->socket;
        config_t *c;
        pccard_io_map iomap;
        unsigned char status = 0;
        unsigned char ext_status = 0;
        unsigned char option = 0;
        unsigned int flags = p_dev->config_flags;

        if (!(s->state & SOCKET_PRESENT))
                return -ENODEV;

        mutex_lock(&s->ops_mutex);
        c = p_dev->function_config;
        if (c->state & CONFIG_LOCKED) {
                mutex_unlock(&s->ops_mutex);
                dev_dbg(&p_dev->dev, "Configuration is locked\n");
                return -EACCES;
        }

        /* Do power control */
        s->socket.Vpp = p_dev->vpp;
        if (s->ops->set_socket(s, &s->socket)) {
                mutex_unlock(&s->ops_mutex);
                dev_printk(KERN_WARNING, &p_dev->dev,
                           "Unable to set socket state\n");
                return -EINVAL;
        }

        /* Pick memory or I/O card, speaker, interrupt routing */
        if (p_dev->_io || flags & CONF_ENABLE_IRQ)
                flags |= CONF_ENABLE_IOCARD;
        if (flags & CONF_ENABLE_IOCARD)
                s->socket.flags |= SS_IOCARD;
        if (flags & CONF_ENABLE_ZVCARD)
                s->socket.flags |= SS_ZVCARD | SS_IOCARD;
        if (flags & CONF_ENABLE_SPKR) {
                s->socket.flags |= SS_SPKR_ENA;
                status = CCSR_AUDIO_ENA;
                if (!(p_dev->config_regs & PRESENT_STATUS))
                        dev_warn(&p_dev->dev, "speaker requested, but "
                                 "PRESENT_STATUS not set!\n");
        }
        if (flags & CONF_ENABLE_IRQ)
                s->socket.io_irq = s->pcmcia_irq;
        else
                s->socket.io_irq = 0;
        if (flags & CONF_ENABLE_ESR) {
                p_dev->config_regs |= PRESENT_EXT_STATUS;
                ext_status = ESR_REQ_ATTN_ENA;
        }
        s->ops->set_socket(s, &s->socket);
        s->lock_count++;

        dev_dbg(&p_dev->dev,
                "enable_device: V %d, flags %x, base %x, regs %x, idx %x\n",
                p_dev->vpp, flags, p_dev->config_base, p_dev->config_regs,
                p_dev->config_index);

        /* Set up the CIS configuration registers */
        base = p_dev->config_base;
        if (p_dev->config_regs & PRESENT_COPY) {
                u16 tmp = 0;
                dev_dbg(&p_dev->dev, "clearing CISREG_SCR\n");
                pcmcia_write_cis_mem(s, 1, (base + CISREG_SCR)>>1, 1, &tmp);
        }
        if (p_dev->config_regs & PRESENT_PIN_REPLACE) {
                u16 tmp = 0;
                dev_dbg(&p_dev->dev, "clearing CISREG_PRR\n");
                pcmcia_write_cis_mem(s, 1, (base + CISREG_PRR)>>1, 1, &tmp);
        }
        if (p_dev->config_regs & PRESENT_OPTION) {
                if (s->functions == 1) {
                        option = p_dev->config_index & COR_CONFIG_MASK;
                } else {
                        option = p_dev->config_index & COR_MFC_CONFIG_MASK;
                        option |= COR_FUNC_ENA|COR_IREQ_ENA;
                        if (p_dev->config_regs & PRESENT_IOBASE_0)
                                option |= COR_ADDR_DECODE;
                }
                if ((flags & CONF_ENABLE_IRQ) &&
                    !(flags & CONF_ENABLE_PULSE_IRQ))
                        option |= COR_LEVEL_REQ;
                pcmcia_write_cis_mem(s, 1, (base + CISREG_COR)>>1, 1, &option);
                mdelay(40);
        }
        if (p_dev->config_regs & PRESENT_STATUS)
                pcmcia_write_cis_mem(s, 1, (base + CISREG_CCSR)>>1, 1, &status);

        if (p_dev->config_regs & PRESENT_EXT_STATUS)
                pcmcia_write_cis_mem(s, 1, (base + CISREG_ESR)>>1, 1,
                                     &ext_status);

        if (p_dev->config_regs & PRESENT_IOBASE_0) {
                u8 b = c->io[0].start & 0xff;
                pcmcia_write_cis_mem(s, 1, (base + CISREG_IOBASE_0)>>1, 1, &b);
                b = (c->io[0].start >> 8) & 0xff;
                pcmcia_write_cis_mem(s, 1, (base + CISREG_IOBASE_1)>>1, 1, &b);
        }
        if (p_dev->config_regs & PRESENT_IOSIZE) {
                u8 b = resource_size(&c->io[0]) + resource_size(&c->io[1]) - 1;
                pcmcia_write_cis_mem(s, 1, (base + CISREG_IOSIZE)>>1, 1, &b);
        }

        /* Configure the I/O windows */
        if (c->state & CONFIG_IO_REQ) {
                iomap.speed = io_speed;
                for (i = 0; i < MAX_IO_WIN; i++)
                        if (s->io[i].res) {
                                iomap.map = i;
                                iomap.flags = MAP_ACTIVE;
                                switch (s->io[i].res->flags & IO_DATA_PATH_WIDTH) {
                                case IO_DATA_PATH_WIDTH_16:
                                        iomap.flags |= MAP_16BIT; break;
                                case IO_DATA_PATH_WIDTH_AUTO:
                                        iomap.flags |= MAP_AUTOSZ; break;
                                default:
                                        break;
                                }
                                iomap.start = s->io[i].res->start;
                                iomap.stop = s->io[i].res->end;
                                s->ops->set_io_map(s, &iomap);
                                s->io[i].Config++;
                        }
        }

        c->state |= CONFIG_LOCKED;
        p_dev->_locked = 1;
        mutex_unlock(&s->ops_mutex);
        return 0;
}
EXPORT_SYMBOL(pcmcia_enable_device);
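/**
 * pcmcia_request_io() - attempt to reserve the device's I/O port ranges
 * @p_dev: the associated PCMCIA device
 *
 * Tries to allocate the I/O ranges described by c->io[0] and c->io[1] of
 * the device's function configuration; on entry each resource carries the
 * requested base in ->start (0 for any) and the number of ports in ->end.
 */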
int pcmcia_request_io(struct pcmcia_device *p_dev)
{
        struct pcmcia_socket *s = p_dev->socket;
        config_t *c = p_dev->function_config;
        int ret = -EINVAL;

        mutex_lock(&s->ops_mutex);
        dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR",
                &c->io[0], &c->io[1]);

        if (!(s->state & SOCKET_PRESENT)) {
                dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n");
                goto out;
        }

        if (c->state & CONFIG_LOCKED) {
                dev_dbg(&p_dev->dev, "Configuration is locked\n");
                goto out;
        }
        if (c->state & CONFIG_IO_REQ) {
                dev_dbg(&p_dev->dev, "IO already configured\n");
                goto out;
        }

        ret = alloc_io_space(s, &c->io[0], p_dev->io_lines);
        if (ret)
                goto out;

        if (c->io[1].end) {
                ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
                if (ret) {
                        struct resource tmp = c->io[0];

                        release_io_space(s, &c->io[0]);

                        c->io[0].end = resource_size(&tmp);
                        c->io[0].start = tmp.start;
                        c->io[0].flags = tmp.flags;
                        goto out;
                }
        } else
                c->io[1].start = 0;

        c->state |= CONFIG_IO_REQ;
        p_dev->_io = 1;

        dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR",
                &c->io[0], &c->io[1]);
out:
        mutex_unlock(&s->ops_mutex);

        return ret;
}
EXPORT_SYMBOL(pcmcia_request_io);
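/**
 * pcmcia_request_irq() - attach an IRQ handler to a PCMCIA device
 * @p_dev: the associated PCMCIA device
 * @handler: IRQ handler to register
 *
 * The interrupt is always requested as shared (IRQF_SHARED), so @handler
 * must be prepared to see interrupts that are not its own.
 */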
int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev,
                                    irq_handler_t handler)
{
        int ret;

        if (!p_dev->irq)
                return -EINVAL;

        ret = request_irq(p_dev->irq, handler, IRQF_SHARED,
                          p_dev->devname, p_dev->priv);
        if (!ret)
                p_dev->_irq = 1;

        return ret;
}
EXPORT_SYMBOL(pcmcia_request_irq);
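/**
 * __pcmcia_request_exclusive_irq() - attach an exclusive IRQ handler
 * @p_dev: the associated PCMCIA device
 * @handler: IRQ handler to register
 *
 * Requests the IRQ exclusively first; if that fails, falls back to a shared
 * request and warns that the driver should be converted to
 * pcmcia_request_irq().
 */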
int __must_check
__pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev,
                               irq_handler_t handler)
{
        int ret;

        if (!p_dev->irq)
                return -EINVAL;

        ret = request_irq(p_dev->irq, handler, 0, p_dev->devname, p_dev->priv);
        if (ret) {
                ret = pcmcia_request_irq(p_dev, handler);
                dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: "
                        "request for exclusive IRQ could not be fulfilled.\n");
                dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: the driver "
                        "needs updating to support shared IRQ lines.\n");
        }
        if (ret)
                dev_printk(KERN_INFO, &p_dev->dev, "request_irq() failed\n");
        else
                p_dev->_irq = 1;

        return ret;
}
EXPORT_SYMBOL(__pcmcia_request_exclusive_irq);


#ifdef CONFIG_PCMCIA_PROBE

/* ISA interrupts already handed out to PCMCIA devices */
static u8 pcmcia_used_irq[32];

/* dummy handler, used only to probe whether an IRQ is available */
static irqreturn_t test_action(int cpl, void *dev_id)
{
        return IRQ_NONE;
}
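/*
 * pcmcia_setup_isa_irq() - try to find and reserve a free ISA interrupt
 *
 * Scans the socket's irq_mask twice: the first pass only accepts IRQs not
 * yet used by another PCMCIA device, the second pass accepts shared ones.
 * Each candidate is verified by briefly requesting it with a dummy handler.
 */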
static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type)
{
        struct pcmcia_socket *s = p_dev->socket;
        unsigned int try, irq;
        u32 mask = s->irq_mask;
        int ret = -ENODEV;

        for (try = 0; try < 64; try++) {
                irq = try % 32;

                if (irq >= NR_IRQS)
                        continue;

                /* marked as available by the socket driver? */
                if (!((mask >> irq) & 1))
                        continue;

                /* on the first pass, avoid IRQs already used by another card */
                if ((try < 32) && pcmcia_used_irq[irq])
                        continue;

                /* check whether the IRQ can actually be obtained by
                 * registering (and immediately freeing) a dummy handler */
                ret = request_irq(irq, test_action, type, p_dev->devname,
                                  p_dev);
                if (!ret) {
                        free_irq(irq, p_dev);
                        p_dev->irq = s->pcmcia_irq = irq;
                        pcmcia_used_irq[irq]++;
                        break;
                }
        }

        return ret;
}

void pcmcia_cleanup_irq(struct pcmcia_socket *s)
{
        pcmcia_used_irq[s->pcmcia_irq]--;
        s->pcmcia_irq = 0;
}

#else /* CONFIG_PCMCIA_PROBE */

static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type)
{
        return -EINVAL;
}

void pcmcia_cleanup_irq(struct pcmcia_socket *s)
{
        s->pcmcia_irq = 0;
        return;
}

#endif /* CONFIG_PCMCIA_PROBE */
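/*
 * pcmcia_setup_irq() - determine the interrupt a PCMCIA device shall use
 *
 * Re-uses the IRQ already assigned to the socket if there is one, otherwise
 * probes for a free (preferably exclusive) ISA IRQ, and finally falls back
 * to the socket's PCI interrupt.
 */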
int pcmcia_setup_irq(struct pcmcia_device *p_dev)
{
        struct pcmcia_socket *s = p_dev->socket;

        if (p_dev->irq)
                return 0;

        /* already assigned to the socket? */
        if (s->pcmcia_irq) {
                p_dev->irq = s->pcmcia_irq;
                return 0;
        }

        /* prefer an exclusive ISA irq */
        if (!pcmcia_setup_isa_irq(p_dev, 0))
                return 0;

        /* but accept a shared ISA irq */
        if (!pcmcia_setup_isa_irq(p_dev, IRQF_SHARED))
                return 0;

        /* fall back to the socket's PCI irq, if available */
        if (s->pci_irq) {
                p_dev->irq = s->pcmcia_irq = s->pci_irq;
                return 0;
        }

        return -EINVAL;
}
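/**
 * pcmcia_request_window() - attempt to reserve an iomem window
 * @p_dev: the associated PCMCIA device
 * @res: memory window descriptor; on entry ->start holds the requested base
 *       (0 for any) and ->end the size, on success the resource describes
 *       the window that was actually mapped
 * @speed: access speed to program for the window
 */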
int pcmcia_request_window(struct pcmcia_device *p_dev, struct resource *res,
                        unsigned int speed)
{
        struct pcmcia_socket *s = p_dev->socket;
        pccard_mem_map *win;
        u_long align;
        int w;

        dev_dbg(&p_dev->dev, "request_window %pR %d\n", res, speed);

        if (!(s->state & SOCKET_PRESENT)) {
                dev_dbg(&p_dev->dev, "No card present\n");
                return -ENODEV;
        }

        /* window size defaults to the smallest available mapping */
        if (res->end == 0)
                res->end = s->map_size;
        align = (s->features & SS_CAP_MEM_ALIGN) ? res->end : s->map_size;
        if (res->end & (s->map_size-1)) {
                dev_dbg(&p_dev->dev, "invalid map size\n");
                return -EINVAL;
        }
        if ((res->start && (s->features & SS_CAP_STATIC_MAP)) ||
            (res->start & (align-1))) {
                dev_dbg(&p_dev->dev, "invalid base address\n");
                return -EINVAL;
        }
        if (res->start)
                align = 0;

        /* find a free window and allocate system memory for it */
        mutex_lock(&s->ops_mutex);
        for (w = 0; w < MAX_WIN; w++)
                if (!(s->state & SOCKET_WIN_REQ(w)))
                        break;
        if (w == MAX_WIN) {
                dev_dbg(&p_dev->dev, "all windows are used already\n");
                mutex_unlock(&s->ops_mutex);
                return -EINVAL;
        }

        win = &s->win[w];

        if (!(s->features & SS_CAP_STATIC_MAP)) {
                win->res = pcmcia_find_mem_region(res->start, res->end, align,
                                                  0, s);
                if (!win->res) {
                        dev_dbg(&p_dev->dev, "allocating mem region failed\n");
                        mutex_unlock(&s->ops_mutex);
                        return -EINVAL;
                }
        }
        p_dev->_win |= CLIENT_WIN_REQ(w);

        /* configure the socket controller */
        win->map = w+1;
        win->flags = res->flags & WIN_FLAGS_MAP;
        win->speed = speed;
        win->card_start = 0;

        if (s->ops->set_mem_map(s, win) != 0) {
                dev_dbg(&p_dev->dev, "failed to set memory mapping\n");
                mutex_unlock(&s->ops_mutex);
                return -EIO;
        }
        s->state |= SOCKET_WIN_REQ(w);

        /* return the window as a resource */
        if (s->features & SS_CAP_STATIC_MAP)
                res->start = win->static_start;
        else
                res->start = win->res->start;

        res->end += res->start - 1;
        res->flags &= ~WIN_FLAGS_REQ;
        res->flags |= (win->map << 2) | IORESOURCE_MEM;
        res->parent = win->res;
        if (win->res)
                request_resource(&iomem_resource, res);

        dev_dbg(&p_dev->dev, "request_window results in %pR\n", res);

        mutex_unlock(&s->ops_mutex);

        return 0;
}
EXPORT_SYMBOL(pcmcia_request_window);
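/**
 * pcmcia_disable_device() - disable and clean up a PCMCIA device
 * @p_dev: the associated PCMCIA device
 *
 * Releases all requested memory windows, the configuration, the I/O ranges
 * and, if one was registered, the IRQ handler.
 */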
void pcmcia_disable_device(struct pcmcia_device *p_dev)
{
        int i;

        dev_dbg(&p_dev->dev, "disabling device\n");

        for (i = 0; i < MAX_WIN; i++) {
                struct resource *res = p_dev->resource[MAX_IO_WIN + i];
                if (res->flags & WIN_FLAGS_REQ)
                        pcmcia_release_window(p_dev, res);
        }

        pcmcia_release_configuration(p_dev);
        pcmcia_release_io(p_dev);
        if (p_dev->_irq) {
                free_irq(p_dev->irq, p_dev->priv);
                p_dev->_irq = 0;
        }
}
EXPORT_SYMBOL(pcmcia_disable_device);