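/*
 * PCMCIA device resource management: configuration register access,
 * I/O port and memory window allocation, IRQ setup, and device
 * enable/disable helpers.
 */
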
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

#include <asm/irq.h>

#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>

#include "cs_internal.h"


/* Access speed for IO windows */
static int io_speed;
module_param(io_speed, int, 0444);


int pcmcia_validate_mem(struct pcmcia_socket *s)
{
	if (s->resource_ops->validate_mem)
		return s->resource_ops->validate_mem(s);

	return 0;
}

struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align,
					int low, struct pcmcia_socket *s)
{
	if (s->resource_ops->find_mem)
		return s->resource_ops->find_mem(base, num, align, low, s);
	return NULL;
}
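
/**
 * release_io_space() - release I/O ports allocated with alloc_io_space()
 * @s: pcmcia socket
 * @res: resource to release
 */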
static void release_io_space(struct pcmcia_socket *s, struct resource *res)
{
	resource_size_t num = resource_size(res);
	int i;

	dev_dbg(&s->dev, "release_io_space for %pR\n", res);

	for (i = 0; i < MAX_IO_WIN; i++) {
		if (!s->io[i].res)
			continue;
		if ((s->io[i].res->start <= res->start) &&
		    (s->io[i].res->end >= res->end)) {
			s->io[i].InUse -= num;
			if (res->parent)
				release_resource(res);
			res->start = res->end = 0;
			res->flags = IORESOURCE_IO;

			if (s->io[i].InUse == 0) {
				release_resource(s->io[i].res);
				kfree(s->io[i].res);
				s->io[i].res = NULL;
			}
		}
	}
}
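
/**
 * alloc_io_space() - allocate I/O ports for use by a PCMCIA device
 * @s: pcmcia socket
 * @res: resource to allocate (->start: requested base, ->end: number of ports)
 * @lines: number of I/O lines decoded by the PCMCIA card
 */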
static int alloc_io_space(struct pcmcia_socket *s, struct resource *res,
			unsigned int lines)
{
	unsigned int align;
	unsigned int base = res->start;
	unsigned int num = res->end;
	int ret;

	res->flags |= IORESOURCE_IO;

	dev_dbg(&s->dev, "alloc_io_space request for %pR, %d lines\n",
		res, lines);

	align = base ? (lines ? 1<<lines : 0) : 1;
	if (align && (align < num)) {
		if (base) {
			dev_dbg(&s->dev, "odd IO request\n");
			align = 0;
		} else
			while (align && (align < num))
				align <<= 1;
	}
	if (base & ~(align-1)) {
		dev_dbg(&s->dev, "odd IO request\n");
		align = 0;
	}

	ret = s->resource_ops->find_io(s, res->flags, &base, num, align,
				&res->parent);
	if (ret) {
		dev_dbg(&s->dev, "alloc_io_space request failed (%d)\n", ret);
		return -EINVAL;
	}

	res->start = base;
	res->end = res->start + num - 1;

	if (res->parent) {
		ret = request_resource(res->parent, res);
		if (ret) {
			dev_warn(&s->dev,
				"request_resource %pR failed: %d\n", res, ret);
			res->parent = NULL;
			release_io_space(s, res);
		}
	}
	dev_dbg(&s->dev, "alloc_io_space request result %d: %pR\n", ret, res);
	return ret;
}
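
/**
 * pcmcia_access_config() - read or write card configuration registers
 *
 * pcmcia_access_config() reads or writes a configuration register in
 * attribute memory on behalf of pcmcia_read_config_byte() and
 * pcmcia_write_config_byte(). The configuration must be locked.
 */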
static int pcmcia_access_config(struct pcmcia_device *p_dev,
				off_t where, u8 *val,
				int (*accessf) (struct pcmcia_socket *s,
						int attr, unsigned int addr,
						unsigned int len, void *ptr))
{
	struct pcmcia_socket *s;
	config_t *c;
	int addr;
	int ret = 0;

	s = p_dev->socket;

	mutex_lock(&s->ops_mutex);
	c = p_dev->function_config;

	if (!(c->state & CONFIG_LOCKED)) {
		dev_dbg(&p_dev->dev, "Configuration isn't locked\n");
		mutex_unlock(&s->ops_mutex);
		return -EACCES;
	}

	addr = (p_dev->config_base + where) >> 1;

	ret = accessf(s, 1, addr, 1, val);

	mutex_unlock(&s->ops_mutex);

	return ret;
}
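
/**
 * pcmcia_read_config_byte() - read a byte from a card configuration register
 * @p_dev: pcmcia device
 * @where: register offset
 * @val: value read
 */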
int pcmcia_read_config_byte(struct pcmcia_device *p_dev, off_t where, u8 *val)
{
	return pcmcia_access_config(p_dev, where, val, pcmcia_read_cis_mem);
}
EXPORT_SYMBOL(pcmcia_read_config_byte);
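
/**
 * pcmcia_write_config_byte() - write a byte to a card configuration register
 * @p_dev: pcmcia device
 * @where: register offset
 * @val: value to write
 */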
int pcmcia_write_config_byte(struct pcmcia_device *p_dev, off_t where, u8 val)
{
	return pcmcia_access_config(p_dev, where, &val, pcmcia_write_cis_mem);
}
EXPORT_SYMBOL(pcmcia_write_config_byte);
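
/**
 * pcmcia_map_mem_page() - modify iomem window to point to a different offset
 * @p_dev: pcmcia device
 * @res: iomem resource already enabled by pcmcia_request_window()
 * @offset: card_start offset to map
 *
 * pcmcia_map_mem_page() changes which part of the card's memory is
 * visible through an iomem window previously set up by
 * pcmcia_request_window(), by setting the window's card_start to @offset.
 */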
int pcmcia_map_mem_page(struct pcmcia_device *p_dev, struct resource *res,
			unsigned int offset)
{
	struct pcmcia_socket *s = p_dev->socket;
	unsigned int w;
	int ret;

	w = ((res->flags & IORESOURCE_BITS & WIN_FLAGS_REQ) >> 2) - 1;
	if (w >= MAX_WIN)
		return -EINVAL;

	mutex_lock(&s->ops_mutex);
	s->win[w].card_start = offset;
	ret = s->ops->set_mem_map(s, &s->win[w]);
	if (ret)
		dev_warn(&p_dev->dev, "failed to set_mem_map\n");
	mutex_unlock(&s->ops_mutex);
	return ret;
}
EXPORT_SYMBOL(pcmcia_map_mem_page);
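
/**
 * pcmcia_fixup_iowidth() - reduce I/O width to 8bit
 * @p_dev: pcmcia device
 *
 * pcmcia_fixup_iowidth() allows a PCMCIA device driver to reduce the
 * I/O width to 8bit after having called pcmcia_enable_device().
 */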
int pcmcia_fixup_iowidth(struct pcmcia_device *p_dev)
{
	struct pcmcia_socket *s = p_dev->socket;
	pccard_io_map io_off = { 0, 0, 0, 0, 1 };
	pccard_io_map io_on;
	int i, ret = 0;

	mutex_lock(&s->ops_mutex);

	dev_dbg(&p_dev->dev, "fixup iowidth to 8bit\n");

	if (!(s->state & SOCKET_PRESENT) ||
	    !(p_dev->function_config->state & CONFIG_LOCKED)) {
		dev_dbg(&p_dev->dev, "No card? Config not locked?\n");
		ret = -EACCES;
		goto unlock;
	}

	io_on.speed = io_speed;
	for (i = 0; i < MAX_IO_WIN; i++) {
		if (!s->io[i].res)
			continue;
		io_off.map = i;
		io_on.map = i;

		io_on.flags = MAP_ACTIVE | IO_DATA_PATH_WIDTH_8;
		io_on.start = s->io[i].res->start;
		io_on.stop = s->io[i].res->end;

		s->ops->set_io_map(s, &io_off);
		mdelay(40);
		s->ops->set_io_map(s, &io_on);
	}
unlock:
	mutex_unlock(&s->ops_mutex);

	return ret;
}
EXPORT_SYMBOL(pcmcia_fixup_iowidth);
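
/**
 * pcmcia_fixup_vpp() - set Vpp to a new voltage level
 * @p_dev: pcmcia device
 * @new_vpp: new Vpp voltage
 *
 * pcmcia_fixup_vpp() allows a PCMCIA device driver to set Vpp to a new
 * voltage level between calls to pcmcia_enable_device() and
 * pcmcia_disable_device().
 */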
int pcmcia_fixup_vpp(struct pcmcia_device *p_dev, unsigned char new_vpp)
{
	struct pcmcia_socket *s = p_dev->socket;
	int ret = 0;

	mutex_lock(&s->ops_mutex);

	dev_dbg(&p_dev->dev, "fixup Vpp to %d\n", new_vpp);

	if (!(s->state & SOCKET_PRESENT) ||
	    !(p_dev->function_config->state & CONFIG_LOCKED)) {
		dev_dbg(&p_dev->dev, "No card? Config not locked?\n");
		ret = -EACCES;
		goto unlock;
	}

	s->socket.Vpp = new_vpp;
	if (s->ops->set_socket(s, &s->socket)) {
		dev_warn(&p_dev->dev, "Unable to set VPP\n");
		ret = -EIO;
		goto unlock;
	}
	p_dev->vpp = new_vpp;

unlock:
	mutex_unlock(&s->ops_mutex);

	return ret;
}
EXPORT_SYMBOL(pcmcia_fixup_vpp);
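
/**
 * pcmcia_release_configuration() - physically disable a PCMCIA device
 * @p_dev: pcmcia device
 *
 * pcmcia_release_configuration() is the counterpart to
 * pcmcia_enable_device(): once the device is no longer used, Vpp is set
 * to 0, IRQs are no longer generated, and the I/O windows are disabled.
 * As pcmcia_release_io() and pcmcia_release_window() still need to be
 * called, drivers normally use pcmcia_disable_device() instead.
 */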
int pcmcia_release_configuration(struct pcmcia_device *p_dev)
{
	pccard_io_map io = { 0, 0, 0, 0, 1 };
	struct pcmcia_socket *s = p_dev->socket;
	config_t *c;
	int i;

	mutex_lock(&s->ops_mutex);
	c = p_dev->function_config;
	if (p_dev->_locked) {
		p_dev->_locked = 0;
		if (--(s->lock_count) == 0) {
			s->socket.flags = SS_OUTPUT_ENA;
			s->socket.Vpp = 0;
			s->socket.io_irq = 0;
			s->ops->set_socket(s, &s->socket);
		}
	}
	if (c->state & CONFIG_LOCKED) {
		c->state &= ~CONFIG_LOCKED;
		if (c->state & CONFIG_IO_REQ)
			for (i = 0; i < MAX_IO_WIN; i++) {
				if (!s->io[i].res)
					continue;
				s->io[i].Config--;
				if (s->io[i].Config != 0)
					continue;
				io.map = i;
				s->ops->set_io_map(s, &io);
			}
	}
	mutex_unlock(&s->ops_mutex);

	return 0;
}
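
/**
 * pcmcia_release_io() - release I/O ports allocated for a PCMCIA device
 * @p_dev: pcmcia device
 *
 * pcmcia_release_io() releases the I/O port ranges previously reserved
 * by pcmcia_request_io().
 */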
static int pcmcia_release_io(struct pcmcia_device *p_dev)
{
	struct pcmcia_socket *s = p_dev->socket;
	int ret = -EINVAL;
	config_t *c;

	mutex_lock(&s->ops_mutex);
	if (!p_dev->_io)
		goto out;

	c = p_dev->function_config;

	release_io_space(s, &c->io[0]);

	if (c->io[1].end)
		release_io_space(s, &c->io[1]);

	p_dev->_io = 0;
	c->state &= ~CONFIG_IO_REQ;
	ret = 0;

out:
	mutex_unlock(&s->ops_mutex);

	return ret;
}
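
/**
 * pcmcia_release_window() - release reserved iomem for PCMCIA devices
 * @p_dev: pcmcia device
 * @res: iomem resource to release
 *
 * pcmcia_release_window() releases a &struct resource previously
 * reserved by calling pcmcia_request_window().
 */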
int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res)
{
	struct pcmcia_socket *s = p_dev->socket;
	pccard_mem_map *win;
	unsigned int w;

	dev_dbg(&p_dev->dev, "releasing window %pR\n", res);

	w = ((res->flags & IORESOURCE_BITS & WIN_FLAGS_REQ) >> 2) - 1;
	if (w >= MAX_WIN)
		return -EINVAL;

	mutex_lock(&s->ops_mutex);
	win = &s->win[w];

	if (!(p_dev->_win & CLIENT_WIN_REQ(w))) {
		dev_dbg(&p_dev->dev, "not releasing unknown window\n");
		mutex_unlock(&s->ops_mutex);
		return -EINVAL;
	}

	/* Shut down memory window */
	win->flags &= ~MAP_ACTIVE;
	s->ops->set_mem_map(s, win);
	s->state &= ~SOCKET_WIN_REQ(w);

	/* Release system memory */
	if (win->res) {
		release_resource(res);
		release_resource(win->res);
		kfree(win->res);
		win->res = NULL;
	}
	res->start = res->end = 0;
	res->flags = IORESOURCE_MEM;
	p_dev->_win &= ~CLIENT_WIN_REQ(w);
	mutex_unlock(&s->ops_mutex);

	return 0;
}
EXPORT_SYMBOL(pcmcia_release_window);
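
/**
 * pcmcia_enable_device() - set up and activate a PCMCIA device
 * @p_dev: the associated PCMCIA device
 *
 * pcmcia_enable_device() physically enables a PCMCIA device: it parses
 * the flags stored in p_dev->config_flags, sets up the Vpp voltage,
 * speaker line, interrupt and I/O windows, and writes the CIS
 * configuration registers.
 */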
int pcmcia_enable_device(struct pcmcia_device *p_dev)
{
	int i;
	unsigned int base;
	struct pcmcia_socket *s = p_dev->socket;
	config_t *c;
	pccard_io_map iomap;
	unsigned char status = 0;
	unsigned char ext_status = 0;
	unsigned char option = 0;
	unsigned int flags = p_dev->config_flags;

	if (!(s->state & SOCKET_PRESENT))
		return -ENODEV;

	mutex_lock(&s->ops_mutex);
	c = p_dev->function_config;
	if (c->state & CONFIG_LOCKED) {
		mutex_unlock(&s->ops_mutex);
		dev_dbg(&p_dev->dev, "Configuration is locked\n");
		return -EACCES;
	}

	/* Do power control. We don't allow changes in Vcc. */
	s->socket.Vpp = p_dev->vpp;
	if (s->ops->set_socket(s, &s->socket)) {
		mutex_unlock(&s->ops_mutex);
		dev_warn(&p_dev->dev, "Unable to set socket state\n");
		return -EINVAL;
	}

	/* Pick memory or I/O card, DMA mode, interrupt */
	if (p_dev->_io || flags & CONF_ENABLE_IRQ)
		flags |= CONF_ENABLE_IOCARD;
	if (flags & CONF_ENABLE_IOCARD)
		s->socket.flags |= SS_IOCARD;
	if (flags & CONF_ENABLE_ZVCARD)
		s->socket.flags |= SS_ZVCARD | SS_IOCARD;
	if (flags & CONF_ENABLE_SPKR) {
		s->socket.flags |= SS_SPKR_ENA;
		status = CCSR_AUDIO_ENA;
		if (!(p_dev->config_regs & PRESENT_STATUS))
			dev_warn(&p_dev->dev, "speaker requested, but "
					"PRESENT_STATUS not set!\n");
	}
	if (flags & CONF_ENABLE_IRQ)
		s->socket.io_irq = s->pcmcia_irq;
	else
		s->socket.io_irq = 0;
	if (flags & CONF_ENABLE_ESR) {
		p_dev->config_regs |= PRESENT_EXT_STATUS;
		ext_status = ESR_REQ_ATTN_ENA;
	}
	s->ops->set_socket(s, &s->socket);
	s->lock_count++;

	dev_dbg(&p_dev->dev,
		"enable_device: V %d, flags %x, base %x, regs %x, idx %x\n",
		p_dev->vpp, flags, p_dev->config_base, p_dev->config_regs,
		p_dev->config_index);

	/* Set up CIS configuration registers */
	base = p_dev->config_base;
	if (p_dev->config_regs & PRESENT_COPY) {
		u16 tmp = 0;
		dev_dbg(&p_dev->dev, "clearing CISREG_SCR\n");
		pcmcia_write_cis_mem(s, 1, (base + CISREG_SCR)>>1, 1, &tmp);
	}
	if (p_dev->config_regs & PRESENT_PIN_REPLACE) {
		u16 tmp = 0;
		dev_dbg(&p_dev->dev, "clearing CISREG_PRR\n");
		pcmcia_write_cis_mem(s, 1, (base + CISREG_PRR)>>1, 1, &tmp);
	}
	if (p_dev->config_regs & PRESENT_OPTION) {
		if (s->functions == 1) {
			option = p_dev->config_index & COR_CONFIG_MASK;
		} else {
			option = p_dev->config_index & COR_MFC_CONFIG_MASK;
			option |= COR_FUNC_ENA|COR_IREQ_ENA;
			if (p_dev->config_regs & PRESENT_IOBASE_0)
				option |= COR_ADDR_DECODE;
		}
		if ((flags & CONF_ENABLE_IRQ) &&
		    !(flags & CONF_ENABLE_PULSE_IRQ))
			option |= COR_LEVEL_REQ;
		pcmcia_write_cis_mem(s, 1, (base + CISREG_COR)>>1, 1, &option);
		mdelay(40);
	}
	if (p_dev->config_regs & PRESENT_STATUS)
		pcmcia_write_cis_mem(s, 1, (base + CISREG_CCSR)>>1, 1, &status);

	if (p_dev->config_regs & PRESENT_EXT_STATUS)
		pcmcia_write_cis_mem(s, 1, (base + CISREG_ESR)>>1, 1,
					&ext_status);

	if (p_dev->config_regs & PRESENT_IOBASE_0) {
		u8 b = c->io[0].start & 0xff;
		pcmcia_write_cis_mem(s, 1, (base + CISREG_IOBASE_0)>>1, 1, &b);
		b = (c->io[0].start >> 8) & 0xff;
		pcmcia_write_cis_mem(s, 1, (base + CISREG_IOBASE_1)>>1, 1, &b);
	}
	if (p_dev->config_regs & PRESENT_IOSIZE) {
		u8 b = resource_size(&c->io[0]) + resource_size(&c->io[1]) - 1;
		pcmcia_write_cis_mem(s, 1, (base + CISREG_IOSIZE)>>1, 1, &b);
	}

	/* Configure I/O windows */
	if (c->state & CONFIG_IO_REQ) {
		iomap.speed = io_speed;
		for (i = 0; i < MAX_IO_WIN; i++)
			if (s->io[i].res) {
				iomap.map = i;
				iomap.flags = MAP_ACTIVE;
				switch (s->io[i].res->flags & IO_DATA_PATH_WIDTH) {
				case IO_DATA_PATH_WIDTH_16:
					iomap.flags |= MAP_16BIT; break;
				case IO_DATA_PATH_WIDTH_AUTO:
					iomap.flags |= MAP_AUTOSZ; break;
				default:
					break;
				}
				iomap.start = s->io[i].res->start;
				iomap.stop = s->io[i].res->end;
				s->ops->set_io_map(s, &iomap);
				s->io[i].Config++;
			}
	}

	c->state |= CONFIG_LOCKED;
	p_dev->_locked = 1;
	mutex_unlock(&s->ops_mutex);
	return 0;
}
EXPORT_SYMBOL(pcmcia_enable_device);
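
/**
 * pcmcia_request_io() - attempt to reserve port ranges for PCMCIA devices
 * @p_dev: the associated PCMCIA device
 *
 * pcmcia_request_io() attempts to reserve the I/O port ranges described
 * in p_dev->function_config->io[0] and io[1]. On entry, ->start holds
 * the requested base address and ->end the number of ports; the number
 * of decoded I/O lines comes from p_dev->io_lines.
 */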
int pcmcia_request_io(struct pcmcia_device *p_dev)
{
	struct pcmcia_socket *s = p_dev->socket;
	config_t *c = p_dev->function_config;
	int ret = -EINVAL;

	mutex_lock(&s->ops_mutex);
	dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR",
		&c->io[0], &c->io[1]);

	if (!(s->state & SOCKET_PRESENT)) {
		dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n");
		goto out;
	}

	if (c->state & CONFIG_LOCKED) {
		dev_dbg(&p_dev->dev, "Configuration is locked\n");
		goto out;
	}
	if (c->state & CONFIG_IO_REQ) {
		dev_dbg(&p_dev->dev, "IO already configured\n");
		goto out;
	}

	ret = alloc_io_space(s, &c->io[0], p_dev->io_lines);
	if (ret)
		goto out;

	if (c->io[1].end) {
		ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
		if (ret) {
			struct resource tmp = c->io[0];

			/* release the previously allocated first range */
			release_io_space(s, &c->io[0]);

			/* but preserve the requested values for re-use */
			c->io[0].end = resource_size(&tmp);
			c->io[0].start = tmp.start;
			c->io[0].flags = tmp.flags;
			goto out;
		}
	} else
		c->io[1].start = 0;

	c->state |= CONFIG_IO_REQ;
	p_dev->_io = 1;

	dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR",
		&c->io[0], &c->io[1]);
out:
	mutex_unlock(&s->ops_mutex);

	return ret;
}
EXPORT_SYMBOL(pcmcia_request_io);
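
/**
 * pcmcia_request_irq() - attempt to request an IRQ for a PCMCIA device
 * @p_dev: the associated PCMCIA device
 * @handler: IRQ handler to register
 *
 * pcmcia_request_irq() is a wrapper around request_irq() which allows
 * the PCMCIA core to clean up the registration in
 * pcmcia_disable_device(). Only IRQF_SHARED capable IRQ handlers may
 * be registered this way.
 */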
int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev,
				irq_handler_t handler)
{
	int ret;

	if (!p_dev->irq)
		return -EINVAL;

	ret = request_irq(p_dev->irq, handler, IRQF_SHARED,
			p_dev->devname, p_dev->priv);
	if (!ret)
		p_dev->_irq = 1;

	return ret;
}
EXPORT_SYMBOL(pcmcia_request_irq);
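
/**
 * __pcmcia_request_exclusive_irq() - attempt to request an exclusive IRQ first
 * @p_dev: the associated PCMCIA device
 * @handler: IRQ handler to register
 *
 * __pcmcia_request_exclusive_irq() is a wrapper around request_irq()
 * which first tries to request an exclusive IRQ. If that fails, it
 * falls back to a shared IRQ and prints a warning, as drivers should
 * be updated to allow IRQF_SHARED.
 */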
int __must_check
__pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev,
			irq_handler_t handler)
{
	int ret;

	if (!p_dev->irq)
		return -EINVAL;

	ret = request_irq(p_dev->irq, handler, 0, p_dev->devname, p_dev->priv);
	if (ret) {
		ret = pcmcia_request_irq(p_dev, handler);
		dev_warn(&p_dev->dev, "pcmcia: request for exclusive IRQ could not be fulfilled\n");
		dev_warn(&p_dev->dev, "pcmcia: the driver needs updating to support shared IRQ lines\n");
	}
	if (ret)
		dev_info(&p_dev->dev, "request_irq() failed\n");
	else
		p_dev->_irq = 1;

	return ret;
}
EXPORT_SYMBOL(__pcmcia_request_exclusive_irq);


#ifdef CONFIG_PCMCIA_PROBE

/* IRQs already reserved by other PCMCIA cards; avoid re-using them */
static u8 pcmcia_used_irq[32];

/* dummy handler used only to probe whether an IRQ line is free */
static irqreturn_t test_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}

/*
 * pcmcia_setup_isa_irq() - determine whether an ISA IRQ can be used
 *
 * Walk the socket's irq_mask twice: the first pass skips IRQs already
 * used by other cards, the second pass accepts them as well.
 */
static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type)
{
	struct pcmcia_socket *s = p_dev->socket;
	unsigned int try, irq;
	u32 mask = s->irq_mask;
	int ret = -ENODEV;

	for (try = 0; try < 64; try++) {
		irq = try % 32;

		if (irq > NR_IRQS)
			continue;

		/* allowed by the socket driver's irq_mask? */
		if (!((mask >> irq) & 1))
			continue;

		/* on the first pass, skip IRQs already used by another card */
		if ((try < 32) && pcmcia_used_irq[irq])
			continue;

		/* register the dummy handler to check whether the IRQ is
		 * already claimed by the kernel resource management core */
		ret = request_irq(irq, test_action, type, p_dev->devname,
				p_dev);
		if (!ret) {
			free_irq(irq, p_dev);
			p_dev->irq = s->pcmcia_irq = irq;
			pcmcia_used_irq[irq]++;
			break;
		}
	}

	return ret;
}

void pcmcia_cleanup_irq(struct pcmcia_socket *s)
{
	pcmcia_used_irq[s->pcmcia_irq]--;
	s->pcmcia_irq = 0;
}

#else /* CONFIG_PCMCIA_PROBE */

static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type)
{
	return -EINVAL;
}

void pcmcia_cleanup_irq(struct pcmcia_socket *s)
{
	s->pcmcia_irq = 0;
	return;
}

#endif /* CONFIG_PCMCIA_PROBE */
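
/**
 * pcmcia_setup_irq() - determine the IRQ to be used for a device
 * @p_dev: the associated PCMCIA device
 *
 * Tries the socket's already-assigned IRQ, then an exclusive ISA IRQ,
 * then a shared ISA IRQ, and finally falls back to the PCI IRQ.
 * Must be called with ops_mutex held.
 */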
int pcmcia_setup_irq(struct pcmcia_device *p_dev)
{
	struct pcmcia_socket *s = p_dev->socket;

	if (p_dev->irq)
		return 0;

	/* already assigned? */
	if (s->pcmcia_irq) {
		p_dev->irq = s->pcmcia_irq;
		return 0;
	}

	/* prefer an exclusive ISA irq */
	if (!pcmcia_setup_isa_irq(p_dev, 0))
		return 0;

	/* but accept a shared ISA irq */
	if (!pcmcia_setup_isa_irq(p_dev, IRQF_SHARED))
		return 0;

	/* or use the PCI irq otherwise */
	if (s->pci_irq) {
		p_dev->irq = s->pcmcia_irq = s->pci_irq;
		return 0;
	}

	return -EINVAL;
}
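
/**
 * pcmcia_request_window() - attempt to reserve iomem for PCMCIA devices
 * @p_dev: the associated PCMCIA device
 * @res: memory resource to reserve
 * @speed: access speed
 *
 * pcmcia_request_window() attempts to reserve an iomem range. On entry,
 * @res->start holds the requested base address and @res->end the
 * requested size; on success, @res is converted into a regular iomem
 * resource describing the mapped window.
 */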
int pcmcia_request_window(struct pcmcia_device *p_dev, struct resource *res,
			unsigned int speed)
{
	struct pcmcia_socket *s = p_dev->socket;
	pccard_mem_map *win;
	u_long align;
	int w;

	dev_dbg(&p_dev->dev, "request_window %pR %d\n", res, speed);

	if (!(s->state & SOCKET_PRESENT)) {
		dev_dbg(&p_dev->dev, "No card present\n");
		return -ENODEV;
	}

	/* Window size defaults to smallest available */
	if (res->end == 0)
		res->end = s->map_size;
	align = (s->features & SS_CAP_MEM_ALIGN) ? res->end : s->map_size;
	if (res->end & (s->map_size-1)) {
		dev_dbg(&p_dev->dev, "invalid map size\n");
		return -EINVAL;
	}
	if ((res->start && (s->features & SS_CAP_STATIC_MAP)) ||
	    (res->start & (align-1))) {
		dev_dbg(&p_dev->dev, "invalid base address\n");
		return -EINVAL;
	}
	if (res->start)
		align = 0;

	/* Allocate system memory window */
	mutex_lock(&s->ops_mutex);
	for (w = 0; w < MAX_WIN; w++)
		if (!(s->state & SOCKET_WIN_REQ(w)))
			break;
	if (w == MAX_WIN) {
		dev_dbg(&p_dev->dev, "all windows are used already\n");
		mutex_unlock(&s->ops_mutex);
		return -EINVAL;
	}

	win = &s->win[w];

	if (!(s->features & SS_CAP_STATIC_MAP)) {
		win->res = pcmcia_find_mem_region(res->start, res->end, align,
						0, s);
		if (!win->res) {
			dev_dbg(&p_dev->dev, "allocating mem region failed\n");
			mutex_unlock(&s->ops_mutex);
			return -EINVAL;
		}
	}
	p_dev->_win |= CLIENT_WIN_REQ(w);

	/* Configure the socket controller */
	win->map = w+1;
	win->flags = res->flags & WIN_FLAGS_MAP;
	win->speed = speed;
	win->card_start = 0;

	if (s->ops->set_mem_map(s, win) != 0) {
		dev_dbg(&p_dev->dev, "failed to set memory mapping\n");
		mutex_unlock(&s->ops_mutex);
		return -EIO;
	}
	s->state |= SOCKET_WIN_REQ(w);

	/* Return window handle */
	if (s->features & SS_CAP_STATIC_MAP)
		res->start = win->static_start;
	else
		res->start = win->res->start;

	/* convert to new-style resources */
	res->end += res->start - 1;
	res->flags &= ~WIN_FLAGS_REQ;
	res->flags |= (win->map << 2) | IORESOURCE_MEM;
	res->parent = win->res;
	if (win->res)
		request_resource(&iomem_resource, res);

	dev_dbg(&p_dev->dev, "request_window results in %pR\n", res);

	mutex_unlock(&s->ops_mutex);

	return 0;
}
EXPORT_SYMBOL(pcmcia_request_window);
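
/**
 * pcmcia_disable_device() - disable and free up a PCMCIA device
 * @p_dev: the associated PCMCIA device
 *
 * pcmcia_disable_device() is the driver-callable counterpart to
 * pcmcia_enable_device(): any reserved iomem windows and I/O port
 * ranges are released, the Vpp voltage is set to 0, and IRQs will no
 * longer be generated.
 */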
void pcmcia_disable_device(struct pcmcia_device *p_dev)
{
	int i;

	dev_dbg(&p_dev->dev, "disabling device\n");

	for (i = 0; i < MAX_WIN; i++) {
		struct resource *res = p_dev->resource[MAX_IO_WIN + i];
		if (res->flags & WIN_FLAGS_REQ)
			pcmcia_release_window(p_dev, res);
	}

	pcmcia_release_configuration(p_dev);
	pcmcia_release_io(p_dev);
	if (p_dev->_irq) {
		free_irq(p_dev->irq, p_dev->priv);
		p_dev->_irq = 0;
	}
}
EXPORT_SYMBOL(pcmcia_disable_device);