/*
 * pata-bf54x: PATA driver for the Blackfin 54x on-chip ATAPI controller.
 *
 * Author: Sonic Zhang <sonic.zhang@analog.com>
 *
 * Licensed under the GPL.
 */

29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/init.h>
33#include <linux/blkdev.h>
34#include <linux/delay.h>
35#include <linux/device.h>
36#include <scsi/scsi_host.h>
37#include <linux/libata.h>
38#include <linux/platform_device.h>
39#include <linux/gpio.h>
40#include <asm/dma.h>
41#include <asm/portmux.h>
42
43#define DRV_NAME "pata-bf54x"
44#define DRV_VERSION "0.9"
45
46#define ATA_REG_CTRL 0x0E
47#define ATA_REG_ALTSTATUS ATA_REG_CTRL
48
49
50#define ATAPI_OFFSET_CONTROL 0x00
51#define ATAPI_OFFSET_STATUS 0x04
52#define ATAPI_OFFSET_DEV_ADDR 0x08
53#define ATAPI_OFFSET_DEV_TXBUF 0x0c
54#define ATAPI_OFFSET_DEV_RXBUF 0x10
55#define ATAPI_OFFSET_INT_MASK 0x14
56#define ATAPI_OFFSET_INT_STATUS 0x18
57#define ATAPI_OFFSET_XFER_LEN 0x1c
58#define ATAPI_OFFSET_LINE_STATUS 0x20
59#define ATAPI_OFFSET_SM_STATE 0x24
60#define ATAPI_OFFSET_TERMINATE 0x28
61#define ATAPI_OFFSET_PIO_TFRCNT 0x2c
62#define ATAPI_OFFSET_DMA_TFRCNT 0x30
63#define ATAPI_OFFSET_UMAIN_TFRCNT 0x34
64#define ATAPI_OFFSET_UDMAOUT_TFRCNT 0x38
65#define ATAPI_OFFSET_REG_TIM_0 0x40
66#define ATAPI_OFFSET_PIO_TIM_0 0x44
67#define ATAPI_OFFSET_PIO_TIM_1 0x48
68#define ATAPI_OFFSET_MULTI_TIM_0 0x50
69#define ATAPI_OFFSET_MULTI_TIM_1 0x54
70#define ATAPI_OFFSET_MULTI_TIM_2 0x58
71#define ATAPI_OFFSET_ULTRA_TIM_0 0x60
72#define ATAPI_OFFSET_ULTRA_TIM_1 0x64
73#define ATAPI_OFFSET_ULTRA_TIM_2 0x68
74#define ATAPI_OFFSET_ULTRA_TIM_3 0x6c
75
76
77#define ATAPI_GET_CONTROL(base)\
78 bfin_read16(base + ATAPI_OFFSET_CONTROL)
79#define ATAPI_SET_CONTROL(base, val)\
80 bfin_write16(base + ATAPI_OFFSET_CONTROL, val)
81#define ATAPI_GET_STATUS(base)\
82 bfin_read16(base + ATAPI_OFFSET_STATUS)
83#define ATAPI_GET_DEV_ADDR(base)\
84 bfin_read16(base + ATAPI_OFFSET_DEV_ADDR)
85#define ATAPI_SET_DEV_ADDR(base, val)\
86 bfin_write16(base + ATAPI_OFFSET_DEV_ADDR, val)
87#define ATAPI_GET_DEV_TXBUF(base)\
88 bfin_read16(base + ATAPI_OFFSET_DEV_TXBUF)
89#define ATAPI_SET_DEV_TXBUF(base, val)\
90 bfin_write16(base + ATAPI_OFFSET_DEV_TXBUF, val)
91#define ATAPI_GET_DEV_RXBUF(base)\
92 bfin_read16(base + ATAPI_OFFSET_DEV_RXBUF)
93#define ATAPI_SET_DEV_RXBUF(base, val)\
94 bfin_write16(base + ATAPI_OFFSET_DEV_RXBUF, val)
95#define ATAPI_GET_INT_MASK(base)\
96 bfin_read16(base + ATAPI_OFFSET_INT_MASK)
97#define ATAPI_SET_INT_MASK(base, val)\
98 bfin_write16(base + ATAPI_OFFSET_INT_MASK, val)
99#define ATAPI_GET_INT_STATUS(base)\
100 bfin_read16(base + ATAPI_OFFSET_INT_STATUS)
101#define ATAPI_SET_INT_STATUS(base, val)\
102 bfin_write16(base + ATAPI_OFFSET_INT_STATUS, val)
103#define ATAPI_GET_XFER_LEN(base)\
104 bfin_read16(base + ATAPI_OFFSET_XFER_LEN)
105#define ATAPI_SET_XFER_LEN(base, val)\
106 bfin_write16(base + ATAPI_OFFSET_XFER_LEN, val)
107#define ATAPI_GET_LINE_STATUS(base)\
108 bfin_read16(base + ATAPI_OFFSET_LINE_STATUS)
109#define ATAPI_GET_SM_STATE(base)\
110 bfin_read16(base + ATAPI_OFFSET_SM_STATE)
111#define ATAPI_GET_TERMINATE(base)\
112 bfin_read16(base + ATAPI_OFFSET_TERMINATE)
113#define ATAPI_SET_TERMINATE(base, val)\
114 bfin_write16(base + ATAPI_OFFSET_TERMINATE, val)
115#define ATAPI_GET_PIO_TFRCNT(base)\
116 bfin_read16(base + ATAPI_OFFSET_PIO_TFRCNT)
117#define ATAPI_GET_DMA_TFRCNT(base)\
118 bfin_read16(base + ATAPI_OFFSET_DMA_TFRCNT)
119#define ATAPI_GET_UMAIN_TFRCNT(base)\
120 bfin_read16(base + ATAPI_OFFSET_UMAIN_TFRCNT)
121#define ATAPI_GET_UDMAOUT_TFRCNT(base)\
122 bfin_read16(base + ATAPI_OFFSET_UDMAOUT_TFRCNT)
123#define ATAPI_GET_REG_TIM_0(base)\
124 bfin_read16(base + ATAPI_OFFSET_REG_TIM_0)
125#define ATAPI_SET_REG_TIM_0(base, val)\
126 bfin_write16(base + ATAPI_OFFSET_REG_TIM_0, val)
127#define ATAPI_GET_PIO_TIM_0(base)\
128 bfin_read16(base + ATAPI_OFFSET_PIO_TIM_0)
129#define ATAPI_SET_PIO_TIM_0(base, val)\
130 bfin_write16(base + ATAPI_OFFSET_PIO_TIM_0, val)
131#define ATAPI_GET_PIO_TIM_1(base)\
132 bfin_read16(base + ATAPI_OFFSET_PIO_TIM_1)
133#define ATAPI_SET_PIO_TIM_1(base, val)\
134 bfin_write16(base + ATAPI_OFFSET_PIO_TIM_1, val)
135#define ATAPI_GET_MULTI_TIM_0(base)\
136 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_0)
137#define ATAPI_SET_MULTI_TIM_0(base, val)\
138 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_0, val)
139#define ATAPI_GET_MULTI_TIM_1(base)\
140 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_1)
141#define ATAPI_SET_MULTI_TIM_1(base, val)\
142 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_1, val)
143#define ATAPI_GET_MULTI_TIM_2(base)\
144 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_2)
145#define ATAPI_SET_MULTI_TIM_2(base, val)\
146 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_2, val)
147#define ATAPI_GET_ULTRA_TIM_0(base)\
148 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_0)
149#define ATAPI_SET_ULTRA_TIM_0(base, val)\
150 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_0, val)
151#define ATAPI_GET_ULTRA_TIM_1(base)\
152 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_1)
153#define ATAPI_SET_ULTRA_TIM_1(base, val)\
154 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_1, val)
155#define ATAPI_GET_ULTRA_TIM_2(base)\
156 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_2)
157#define ATAPI_SET_ULTRA_TIM_2(base, val)\
158 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_2, val)
159#define ATAPI_GET_ULTRA_TIM_3(base)\
160 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_3)
161#define ATAPI_SET_ULTRA_TIM_3(base, val)\
162 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_3, val)
163
164
165
166
167
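/*
 * Timing tables.  The *_fsclk[] arrays give the minimum system clock
 * (SCLK, in Hz) required to run each transfer mode; the remaining arrays
 * give per-mode timing parameters in nanoseconds, which num_clocks_min()
 * converts into SCLK cycle counts before they are written to the
 * controller's timing registers.
 */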
168static const u32 pio_fsclk[] =
169{ 33333333, 33333333, 33333333, 33333333, 33333333 };
170
171
172
173
174
175static const u32 mdma_fsclk[] = { 33333333, 33333333, 33333333 };
176
177
178
179
180
181
182
183
184
185
186static const u32 udma_fsclk[] =
187{ 33333333, 33333333, 40000000, 50000000, 80000000, 133333333 };
188
189
190
191
192
193
194static const u32 reg_t0min[] = { 600, 383, 330, 180, 120 };
195
196static const u32 reg_t2min[] = { 290, 290, 290, 70, 25 };
197
198static const u32 reg_teocmin[] = { 290, 290, 290, 80, 70 };
199
200
201
202
203
204
205static const u32 pio_t0min[] = { 600, 383, 240, 180, 120 };
206
207static const u32 pio_t1min[] = { 70, 50, 30, 30, 25 };
208
209static const u32 pio_t2min[] = { 165, 125, 100, 80, 70 };
210
211static const u32 pio_teocmin[] = { 165, 125, 100, 70, 25 };
212
213static const u32 pio_t4min[] = { 30, 20, 15, 10, 10 };
214
215
216
217
218
219
220
221static const u32 mdma_t0min[] = { 480, 150, 120 };
222
223static const u32 mdma_tdmin[] = { 215, 80, 70 };
224
225static const u32 mdma_thmin[] = { 20, 15, 10 };
226
227static const u32 mdma_tjmin[] = { 20, 5, 5 };
228
229static const u32 mdma_tkrmin[] = { 50, 50, 25 };
230
231static const u32 mdma_tkwmin[] = { 215, 50, 25 };
232
233static const u32 mdma_tmmin[] = { 50, 30, 25 };
234
235static const u32 mdma_tzmax[] = { 20, 25, 25 };
236
237
238
239
240
241static const u32 udma_tcycmin[] = { 112, 73, 54, 39, 25, 17 };
242static const u32 udma_tdvsmin[] = { 70, 48, 31, 20, 7, 5 };
243static const u32 udma_tenvmax[] = { 70, 70, 70, 55, 55, 50 };
244static const u32 udma_trpmin[] = { 160, 125, 100, 100, 100, 85 };
245static const u32 udma_tmin[] = { 5, 5, 5, 5, 3, 3 };
246
247
248static const u32 udma_tmlimin = 20;
249static const u32 udma_tzahmin = 20;
250static const u32 udma_tenvmin = 20;
251static const u32 udma_tackmin = 20;
252static const u32 udma_tssmin = 50;
253
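/*
 * One DMA descriptor is allocated per scatter/gather segment, so this
 * value also bounds sg_tablesize in the SCSI host template below.
 */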
254#define BFIN_MAX_SG_SEGMENTS 4
255
256
257
258
259
260
261
262
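/*
 * num_clocks_min - convert a minimum time in nanoseconds into a number
 * of system clock cycles, rounding up.  For example, with a 133333333 Hz
 * SCLK a 50 ns minimum comes out as 7 cycles (6.67 rounded up).
 */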
263static unsigned short num_clocks_min(unsigned long tmin,
264 unsigned long fsclk)
265{
        unsigned long tmp;
267 unsigned short result;
268
269 tmp = tmin * (fsclk/1000/1000) / 1000;
270 result = (unsigned short)tmp;
271 if ((tmp*1000*1000) < (tmin*(fsclk/1000))) {
272 result++;
273 }
274
275 return result;
276}
277
278
279
280
281
282
283
284
285
286
287
288
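/*
 * bfin_set_piomode - program the controller's register-access and PIO
 * data-transfer timings for @adev's PIO mode, falling back to a slower
 * mode if the current SCLK is too low, and enable IORDY for PIO3 and up.
 */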
289static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev)
290{
291 int mode = adev->pio_mode - XFER_PIO_0;
292 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
293 unsigned int fsclk = get_sclk();
294 unsigned short teoc_reg, t2_reg, teoc_pio;
295 unsigned short t4_reg, t2_pio, t1_reg;
296 unsigned short n0, n6, t6min = 5;
297
298
299
300
301
302 n6 = num_clocks_min(t6min, fsclk);
303 if (mode >= 0 && mode <= 4 && n6 >= 1) {
                dev_dbg(adev->link->ap->dev,
                        "set piomode: mode=%d, fsclk=%u\n", mode, fsclk);
305
306 while (mode > 0 && pio_fsclk[mode] > fsclk)
307 mode--;
308
309
310 t2_reg = num_clocks_min(reg_t2min[mode], fsclk);
311
312 teoc_reg = num_clocks_min(reg_teocmin[mode], fsclk);
313
314 n0 = num_clocks_min(reg_t0min[mode], fsclk);
315
316
317 if (t2_reg + teoc_reg < n0)
318 t2_reg = n0 - teoc_reg;
319
320
321
322
323 t2_pio = num_clocks_min(pio_t2min[mode], fsclk);
324
325 teoc_pio = num_clocks_min(pio_teocmin[mode], fsclk);
326
327 n0 = num_clocks_min(pio_t0min[mode], fsclk);
328
329
330 if (t2_pio + teoc_pio < n0)
331 t2_pio = n0 - teoc_pio;
332
333
334 t1_reg = num_clocks_min(pio_t1min[mode], fsclk);
335
336
337 t4_reg = num_clocks_min(pio_t4min[mode], fsclk);
338
339 ATAPI_SET_REG_TIM_0(base, (teoc_reg<<8 | t2_reg));
340 ATAPI_SET_PIO_TIM_0(base, (t4_reg<<12 | t2_pio<<4 | t1_reg));
341 ATAPI_SET_PIO_TIM_1(base, teoc_pio);
342 if (mode > 2) {
343 ATAPI_SET_CONTROL(base,
344 ATAPI_GET_CONTROL(base) | IORDY_EN);
345 } else {
346 ATAPI_SET_CONTROL(base,
347 ATAPI_GET_CONTROL(base) & ~IORDY_EN);
348 }
349
350
351 ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
352 & ~(PIO_DONE_MASK | HOST_TERM_XFER_MASK));
353 SSYNC();
354 }
355}
356
357
358
359
360
361
362
363
364
365
366
367
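/*
 * bfin_set_dmamode - program the ultra DMA or multiword DMA timing
 * registers for @adev's DMA mode, again dropping to a slower mode when
 * the current SCLK cannot meet that mode's timing requirements.
 */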
368static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
369{
370 int mode;
371 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
372 unsigned long fsclk = get_sclk();
373 unsigned short tenv, tack, tcyc_tdvs, tdvs, tmli, tss, trp, tzah;
374 unsigned short tm, td, tkr, tkw, teoc, th;
375 unsigned short n0, nf, tfmin = 5;
376 unsigned short nmin, tcyc;
377
378 mode = adev->dma_mode - XFER_UDMA_0;
379 if (mode >= 0 && mode <= 5) {
380 dev_dbg(adev->link->ap->dev, "set udmamode: mode=%d\n", mode);
381
382
383
384
385
386 while (mode > 0 && udma_fsclk[mode] > fsclk)
387 mode--;
388
389 nmin = num_clocks_min(udma_tmin[mode], fsclk);
390 if (nmin >= 1) {
391
392 tdvs = num_clocks_min(udma_tdvsmin[mode], fsclk);
393 tcyc = num_clocks_min(udma_tcycmin[mode], fsclk);
394 tcyc_tdvs = 2;
395
396
397
398
399 if (tdvs + tcyc_tdvs < tcyc)
400 tcyc_tdvs = tcyc - tdvs;
401
402
403
404
405 if (tcyc_tdvs < 2)
406 tcyc_tdvs = 2;
407
408 if (tdvs < 2)
409 tdvs = 2;
410
411 tack = num_clocks_min(udma_tackmin, fsclk);
412 tss = num_clocks_min(udma_tssmin, fsclk);
413 tmli = num_clocks_min(udma_tmlimin, fsclk);
414 tzah = num_clocks_min(udma_tzahmin, fsclk);
415 trp = num_clocks_min(udma_trpmin[mode], fsclk);
416 tenv = num_clocks_min(udma_tenvmin, fsclk);
417 if (tenv <= udma_tenvmax[mode]) {
418 ATAPI_SET_ULTRA_TIM_0(base, (tenv<<8 | tack));
419 ATAPI_SET_ULTRA_TIM_1(base,
420 (tcyc_tdvs<<8 | tdvs));
421 ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss));
422 ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah));
423 }
424 }
425 }
426
427 mode = adev->dma_mode - XFER_MW_DMA_0;
428 if (mode >= 0 && mode <= 2) {
429 dev_dbg(adev->link->ap->dev, "set mdmamode: mode=%d\n", mode);
430
431
432
433
434
435 while (mode > 0 && mdma_fsclk[mode] > fsclk)
436 mode--;
437
438 nf = num_clocks_min(tfmin, fsclk);
439 if (nf >= 1) {
440
441
442
443 td = num_clocks_min(mdma_tdmin[mode], fsclk);
444
445
446 tkw = num_clocks_min(mdma_tkwmin[mode], fsclk);
447
448
449 n0 = num_clocks_min(mdma_t0min[mode], fsclk);
450
451
452 if (tkw + td < n0)
453 tkw = n0 - td;
454
455
456 tkr = num_clocks_min(mdma_tkrmin[mode], fsclk);
457
458 tm = num_clocks_min(mdma_tmmin[mode], fsclk);
459
460 teoc = num_clocks_min(mdma_tjmin[mode], fsclk);
461
462 th = num_clocks_min(mdma_thmin[mode], fsclk);
463
464 ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td));
465 ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw));
466 ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th));
467 SSYNC();
468 }
469 }
470 return;
471}
472
473
474
475
476
477
478
479
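/*
 * Busy-wait (bounded by PATA_BF54X_WAIT_TIMEOUT iterations) for one of
 * the interrupt status bits in @mask to assert, then acknowledge it by
 * writing the mask back to the interrupt status register.
 */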
480static inline void wait_complete(void __iomem *base, unsigned short mask)
481{
482 unsigned short status;
483 unsigned int i = 0;
484
485#define PATA_BF54X_WAIT_TIMEOUT 10000
486
487 for (i = 0; i < PATA_BF54X_WAIT_TIMEOUT; i++) {
488 status = ATAPI_GET_INT_STATUS(base) & mask;
489 if (status)
490 break;
491 }
492
493 ATAPI_SET_INT_STATUS(base, mask);
494}
495
496
497
498
499
500
501
502
503
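/*
 * Write one ATA taskfile register: load the value into the device TX
 * buffer, select the register address, start a host-driven PIO write
 * and wait for the PIO done interrupt.
 */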
504static void write_atapi_register(void __iomem *base,
505 unsigned long ata_reg, unsigned short value)
506{
507
508
509
510 ATAPI_SET_DEV_TXBUF(base, value);
511
512
513
514
515 ATAPI_SET_DEV_ADDR(base, ata_reg);
516
517
518
519 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
520
521
522 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
523
524
525 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
526
527
528
529
530 wait_complete(base, PIO_DONE_INT);
531}
532
533
534
535
536
537
538
539
540
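/*
 * Read one ATA taskfile register: select the register address, start a
 * host-driven PIO read, wait for completion and return the value from
 * the device RX buffer.
 */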
541static unsigned short read_atapi_register(void __iomem *base,
542 unsigned long ata_reg)
543{
544
545
546
547 ATAPI_SET_DEV_ADDR(base, ata_reg);
548
549
550
551 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
552
553
554 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
555
556
557 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
558
559
560
561
562
563 wait_complete(base, PIO_DONE_INT);
564
565
566
567
568 return ATAPI_GET_DEV_RXBUF(base);
569}
570
571
572
573
574
575
576
577
578
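/*
 * Write @len 16-bit words from @buf to the ATA data register, one PIO
 * transfer per word.
 */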
579static void write_atapi_data(void __iomem *base,
580 int len, unsigned short *buf)
581{
582 int i;
583
584
585 ATAPI_SET_XFER_LEN(base, 1);
586
587
588
589
590 ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
591
592
593
594 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
595
596
597 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
598
599 for (i = 0; i < len; i++) {
600
601
602
603 ATAPI_SET_DEV_TXBUF(base, buf[i]);
604
605
606 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
607
608
609
610
611
612 wait_complete(base, PIO_DONE_INT);
613 }
614}
615
616
617
618
619
620
621
622
623
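/*
 * Read @len 16-bit words from the ATA data register into @buf, one PIO
 * transfer per word.
 */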
624static void read_atapi_data(void __iomem *base,
625 int len, unsigned short *buf)
626{
627 int i;
628
629
630 ATAPI_SET_XFER_LEN(base, 1);
631
632
633
634
635 ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
636
637
638
639 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
640
641
642 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
643
644 for (i = 0; i < len; i++) {
645
646 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
647
648
649
650
651
652 wait_complete(base, PIO_DONE_INT);
653
654
655
656
657 buf[i] = ATAPI_GET_DEV_RXBUF(base);
658 }
659}
660
661
662
663
664
665
666
667
668
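/*
 * bfin_tf_load - write a taskfile to the device through the ATAPI
 * register interface, including the HOB bytes for LBA48 commands.
 */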
669static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
670{
671 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
672 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
673
674 if (tf->ctl != ap->last_ctl) {
675 write_atapi_register(base, ATA_REG_CTRL, tf->ctl);
676 ap->last_ctl = tf->ctl;
677 ata_wait_idle(ap);
678 }
679
680 if (is_addr) {
681 if (tf->flags & ATA_TFLAG_LBA48) {
682 write_atapi_register(base, ATA_REG_FEATURE,
683 tf->hob_feature);
684 write_atapi_register(base, ATA_REG_NSECT,
685 tf->hob_nsect);
686 write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal);
687 write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam);
688 write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah);
689 dev_dbg(ap->dev, "hob: feat 0x%X nsect 0x%X, lba 0x%X "
690 "0x%X 0x%X\n",
691 tf->hob_feature,
692 tf->hob_nsect,
693 tf->hob_lbal,
694 tf->hob_lbam,
695 tf->hob_lbah);
696 }
697
698 write_atapi_register(base, ATA_REG_FEATURE, tf->feature);
699 write_atapi_register(base, ATA_REG_NSECT, tf->nsect);
700 write_atapi_register(base, ATA_REG_LBAL, tf->lbal);
701 write_atapi_register(base, ATA_REG_LBAM, tf->lbam);
702 write_atapi_register(base, ATA_REG_LBAH, tf->lbah);
703 dev_dbg(ap->dev, "feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
704 tf->feature,
705 tf->nsect,
706 tf->lbal,
707 tf->lbam,
708 tf->lbah);
709 }
710
711 if (tf->flags & ATA_TFLAG_DEVICE) {
712 write_atapi_register(base, ATA_REG_DEVICE, tf->device);
713 dev_dbg(ap->dev, "device 0x%X\n", tf->device);
714 }
715
716 ata_wait_idle(ap);
717}
718
719
720
721
722
723
724
725
726static u8 bfin_check_status(struct ata_port *ap)
727{
728 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
729 return read_atapi_register(base, ATA_REG_STATUS);
730}
731
732
733
734
735
736
737
738
739
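/*
 * bfin_tf_read - read the current taskfile back from the device; for
 * LBA48 commands the HOB fields are fetched after setting ATA_HOB in
 * the device control register.
 */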
740static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
741{
742 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
743
744 tf->command = bfin_check_status(ap);
745 tf->feature = read_atapi_register(base, ATA_REG_ERR);
746 tf->nsect = read_atapi_register(base, ATA_REG_NSECT);
747 tf->lbal = read_atapi_register(base, ATA_REG_LBAL);
748 tf->lbam = read_atapi_register(base, ATA_REG_LBAM);
749 tf->lbah = read_atapi_register(base, ATA_REG_LBAH);
750 tf->device = read_atapi_register(base, ATA_REG_DEVICE);
751
752 if (tf->flags & ATA_TFLAG_LBA48) {
753 write_atapi_register(base, ATA_REG_CTRL, tf->ctl | ATA_HOB);
754 tf->hob_feature = read_atapi_register(base, ATA_REG_ERR);
755 tf->hob_nsect = read_atapi_register(base, ATA_REG_NSECT);
756 tf->hob_lbal = read_atapi_register(base, ATA_REG_LBAL);
757 tf->hob_lbam = read_atapi_register(base, ATA_REG_LBAM);
758 tf->hob_lbah = read_atapi_register(base, ATA_REG_LBAH);
759 }
760}
761
762
763
764
765
766
767
768
769
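/*
 * bfin_exec_command - write @tf->command to the ATA command register
 * and pause briefly afterwards (ata_sff_pause).
 */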
770static void bfin_exec_command(struct ata_port *ap,
771 const struct ata_taskfile *tf)
772{
773 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
774 dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command);
775
776 write_atapi_register(base, ATA_REG_CMD, tf->command);
777 ata_sff_pause(ap);
778}
779
780
781
782
783
784
785static u8 bfin_check_altstatus(struct ata_port *ap)
786{
787 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
788 return read_atapi_register(base, ATA_REG_ALTSTATUS);
789}
790
791
792
793
794
795
796
797
798
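/*
 * bfin_dev_select - select device 0 or 1 on the ATA bus by writing the
 * device register.
 */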
799static void bfin_dev_select(struct ata_port *ap, unsigned int device)
800{
801 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
802 u8 tmp;
803
804 if (device == 0)
805 tmp = ATA_DEVICE_OBS;
806 else
807 tmp = ATA_DEVICE_OBS | ATA_DEV1;
808
809 write_atapi_register(base, ATA_REG_DEVICE, tmp);
810 ata_sff_pause(ap);
811}
812
813
814
815
816
817
818
819static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
820{
821 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
822 write_atapi_register(base, ATA_REG_CTRL, ctl);
823}
824
825
826
827
828
829
830
831
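/*
 * bfin_bmdma_setup - build a descriptor-array DMA transfer for @qc: map
 * the scatterlist, fill one 16-bit-wide DMA descriptor per segment,
 * program the ATAPI DMA channel, issue the command and set the transfer
 * direction and length in the ATAPI controller.
 */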
832static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
833{
834 struct ata_port *ap = qc->ap;
835 struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd;
836 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
837 unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN;
838 struct scatterlist *sg;
839 unsigned int si;
840 unsigned int channel;
841 unsigned int dir;
842 unsigned int size = 0;
843
844 dev_dbg(qc->ap->dev, "in atapi dma setup\n");
845
846 if (qc->tf.flags & ATA_TFLAG_WRITE) {
847 channel = CH_ATAPI_TX;
848 dir = DMA_TO_DEVICE;
849 } else {
850 channel = CH_ATAPI_RX;
851 dir = DMA_FROM_DEVICE;
852 config |= WNR;
853 }
854
855 dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir);
856
857
858 for_each_sg(qc->sg, sg, qc->n_elem, si) {
859 dma_desc_cpu[si].start_addr = sg_dma_address(sg);
860 dma_desc_cpu[si].cfg = config;
861 dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1;
862 dma_desc_cpu[si].x_modify = 2;
863 size += sg_dma_len(sg);
864 }
865
866
867 dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE);
868
869 flush_dcache_range((unsigned int)dma_desc_cpu,
870 (unsigned int)dma_desc_cpu +
871 qc->n_elem * sizeof(struct dma_desc_array));
872
873
874 set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma);
875 set_dma_x_count(channel, 0);
876 set_dma_x_modify(channel, 0);
877 set_dma_config(channel, config);
878
879 SSYNC();
880
881
882 bfin_exec_command(ap, &qc->tf);
883
884 if (qc->tf.flags & ATA_TFLAG_WRITE) {
885
886 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
887 | XFER_DIR));
888 } else {
889
890 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
891 & ~XFER_DIR));
892 }
893
894
895 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
896
897
898 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
899
900
901 ATAPI_SET_XFER_LEN(base, size >> 1);
902}
903
904
905
906
907
908
909
910
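/*
 * bfin_bmdma_start - kick off the previously prepared transfer by
 * setting ULTRA_START (UDMA) or MULTI_START (MWDMA) in the control
 * register.
 */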
911static void bfin_bmdma_start(struct ata_queued_cmd *qc)
912{
913 struct ata_port *ap = qc->ap;
914 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
915
916 dev_dbg(qc->ap->dev, "in atapi dma start\n");
917
918 if (!(ap->udma_mask || ap->mwdma_mask))
919 return;
920
921
922 if (ap->udma_mask)
923 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
924 | ULTRA_START);
925 else
926 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
927 | MULTI_START);
928}
929
930
931
932
933
934
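/*
 * bfin_bmdma_stop - disable the ATAPI DMA channel used by @qc and unmap
 * its scatterlist.
 */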
935static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
936{
937 struct ata_port *ap = qc->ap;
938 unsigned int dir;
939
940 dev_dbg(qc->ap->dev, "in atapi dma stop\n");
941
942 if (!(ap->udma_mask || ap->mwdma_mask))
943 return;
944
945
946 if (qc->tf.flags & ATA_TFLAG_WRITE) {
947 dir = DMA_TO_DEVICE;
948 disable_dma(CH_ATAPI_TX);
949 } else {
950 dir = DMA_FROM_DEVICE;
951 disable_dma(CH_ATAPI_RX);
952 }
953
954 dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir);
955}
956
957
958
959
960
961
962
963
964
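/*
 * bfin_devchk - classic ATA presence test: write patterns to the sector
 * count and LBA low registers and check that they read back.
 */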
965static unsigned int bfin_devchk(struct ata_port *ap,
966 unsigned int device)
967{
968 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
969 u8 nsect, lbal;
970
971 bfin_dev_select(ap, device);
972
973 write_atapi_register(base, ATA_REG_NSECT, 0x55);
974 write_atapi_register(base, ATA_REG_LBAL, 0xaa);
975
976 write_atapi_register(base, ATA_REG_NSECT, 0xaa);
977 write_atapi_register(base, ATA_REG_LBAL, 0x55);
978
979 write_atapi_register(base, ATA_REG_NSECT, 0x55);
980 write_atapi_register(base, ATA_REG_LBAL, 0xaa);
981
982 nsect = read_atapi_register(base, ATA_REG_NSECT);
983 lbal = read_atapi_register(base, ATA_REG_LBAL);
984
985 if ((nsect == 0x55) && (lbal == 0xaa))
986 return 1;
987
988 return 0;
989}
990
991
992
993
994
995
996
997static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
998{
999 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1000 unsigned int dev0 = devmask & (1 << 0);
1001 unsigned int dev1 = devmask & (1 << 1);
1002 unsigned long deadline;
1003
1004
1005
1006
1007 if (dev0)
1008 ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1009
1010
1011
1012
1013 deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
1014 while (dev1) {
1015 u8 nsect, lbal;
1016
1017 bfin_dev_select(ap, 1);
1018 nsect = read_atapi_register(base, ATA_REG_NSECT);
1019 lbal = read_atapi_register(base, ATA_REG_LBAL);
1020 if ((nsect == 1) && (lbal == 1))
1021 break;
1022 if (time_after(jiffies, deadline)) {
1023 dev1 = 0;
1024 break;
1025 }
1026 ata_msleep(ap, 50);
1027 }
1028 if (dev1)
1029 ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1030
1031
1032 bfin_dev_select(ap, 0);
1033 if (dev1)
1034 bfin_dev_select(ap, 1);
1035 if (dev0)
1036 bfin_dev_select(ap, 0);
1037}
1038
1039
1040
1041
1042
1043
1044
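/*
 * bfin_bus_softreset - pulse SRST in the device control register, give
 * the bus time to settle, and if a device is still responding finish
 * with the standard post-reset busy/ready waits.
 */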
1045static unsigned int bfin_bus_softreset(struct ata_port *ap,
1046 unsigned int devmask)
1047{
1048 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1049
1050
1051 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1052 udelay(20);
1053 write_atapi_register(base, ATA_REG_CTRL, ap->ctl | ATA_SRST);
1054 udelay(20);
1055 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067 ata_msleep(ap, 150);
1068
1069
1070
1071
1072
1073 if (bfin_check_status(ap) == 0xFF)
1074 return 0;
1075
1076 bfin_bus_post_reset(ap, devmask);
1077
1078 return 0;
1079}
1080
1081
1082
1083
1084
1085
1086
1087
1088
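/*
 * bfin_softreset - libata softreset hook: detect which devices are
 * present, issue a bus softreset and classify the attached devices.
 */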
1089static int bfin_softreset(struct ata_link *link, unsigned int *classes,
1090 unsigned long deadline)
1091{
1092 struct ata_port *ap = link->ap;
1093 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1094 unsigned int devmask = 0, err_mask;
1095 u8 err;
1096
1097
1098 if (bfin_devchk(ap, 0))
1099 devmask |= (1 << 0);
1100 if (slave_possible && bfin_devchk(ap, 1))
1101 devmask |= (1 << 1);
1102
1103
1104 bfin_dev_select(ap, 0);
1105
1106
1107 err_mask = bfin_bus_softreset(ap, devmask);
1108 if (err_mask) {
1109 ata_port_err(ap, "SRST failed (err_mask=0x%x)\n",
1110 err_mask);
1111 return -EIO;
1112 }
1113
1114
1115 classes[0] = ata_sff_dev_classify(&ap->link.device[0],
1116 devmask & (1 << 0), &err);
1117 if (slave_possible && err != 0x81)
1118 classes[1] = ata_sff_dev_classify(&ap->link.device[1],
1119 devmask & (1 << 1), &err);
1120
1121 return 0;
1122}
1123
1124
1125
1126
1127
1128
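/*
 * bfin_bmdma_status - report a BMDMA-style status byte synthesized from
 * the ATAPI controller's status and interrupt status registers.
 */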
1129static unsigned char bfin_bmdma_status(struct ata_port *ap)
1130{
1131 unsigned char host_stat = 0;
1132 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1133
1134 if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON | ULTRA_XFER_ON))
1135 host_stat |= ATA_DMA_ACTIVE;
1136 if (ATAPI_GET_INT_STATUS(base) & ATAPI_DEV_INT)
1137 host_stat |= ATA_DMA_INTR;
1138
1139 dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat);
1140
1141 return host_stat;
1142}
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
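/*
 * bfin_data_xfer - PIO data transfer hook: move @buflen bytes as 16-bit
 * words, handling a trailing odd byte with a one-word bounce buffer.
 * Returns the number of bytes actually transferred.
 */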
1154static unsigned int bfin_data_xfer(struct ata_queued_cmd *qc,
1155 unsigned char *buf,
1156 unsigned int buflen, int rw)
1157{
1158 struct ata_port *ap = qc->dev->link->ap;
1159 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1160 unsigned int words = buflen >> 1;
1161 unsigned short *buf16 = (u16 *)buf;
1162
1163
1164 if (rw == READ)
1165 read_atapi_data(base, words, buf16);
1166 else
1167 write_atapi_data(base, words, buf16);
1168
1169
1170 if (unlikely(buflen & 0x01)) {
1171 unsigned short align_buf[1] = { 0 };
1172 unsigned char *trailing_buf = buf + buflen - 1;
1173
1174 if (rw == READ) {
1175 read_atapi_data(base, 1, align_buf);
1176 memcpy(trailing_buf, align_buf, 1);
1177 } else {
1178 memcpy(align_buf, trailing_buf, 1);
1179 write_atapi_data(base, 1, align_buf);
1180 }
1181 words++;
1182 }
1183
1184 return words << 1;
1185}
1186
1187
1188
1189
1190
1191
1192
1193
1194static void bfin_irq_clear(struct ata_port *ap)
1195{
1196 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1197
1198 dev_dbg(ap->dev, "in atapi irq clear\n");
1199 ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT
1200 | MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT
1201 | MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT);
1202}
1203
1204
1205
1206
1207
1208
1209
1210
static void bfin_thaw(struct ata_port *ap)
1212{
1213 dev_dbg(ap->dev, "in atapi dma thaw\n");
1214 bfin_check_status(ap);
1215 ata_sff_irq_on(ap);
1216}
1217
1218
1219
1220
1221
1222
1223
1224
1225
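/*
 * bfin_postreset - re-enable port interrupts, reselect the devices
 * found by reset and restore the cached device control value if any
 * device is present.
 */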
1226static void bfin_postreset(struct ata_link *link, unsigned int *classes)
1227{
1228 struct ata_port *ap = link->ap;
1229 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1230
1231
1232 ata_sff_irq_on(ap);
1233
1234
1235 if (classes[0] != ATA_DEV_NONE)
1236 bfin_dev_select(ap, 1);
1237 if (classes[1] != ATA_DEV_NONE)
1238 bfin_dev_select(ap, 0);
1239
1240
1241 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
1242 return;
1243 }
1244
1245
1246 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1247}
1248
1249static void bfin_port_stop(struct ata_port *ap)
1250{
1251 dev_dbg(ap->dev, "in atapi port stop\n");
1252 if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
1253 dma_free_coherent(ap->dev,
1254 BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
1255 ap->bmdma_prd,
1256 ap->bmdma_prd_dma);
1257
1258 free_dma(CH_ATAPI_RX);
1259 free_dma(CH_ATAPI_TX);
1260 }
1261}
1262
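/*
 * bfin_port_start - allocate the coherent DMA descriptor array and
 * claim the ATAPI RX/TX DMA channels; on failure clear the DMA masks so
 * the port falls back to PIO.
 */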
1263static int bfin_port_start(struct ata_port *ap)
1264{
1265 dev_dbg(ap->dev, "in atapi port start\n");
1266 if (!(ap->udma_mask || ap->mwdma_mask))
1267 return 0;
1268
1269 ap->bmdma_prd = dma_alloc_coherent(ap->dev,
1270 BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
1271 &ap->bmdma_prd_dma,
1272 GFP_KERNEL);
1273
1274 if (ap->bmdma_prd == NULL) {
1275 dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n");
1276 goto out;
1277 }
1278
1279 if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
1280 if (request_dma(CH_ATAPI_TX,
1281 "BFIN ATAPI TX DMA") >= 0)
1282 return 0;
1283
1284 free_dma(CH_ATAPI_RX);
1285 dma_free_coherent(ap->dev,
1286 BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
1287 ap->bmdma_prd,
1288 ap->bmdma_prd_dma);
1289 }
1290
1291out:
1292 ap->udma_mask = 0;
1293 ap->mwdma_mask = 0;
        dev_err(ap->dev,
                "Unable to request ATAPI DMA, continuing in PIO mode\n");
1296
1297 return 0;
1298}
1299
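/*
 * Per-port interrupt body: depending on the HSM state, check (and stop)
 * BMDMA for DMA commands, read the device status, acknowledge the
 * controller interrupt and advance the libata HSM.  Returns 1 if the
 * interrupt was consumed, 0 for a spurious one.
 */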
1300static unsigned int bfin_ata_host_intr(struct ata_port *ap,
1301 struct ata_queued_cmd *qc)
1302{
1303 struct ata_eh_info *ehi = &ap->link.eh_info;
1304 u8 status, host_stat = 0;
1305
1306 VPRINTK("ata%u: protocol %d task_state %d\n",
1307 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1308
1309
1310 switch (ap->hsm_task_state) {
1311 case HSM_ST_FIRST:
1312
1313
1314
1315
1316
1317
1318
1319
1320 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1321 goto idle_irq;
1322 break;
1323 case HSM_ST_LAST:
1324 if (qc->tf.protocol == ATA_PROT_DMA ||
1325 qc->tf.protocol == ATAPI_PROT_DMA) {
1326
1327 host_stat = ap->ops->bmdma_status(ap);
1328 VPRINTK("ata%u: host_stat 0x%X\n",
1329 ap->print_id, host_stat);
1330
1331
1332 if (!(host_stat & ATA_DMA_INTR))
1333 goto idle_irq;
1334
1335
1336 ap->ops->bmdma_stop(qc);
1337
1338 if (unlikely(host_stat & ATA_DMA_ERR)) {
1339
1340 qc->err_mask |= AC_ERR_HOST_BUS;
1341 ap->hsm_task_state = HSM_ST_ERR;
1342 }
1343 }
1344 break;
1345 case HSM_ST:
1346 break;
1347 default:
1348 goto idle_irq;
1349 }
1350
1351
1352 status = ap->ops->sff_check_altstatus(ap);
1353 if (status & ATA_BUSY)
1354 goto busy_ata;
1355
1356
1357 status = ap->ops->sff_check_status(ap);
1358 if (unlikely(status & ATA_BUSY))
1359 goto busy_ata;
1360
1361
1362 ap->ops->sff_irq_clear(ap);
1363
1364 ata_sff_hsm_move(ap, qc, status, 0);
1365
1366 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
1367 qc->tf.protocol == ATAPI_PROT_DMA))
1368 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
1369
1370busy_ata:
1371 return 1;
1372
1373idle_irq:
1374 ap->stats.idle_irq++;
1375
1376#ifdef ATA_IRQ_TRAP
1377 if ((ap->stats.idle_irq % 1000) == 0) {
1378 ap->ops->irq_ack(ap, 0);
1379 ata_port_warn(ap, "irq trap\n");
1380 return 1;
1381 }
1382#endif
1383 return 0;
1384}
1385
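/*
 * Shared controller interrupt: under the host lock, dispatch to
 * bfin_ata_host_intr() for every port with an active non-polling
 * command.
 */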
1386static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
1387{
1388 struct ata_host *host = dev_instance;
1389 unsigned int i;
1390 unsigned int handled = 0;
1391 unsigned long flags;
1392
1393
1394 spin_lock_irqsave(&host->lock, flags);
1395
1396 for (i = 0; i < host->n_ports; i++) {
1397 struct ata_port *ap = host->ports[i];
1398 struct ata_queued_cmd *qc;
1399
1400 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1401 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1402 handled |= bfin_ata_host_intr(ap, qc);
1403 }
1404
1405 spin_unlock_irqrestore(&host->lock, flags);
1406
1407 return IRQ_RETVAL(handled);
1408}
1409
1410
1411static struct scsi_host_template bfin_sht = {
1412 ATA_BASE_SHT(DRV_NAME),
1413 .sg_tablesize = BFIN_MAX_SG_SEGMENTS,
1414 .dma_boundary = ATA_DMA_BOUNDARY,
1415};
1416
1417static struct ata_port_operations bfin_pata_ops = {
1418 .inherits = &ata_bmdma_port_ops,
1419
1420 .set_piomode = bfin_set_piomode,
1421 .set_dmamode = bfin_set_dmamode,
1422
1423 .sff_tf_load = bfin_tf_load,
1424 .sff_tf_read = bfin_tf_read,
1425 .sff_exec_command = bfin_exec_command,
1426 .sff_check_status = bfin_check_status,
1427 .sff_check_altstatus = bfin_check_altstatus,
1428 .sff_dev_select = bfin_dev_select,
1429 .sff_set_devctl = bfin_set_devctl,
1430
1431 .bmdma_setup = bfin_bmdma_setup,
1432 .bmdma_start = bfin_bmdma_start,
1433 .bmdma_stop = bfin_bmdma_stop,
1434 .bmdma_status = bfin_bmdma_status,
1435 .sff_data_xfer = bfin_data_xfer,
1436
1437 .qc_prep = ata_noop_qc_prep,
1438
1439 .thaw = bfin_thaw,
1440 .softreset = bfin_softreset,
1441 .postreset = bfin_postreset,
1442
1443 .sff_irq_clear = bfin_irq_clear,
1444
1445 .port_start = bfin_port_start,
1446 .port_stop = bfin_port_stop,
1447};
1448
1449static struct ata_port_info bfin_port_info[] = {
1450 {
1451 .flags = ATA_FLAG_SLAVE_POSS,
1452 .pio_mask = ATA_PIO4,
1453 .mwdma_mask = 0,
1454 .udma_mask = 0,
1455 .port_ops = &bfin_pata_ops,
1456 },
1457};
1458
1459
1460
1461
1462
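/*
 * Hard-reset the interface via DEV_RST, poll until the attached device
 * clears BSY, and re-enable the controller interrupt mask.  Returns
 * non-zero if the device never became ready.
 */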
1463static int bfin_reset_controller(struct ata_host *host)
1464{
1465 void __iomem *base = (void __iomem *)host->ports[0]->ioaddr.ctl_addr;
1466 int count;
1467 unsigned short status;
1468
1469
1470 ATAPI_SET_INT_MASK(base, 0);
1471 SSYNC();
1472
1473
1474 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | DEV_RST);
1475 udelay(30);
1476
1477
1478 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) & ~DEV_RST);
1479 msleep(2);
1480
1481
1482 count = 10000000;
1483 do {
1484 status = read_atapi_register(base, ATA_REG_STATUS);
1485 } while (--count && (status & ATA_BUSY));
1486
1487
1488 ATAPI_SET_INT_MASK(base, 1);
1489 SSYNC();
1490
1491 return (!count);
1492}
1493
1494
1495
1496
1497static unsigned short atapi_io_port[] = {
1498 P_ATAPI_RESET,
1499 P_ATAPI_DIOR,
1500 P_ATAPI_DIOW,
1501 P_ATAPI_CS0,
1502 P_ATAPI_CS1,
1503 P_ATAPI_DMACK,
1504 P_ATAPI_DMARQ,
1505 P_ATAPI_INTRQ,
1506 P_ATAPI_IORDY,
1507 P_ATAPI_D0A,
1508 P_ATAPI_D1A,
1509 P_ATAPI_D2A,
1510 P_ATAPI_D3A,
1511 P_ATAPI_D4A,
1512 P_ATAPI_D5A,
1513 P_ATAPI_D6A,
1514 P_ATAPI_D7A,
1515 P_ATAPI_D8A,
1516 P_ATAPI_D9A,
1517 P_ATAPI_D10A,
1518 P_ATAPI_D11A,
1519 P_ATAPI_D12A,
1520 P_ATAPI_D13A,
1521 P_ATAPI_D14A,
1522 P_ATAPI_D15A,
1523 P_ATAPI_A0A,
1524 P_ATAPI_A1A,
1525 P_ATAPI_A2A,
1526 0
1527};
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
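/*
 * bfin_atapi_probe - platform probe: validate the resources, scale the
 * advertised UDMA mask down to what the current SCLK can support, claim
 * the ATAPI peripheral pins, reset the controller and register the ATA
 * host with a shared interrupt handler.
 */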
1542static int bfin_atapi_probe(struct platform_device *pdev)
1543{
1544 int board_idx = 0;
1545 struct resource *res;
1546 struct ata_host *host;
1547 unsigned int fsclk = get_sclk();
1548 int udma_mode = 5;
1549 const struct ata_port_info *ppi[] =
1550 { &bfin_port_info[board_idx], NULL };
1551
1552
1553
1554
1555 if (unlikely(pdev->num_resources != 2)) {
1556 dev_err(&pdev->dev, "invalid number of resources\n");
1557 return -EINVAL;
1558 }
1559
1560
1561
1562
1563 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1564 if (res == NULL)
1565 return -EINVAL;
1566
1567 while (bfin_port_info[board_idx].udma_mask > 0 &&
1568 udma_fsclk[udma_mode] > fsclk) {
1569 udma_mode--;
1570 bfin_port_info[board_idx].udma_mask >>= 1;
1571 }
1572
1573
1574
1575
1576 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
1577 if (!host)
1578 return -ENOMEM;
1579
1580 host->ports[0]->ioaddr.ctl_addr = (void *)res->start;
1581
1582 if (peripheral_request_list(atapi_io_port, "atapi-io-port")) {
1583 dev_err(&pdev->dev, "Requesting Peripherals failed\n");
1584 return -EFAULT;
1585 }
1586
1587 if (bfin_reset_controller(host)) {
1588 peripheral_free_list(atapi_io_port);
                dev_err(&pdev->dev, "Failed to reset ATAPI device\n");
1590 return -EFAULT;
1591 }
1592
1593 if (ata_host_activate(host, platform_get_irq(pdev, 0),
1594 bfin_ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
1595 peripheral_free_list(atapi_io_port);
                dev_err(&pdev->dev, "Failed to attach ATAPI device\n");
1597 return -ENODEV;
1598 }
1599
1600 return 0;
1601}
1602
1603
1604
1605
1606
1607
1608
1609
1610static int bfin_atapi_remove(struct platform_device *pdev)
1611{
1612 struct ata_host *host = platform_get_drvdata(pdev);
1613
1614 ata_host_detach(host);
1615
1616 peripheral_free_list(atapi_io_port);
1617
1618 return 0;
1619}
1620
1621#ifdef CONFIG_PM_SLEEP
1622static int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
1623{
1624 struct ata_host *host = platform_get_drvdata(pdev);
1625 if (host)
1626 return ata_host_suspend(host, state);
1627 else
1628 return 0;
1629}
1630
1631static int bfin_atapi_resume(struct platform_device *pdev)
1632{
1633 struct ata_host *host = platform_get_drvdata(pdev);
1634 int ret;
1635
1636 if (host) {
1637 ret = bfin_reset_controller(host);
1638 if (ret) {
1639 printk(KERN_ERR DRV_NAME ": Error during HW init\n");
1640 return ret;
1641 }
1642 ata_host_resume(host);
1643 }
1644
1645 return 0;
1646}
1647#else
1648#define bfin_atapi_suspend NULL
1649#define bfin_atapi_resume NULL
1650#endif
1651
1652static struct platform_driver bfin_atapi_driver = {
1653 .probe = bfin_atapi_probe,
1654 .remove = bfin_atapi_remove,
1655 .suspend = bfin_atapi_suspend,
1656 .resume = bfin_atapi_resume,
1657 .driver = {
1658 .name = DRV_NAME,
1659 },
1660};
1661
1662#define ATAPI_MODE_SIZE 10
1663static char bfin_atapi_mode[ATAPI_MODE_SIZE];
1664
1665static int __init bfin_atapi_init(void)
1666{
1667 pr_info("register bfin atapi driver\n");
1668
        switch (bfin_atapi_mode[0]) {
        case 'p':
        case 'P':
                break;
        case 'm':
        case 'M':
                bfin_port_info[0].mwdma_mask = ATA_MWDMA2;
                break;
        default:
                bfin_port_info[0].udma_mask = ATA_UDMA5;
        }
1680
1681 return platform_driver_register(&bfin_atapi_driver);
1682}
1683
1684static void __exit bfin_atapi_exit(void)
1685{
1686 platform_driver_unregister(&bfin_atapi_driver);
1687}
1688
1689module_init(bfin_atapi_init);
1690module_exit(bfin_atapi_exit);
1691
1692
1693
1694
1695
1696
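/*
 * Module parameter: a value starting with 'p'/'P' keeps the driver in
 * PIO mode, 'm'/'M' enables multiword DMA (MWDMA2), and anything else
 * (the default) enables UDMA up to mode 5.
 */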
1697module_param_string(bfin_atapi_mode, bfin_atapi_mode, ATAPI_MODE_SIZE, 0);
1698
1699MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
1700MODULE_DESCRIPTION("PATA driver for blackfin 54x ATAPI controller");
1701MODULE_LICENSE("GPL");
1702MODULE_VERSION(DRV_VERSION);
1703MODULE_ALIAS("platform:" DRV_NAME);
1704