/*
 * Low-level driver for the Cavium OCTEON bootbus Compact Flash (PATA)
 * interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>

#include <asm/octeon/octeon.h>
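/*
 * The OCTEON bootbus compact flash interface is used in several
 * configurations: 8-bit PIO, 16-bit PIO, and 16-bit True IDE mode.
 * Only the True IDE configuration (a second chip-select region plus a
 * valid bootbus DMA engine) supports MWDMA; the other configurations
 * are PIO only and are polled, as no device interrupt is used.
 */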
#define DRV_NAME "pata_octeon_cf"
#define DRV_VERSION "2.1"

struct octeon_cf_port {
	struct workqueue_struct *wq;
	struct delayed_work delayed_finish;
	struct ata_port *ap;
	int dma_finished;
};

static struct scsi_host_template octeon_cf_sht = {
	ATA_PIO_SHT(DRV_NAME),
};
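/*
 * Convert a duration in nanoseconds to the value programmed into a
 * bootbus timing register, given the region's timing multiplier.
 */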
static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
{
	unsigned int val;

	/*
	 * Compute the number of I/O clock periods needed for the
	 * requested duration, rounded up.
	 */
	val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
			   1000 * tim_mult);

	return val;
}
static void octeon_cf_set_boot_reg_cfg(int cs)
{
	union cvmx_mio_boot_reg_cfgx reg_cfg;

	reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
	reg_cfg.s.dmack = 0;	/* Don't assert DMACK on access */
	reg_cfg.s.tim_mult = 2;	/* Timing multiplier 2x */
	reg_cfg.s.rd_dly = 0;	/* Sample on falling edge of BOOT_OE */
	reg_cfg.s.sam = 0;	/* Don't combine write and output enable */
	reg_cfg.s.we_ext = 0;	/* No write enable extension */
	reg_cfg.s.oe_ext = 0;	/* No read enable extension */
	reg_cfg.s.en = 1;	/* Enable this region */
	reg_cfg.s.orbit = 0;	/* Don't combine with previous region */
	reg_cfg.s.ale = 0;	/* Don't do address multiplexing */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
}
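/*
 * Called after libata determines the needed PIO mode.  Programs the
 * OCTEON bootbus regions to meet the timing requirements of the
 * selected PIO mode.
 *
 * @ap:  ATA port information
 * @dev: ATA device
 */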
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_reg_timx reg_tim;
	int cs = ocd->base_region;
	int T;
	struct ata_timing timing;

	int use_iordy;
	int trh;
	int pause;
	/* These names are timing parameters from the ATA spec */
	int t1;
	int t2;
	int t2i;

	/* T is the duration of two eclock periods in picoseconds (tim_mult == 2). */
	T = (int)(2000000000000LL / octeon_get_clock_rate());

	if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
		BUG();

	t1 = timing.setup;
	if (t1)
		t1--;
	t2 = timing.active;
	if (t2)
		t2--;
	t2i = timing.act8b;
	if (t2i)
		t2i--;

	trh = ns_to_tim_reg(2, 20);
	if (trh)
		trh--;

	pause = timing.cycle - timing.active - timing.setup - trh;
	if (pause)
		pause--;

	octeon_cf_set_boot_reg_cfg(cs);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects.  */
		octeon_cf_set_boot_reg_cfg(cs + 1);

	use_iordy = ata_pio_need_iordy(dev);

	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
	/* Disable page mode */
	reg_tim.s.pagem = 0;
	/* Enable dynamic timing */
	reg_tim.s.waitm = use_iordy;
	/* Pages are disabled */
	reg_tim.s.pages = 0;
	/* We don't use multiplexed address mode */
	reg_tim.s.ale = 0;
	/* Not used */
	reg_tim.s.page = 0;
	/* Time after IORDY to continue to assert the data */
	reg_tim.s.wait = 0;
	/* Time to wait to complete the cycle. */
	reg_tim.s.pause = pause;
	/* How long to hold after a write to de-assert CE. */
	reg_tim.s.wr_hld = trh;
	/* How long to wait after a read to de-assert CE. */
	reg_tim.s.rd_hld = trh;
	/* Time write signal is asserted */
	reg_tim.s.we = t2;
	/* Time read signal is asserted */
	reg_tim.s.oe = t2;
	/* Time after CE that read/write starts */
	reg_tim.s.ce = ns_to_tim_reg(2, 5);
	/* Time before CE that address is valid */
	reg_tim.s.adr = 0;

	/* Program the bootbus region timing for the data port chip select. */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects.  */
		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
}

static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data;
	union cvmx_mio_boot_dma_timx dma_tim;
	unsigned int oe_a;
	unsigned int oe_n;
	unsigned int dma_ackh;
	unsigned int dma_arq;
	unsigned int pause;
	unsigned int T0, Tkr, Td;
	unsigned int tim_mult;

	const struct ata_timing *timing;

	timing = ata_timing_find_mode(dev->dma_mode);
	T0 = timing->cycle;
	Td = timing->active;
	Tkr = timing->recover;
	dma_ackh = timing->dmack_hold;

	dma_tim.u64 = 0;
	/* dma_tim.s.tim_mult = 0 --> 4x multiplier */
	tim_mult = 4;

	/* Value in eclocks, not affected by tim_mult. */
	dma_arq = 8;
	pause = 25 - dma_arq * 1000 /
		(octeon_get_clock_rate() / 1000000);

	oe_a = Td;
	/* Output enable negated: recovery time, lengthened to fill T0. */
	oe_n = max(T0 - oe_a, Tkr);

	dma_tim.s.dmack_pi = 1;

	dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);

	/* DMACK setup time: use a conservative 20 ns. */
	dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
	dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);

	dma_tim.s.dmarq = dma_arq;
	dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);

	dma_tim.s.rd_dly = 0;	/* Sample right on edge */

	/* Writes only. */
	dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);

	pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
		 ns_to_tim_reg(tim_mult, 60));
	pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
		 dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
		 dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);

	cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
		       dma_tim.u64);
}

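/*
 * Handle an 8 bit I/O request.
 *
 * @dev:    Device to access
 * @buffer: Data buffer
 * @buflen: Length of the buffer.
 * @rw:     True to write.
 */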
static unsigned int octeon_cf_data_xfer8(struct ata_device *dev,
					 unsigned char *buffer,
					 unsigned int buflen,
					 int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned long words;
	int count;

	words = buflen;
	if (rw) {
		count = 16;
		while (words--) {
			iowrite8(*buffer, data_addr);
			buffer++;
			/*
			 * Every 16 writes do a read so the bootbus
			 * FIFO doesn't fill up.
			 */
			if (--count == 0) {
				ioread8(ap->ioaddr.altstatus_addr);
				count = 16;
			}
		}
	} else {
		ioread8_rep(data_addr, buffer, words);
	}
	return buflen;
}

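/*
 * Handle a 16 bit I/O request.
 *
 * @dev:    Device to access
 * @buffer: Data buffer
 * @buflen: Length of the buffer.
 * @rw:     True to write.
 */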
static unsigned int octeon_cf_data_xfer16(struct ata_device *dev,
					  unsigned char *buffer,
					  unsigned int buflen,
					  int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned long words;
	int count;

	words = buflen / 2;
	if (rw) {
		count = 16;
		while (words--) {
			iowrite16(*(uint16_t *)buffer, data_addr);
			buffer += sizeof(uint16_t);
			/*
			 * Every 16 writes do a read so the bootbus
			 * FIFO doesn't fill up.
			 */
			if (--count == 0) {
				ioread8(ap->ioaddr.altstatus_addr);
				count = 16;
			}
		}
	} else {
		while (words--) {
			*(uint16_t *)buffer = ioread16(data_addr);
			buffer += sizeof(uint16_t);
		}
	}
	/* Transfer the trailing byte, if any. */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };

		if (rw == READ) {
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(buffer, align_buf, 1);
		} else {
			memcpy(align_buf, buffer, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		words++;
	}
	return buflen;
}

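/*
 * Read the taskfile for 16bit non-True IDE only.
 */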
static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
{
	u16 blob;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	blob = __raw_readw(base + 0xc);
	tf->feature = blob >> 8;

	blob = __raw_readw(base + 2);
	tf->nsect = blob & 0xff;
	tf->lbal = blob >> 8;

	blob = __raw_readw(base + 4);
	tf->lbam = blob & 0xff;
	tf->lbah = blob >> 8;

	blob = __raw_readw(base + 6);
	tf->device = blob & 0xff;
	tf->command = blob >> 8;

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ap->ioaddr.ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);

			blob = __raw_readw(base + 0xc);
			tf->hob_feature = blob >> 8;

			blob = __raw_readw(base + 2);
			tf->hob_nsect = blob & 0xff;
			tf->hob_lbal = blob >> 8;

			blob = __raw_readw(base + 4);
			tf->hob_lbam = blob & 0xff;
			tf->hob_lbah = blob >> 8;

			iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
			ap->last_ctl = tf->ctl;
		} else {
			WARN_ON(1);
		}
	}
}

static u8 octeon_cf_check_status16(struct ata_port *ap)
{
	u16 blob;
	void __iomem *base = ap->ioaddr.data_addr;

	blob = __raw_readw(base + 6);
	return blob >> 8;
}

static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *base = ap->ioaddr.data_addr;
	int rc;
	u8 err;

	DPRINTK("about to softreset\n");
	__raw_writew(ap->ctl, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl, base + 0xe);

	rc = ata_sff_wait_after_reset(link, 1, deadline);
	if (rc) {
		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* Determine by signature whether we have ATA or ATAPI devices. */
	classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}

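/*
 * Load the taskfile for 16bit non-True IDE only.  The device register
 * is not loaded here; it is written in octeon_cf_exec_command16 as
 * part of the combined device/command write.
 */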
static void octeon_cf_tf_load16(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		__raw_writew(tf->hob_feature << 8, base + 0xc);
		__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
		__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}
	if (is_addr) {
		__raw_writew(tf->feature << 8, base + 0xc);
		__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
		__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
		VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}
	ata_wait_idle(ap);
}

static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device)
{
	/* There is only one device, do nothing. */
	return;
}

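/*
 * Issue an ATA command to the host controller.  The device register is
 * also sent, as it must be written in the same 16-bit write as the
 * command.
 */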
static void octeon_cf_exec_command16(struct ata_port *ap,
				     const struct ata_taskfile *tf)
{
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;
	u16 blob;

	if (tf->flags & ATA_TFLAG_DEVICE) {
		VPRINTK("device 0x%X\n", tf->device);
		blob = tf->device;
	} else {
		blob = 0;
	}

	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
	blob |= (tf->command << 8);
	__raw_writew(blob, base + 6);

	ata_wait_idle(ap);
}

static void octeon_cf_irq_on(struct ata_port *ap)
{
}

static void octeon_cf_irq_clear(struct ata_port *ap)
{
	return;
}

static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct octeon_cf_port *cf_port;

	cf_port = ap->private_data;
	DPRINTK("ENTER\n");
	/* issue r/w command */
	qc->cursg = qc->sg;
	cf_port->dma_finished = 0;
	ap->ops->sff_exec_command(ap, &qc->tf);
	DPRINTK("EXIT\n");
}

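/*
 * Start a DMA transfer that was already set up.
 *
 * @qc: Information about the DMA
 */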
static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
{
	struct octeon_cf_data *ocd = qc->ap->dev->platform_data;
	union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
	union cvmx_mio_boot_dma_intx mio_boot_dma_int;
	struct scatterlist *sg;

	VPRINTK("%d scatterlists\n", qc->n_elem);

	/* Get the scatter list entry we need to DMA into. */
	sg = qc->cursg;
	BUG_ON(!sg);

	/* Clear the DMA complete status. */
	mio_boot_dma_int.u64 = 0;
	mio_boot_dma_int.s.done = 1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
		       mio_boot_dma_int.u64);

	/* Enable the interrupt. */
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine),
		       mio_boot_dma_int.u64);

	/* Set the direction of the DMA. */
	mio_boot_dma_cfg.u64 = 0;
	mio_boot_dma_cfg.s.en = 1;
	mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);

	/*
	 * Don't stop the DMA if the device deasserts DMARQ.  Many
	 * compact flashes deassert DMARQ for a short time between
	 * sectors.  Instead of stopping and restarting the DMA, let
	 * the hardware continue; if the DMA really stops early due to
	 * an error, a later timeout will force us to stop.
	 */
	mio_boot_dma_cfg.s.clr = 0;

	/* Size is specified in 16 bit words and minus one notation. */
	mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1;

	/* Swap the high and low bytes of every 16 bit word. */
	mio_boot_dma_cfg.s.swap8 = 1;

	mio_boot_dma_cfg.s.adr = sg_dma_address(sg);

	VPRINTK("%s %d bytes address=%p\n",
		(mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
		(void *)(unsigned long)mio_boot_dma_cfg.s.adr);

	cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine),
		       mio_boot_dma_cfg.u64);
}

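/*
 * Check for and handle a completed DMA transfer on this port.  Returns
 * nonzero if the completion was handled.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */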
static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
					   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_dma_cfgx dma_cfg;
	union cvmx_mio_boot_dma_intx dma_int;
	struct octeon_cf_port *cf_port;
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	if (ap->hsm_task_state != HSM_ST_LAST)
		return 0;

	cf_port = ap->private_data;

	dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
	if (dma_cfg.s.size != 0xfffff) {
		/* Error, the transfer was not complete. */
		qc->err_mask |= AC_ERR_HOST_BUS;
		ap->hsm_task_state = HSM_ST_ERR;
	}

	/* Stop and clear the dma engine. */
	dma_cfg.u64 = 0;
	dma_cfg.s.size = -1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), dma_cfg.u64);

	/* Disable the interrupt. */
	dma_int.u64 = 0;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), dma_int.u64);

	/* Clear the DMA complete status. */
	dma_int.s.done = 1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), dma_int.u64);

	status = ap->ops->sff_check_status(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA))
		ata_ehi_push_desc(ehi, "DMA stat 0x%x", status);

	return 1;
}

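/*
 * Check if any queued commands have more DMAs; if so start the next
 * transfer, else do end of transfer handling.
 */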
static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct octeon_cf_port *cf_port;
	int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	DPRINTK("ENTER\n");
	for (i = 0; i < host->n_ports; i++) {
		u8 status;
		struct ata_port *ap;
		struct ata_queued_cmd *qc;
		union cvmx_mio_boot_dma_intx dma_int;
		union cvmx_mio_boot_dma_cfgx dma_cfg;
		struct octeon_cf_data *ocd;

		ap = host->ports[i];
		ocd = ap->dev->platform_data;
		cf_port = ap->private_data;
		dma_int.u64 =
			cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
		dma_cfg.u64 =
			cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));

		qc = ata_qc_from_tag(ap, ap->link.active_tag);

		if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
			if (dma_int.s.done && !dma_cfg.s.en) {
				if (!sg_is_last(qc->cursg)) {
					qc->cursg = sg_next(qc->cursg);
					handled = 1;
					octeon_cf_dma_start(qc);
					continue;
				} else {
					cf_port->dma_finished = 1;
				}
			}
			if (!cf_port->dma_finished)
				continue;
			status = ioread8(ap->ioaddr.altstatus_addr);
			if (status & (ATA_BUSY | ATA_DRQ)) {
				/*
				 * We are busy, try to handle it later.
				 * This is the DMA finished interrupt,
				 * and it could take a little while for
				 * the card to be ready for more
				 * commands.
				 */
				/* Clear DMA irq. */
				dma_int.u64 = 0;
				dma_int.s.done = 1;
				cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
					       dma_int.u64);

				queue_delayed_work(cf_port->wq,
						   &cf_port->delayed_finish, 1);
				handled = 1;
			} else {
				handled |= octeon_cf_dma_finished(ap, qc);
			}
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);
	DPRINTK("EXIT\n");
	return IRQ_RETVAL(handled);
}

static void octeon_cf_delayed_finish(struct work_struct *work)
{
	struct octeon_cf_port *cf_port = container_of(work,
						      struct octeon_cf_port,
						      delayed_finish.work);
	struct ata_port *ap = cf_port->ap;
	struct ata_host *host = ap->host;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If the port is not waiting for completion, it must have
	 * handled it previously.  The hsm_task_state is protected by
	 * host->lock.
	 */
	if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished)
		goto out;

	status = ioread8(ap->ioaddr.altstatus_addr);
	if (status & (ATA_BUSY | ATA_DRQ)) {
		/* Still busy, try again. */
		queue_delayed_work(cf_port->wq,
				   &cf_port->delayed_finish, 1);
		goto out;
	}
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
		octeon_cf_dma_finished(ap, qc);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static void octeon_cf_dev_config(struct ata_device *dev)
{
	/*
	 * A maximum of 2^20 - 1 16 bit transfers are possible with
	 * the bootbus DMA.  So we need to throttle max_sectors to
	 * (2^12 - 1 == 4095) to assure that this can never happen.
	 */
	dev->max_sectors = min(dev->max_sectors, 4095U);
}

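/*
 * ATAPI devices are not supported by this driver (ATA_FLAG_NO_ATAPI is
 * set), so there is nothing to filter here.
 */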
static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 0;
}

static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		octeon_cf_dma_setup(qc);	    /* set up dma */
		octeon_cf_dma_start(qc);	    /* initiate dma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		dev_err(ap->dev, "Error, ATAPI not supported\n");
		BUG();

	default:
		return ata_sff_qc_issue(qc);
	}

	return 0;
}

static struct ata_port_operations octeon_cf_ops = {
	.inherits = &ata_sff_port_ops,
	.check_atapi_dma = octeon_cf_check_atapi_dma,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = octeon_cf_qc_issue,
	.sff_dev_select = octeon_cf_dev_select,
	.sff_irq_on = octeon_cf_irq_on,
	.sff_irq_clear = octeon_cf_irq_clear,
	.cable_detect = ata_cable_40wire,
	.set_piomode = octeon_cf_set_piomode,
	.set_dmamode = octeon_cf_set_dmamode,
	.dev_config = octeon_cf_dev_config,
};

static int __devinit octeon_cf_probe(struct platform_device *pdev)
{
	struct resource *res_cs0, *res_cs1;

	void __iomem *cs0;
	void __iomem *cs1 = NULL;
	struct ata_host *host;
	struct ata_port *ap;
	struct octeon_cf_data *ocd;
	int irq = 0;
	irq_handler_t irq_handler = NULL;
	void __iomem *base;
	struct octeon_cf_port *cf_port;
	char version[32];

	res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_cs0)
		return -EINVAL;

	ocd = pdev->dev.platform_data;

	cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
				   resource_size(res_cs0));
	if (!cs0)
		return -ENOMEM;

	/* A DMA engine implies True IDE mode, which needs a second chip select. */
	if (ocd->dma_engine >= 0) {
		res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res_cs1)
			return -EINVAL;

		cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
					   resource_size(res_cs1));
		if (!cs1)
			return -ENOMEM;
	}

	cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
	if (!cf_port)
		return -ENOMEM;

	/* Allocate the host with a single port. */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		goto free_cf_port;

	ap = host->ports[0];
	ap->private_data = cf_port;
	cf_port->ap = ap;
	ap->ops = &octeon_cf_ops;
	ap->pio_mask = ATA_PIO6;
	ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;

	base = cs0 + ocd->base_region_bias;
	if (!ocd->is16bit) {
		/* 8-bit interface: standard SFF register layout. */
		ap->ioaddr.cmd_addr = base;
		ata_sff_std_ports(&ap->ioaddr);

		ap->ioaddr.altstatus_addr = base + 0xe;
		ap->ioaddr.ctl_addr = base + 0xe;
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
	} else if (cs1) {
		/* Presence of cs1 indicates True IDE mode. */
		ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1;
		ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1);
		ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1;
		ap->ioaddr.feature_addr = base + (ATA_REG_FEATURE << 1) + 1;
		ap->ioaddr.nsect_addr = base + (ATA_REG_NSECT << 1) + 1;
		ap->ioaddr.lbal_addr = base + (ATA_REG_LBAL << 1) + 1;
		ap->ioaddr.lbam_addr = base + (ATA_REG_LBAM << 1) + 1;
		ap->ioaddr.lbah_addr = base + (ATA_REG_LBAH << 1) + 1;
		ap->ioaddr.device_addr = base + (ATA_REG_DEVICE << 1) + 1;
		ap->ioaddr.status_addr = base + (ATA_REG_STATUS << 1) + 1;
		ap->ioaddr.command_addr = base + (ATA_REG_CMD << 1) + 1;
		ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1;
		ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1;
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;

		ap->mwdma_mask = ATA_MWDMA4;
		irq = platform_get_irq(pdev, 0);
		irq_handler = octeon_cf_interrupt;

		/* True IDE mode needs a delayed work queue to poll for not-busy. */
		cf_port->wq = create_singlethread_workqueue(DRV_NAME);
		if (!cf_port->wq)
			goto free_cf_port;
		INIT_DELAYED_WORK(&cf_port->delayed_finish,
				  octeon_cf_delayed_finish);
	} else {
		/* 16 bit but not True IDE */
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
		octeon_cf_ops.softreset = octeon_cf_softreset16;
		octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
		octeon_cf_ops.sff_tf_read = octeon_cf_tf_read16;
		octeon_cf_ops.sff_tf_load = octeon_cf_tf_load16;
		octeon_cf_ops.sff_exec_command = octeon_cf_exec_command16;

		ap->ioaddr.data_addr = base + ATA_REG_DATA;
		ap->ioaddr.nsect_addr = base + ATA_REG_NSECT;
		ap->ioaddr.lbal_addr = base + ATA_REG_LBAL;
		ap->ioaddr.ctl_addr = base + 0xe;
		ap->ioaddr.altstatus_addr = base + 0xe;
	}

	ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);

	snprintf(version, sizeof(version), "%s %d bit%s",
		 DRV_VERSION,
		 (ocd->is16bit) ? 16 : 8,
		 (cs1) ? ", True IDE" : "");
	ata_print_version_once(&pdev->dev, version);

	return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);

free_cf_port:
	kfree(cf_port);
	return -ENOMEM;
}

static struct platform_driver octeon_cf_driver = {
	.probe = octeon_cf_probe,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};

static int __init octeon_cf_init(void)
{
	return platform_driver_register(&octeon_cf_driver);
}

MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(octeon_cf_init);