1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/gfp.h>
26#include <linux/pci.h>
27#include <linux/blkdev.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30#include <linux/device.h>
31#include <scsi/scsi_host.h>
32#include <scsi/scsi_device.h>
33#include <linux/libata.h>
34
/* Driver identity reported through pci_driver.name and MODULE_VERSION. */
#define DRV_NAME "sata_nv"
#define DRV_VERSION "3.5"

/* DMA segment boundary used by the ADMA scsi_host_template (nv_adma_sht). */
#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
39
enum {
	NV_MMIO_BAR			= 5,	/* PCI BAR holding the MMIO register space */

	NV_PORTS			= 2,
	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE register offsets (CK804 variants live at 0x44x) */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port's bits are shifted by 4*port_no */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG register */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01,

	/* PCI config register 0x20: ADMA/port enable control */
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	/* ADMA geometry: 32 CPBs of 128 bytes, each followed by an APRD
	 * table filling the rest of a 1 KB slot; +5 accounts for the five
	 * APRDs embedded directly in the CPB (see struct nv_adma_cpb). */
	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* ADMA general-control register block */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* ADMA per-port register blocks start here... */
	NV_ADMA_PORT			= 0x480,

	/* ...and are this far apart */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA per-port register offsets */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags kept in nv_adma_port_priv.flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 register offsets */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 interrupt layout: 16 bits per port */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* software-NCQ enable bits in NV_CTL_MCP55 */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SWNCQ interrupt status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
186
187
/* ADMA physical region descriptor (16 bytes, see NV_ADMA_APRD_SZ):
 * one DMA segment, little-endian as consumed by the controller. */
struct nv_adma_prd {
	__le64			addr;		/* segment bus address */
	__le32			len;		/* segment length in bytes */
	u8			flags;		/* NV_APRD_* */
	u8			packet_len;
	__le16			reserved;
};
195
/* High-byte control bits for the packed register words written into
 * nv_adma_cpb.tf[] by nv_adma_tf_to_cpb(). */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
205
206
207
208
209
/* ADMA Command Parameter Block (128 bytes, see NV_ADMA_CPB_SZ).
 * Holds the packed taskfile plus up to five inline APRDs; additional
 * scatter/gather entries are chained through next_aprd (nv_adma_fill_sg). */
struct nv_adma_cpb {
	u8			resp_flags;    /* NV_CPB_RESP_*, written back by HW */
	u8			reserved1;
	u8			ctl_flags;     /* NV_CPB_CTL_* */
	/* len is length in packets, not bytes — TODO confirm against HW docs */
	u8			len;
	u8			tag;           /* command tag */
	u8			next_cpb_idx;  /* next CPB to execute */
	__le16			reserved2;
	__le16			tf[12];        /* packed taskfile register writes */
	struct nv_adma_prd	aprd[5];       /* APRDs embedded in the CPB */
	__le64			next_aprd;     /* bus address of external APRD table, or 0 */
	__le64			reserved3;
};
224
225
/* Per-port private state for ADMA-capable controllers, set up in
 * nv_adma_port_start(). */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB ring (coherent DMA memory) */
	dma_addr_t		cpb_dma;	/* bus address of cpb */
	struct nv_adma_prd	*aprd;		/* external APRD tables, after the CPBs */
	dma_addr_t		aprd_dma;	/* bus address of aprd */
	void __iomem		*ctl_block;	/* this port's ADMA register block */
	void __iomem		*gen_block;	/* shared ADMA general registers */
	void __iomem		*notifier_clear_block;	/* per-port notifier-clear reg */
	u64			adma_dma_mask;	/* DMA mask usable while in ADMA mode */
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE etc. */
	int			last_issue_ncq;	/* was the previous issue an NCQ cmd? */
};
238
/* Host-wide private data: records the controller flavor (enum nv_host_type). */
struct nv_host_priv {
	unsigned long		type;
};
242
/* FIFO of deferred command tags for software NCQ, plus a bitmap of the
 * tags currently queued (defer_bits). */
struct defer_queue {
	u32		defer_bits;
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};
249
/* Bits for nv_swncq_port_priv.ncq_flags recording which FIS types have
 * been observed during the current NCQ transaction. */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};
256
/* Per-port private state for software-NCQ (MCP5x) operation. */
struct nv_swncq_port_priv {
	struct ata_bmdma_prd *prd;	 /* BMDMA PRD tables, one per tag */
	dma_addr_t	prd_dma;	 /* bus address of prd */
	void __iomem	*sactive_block;	 /* MMIO: SActive register */
	void __iomem	*irq_block;	 /* MMIO: interrupt status/enable */
	void __iomem	*tag_block;	 /* MMIO: tag register */
	u32		qc_active;	 /* tags currently active in hardware */

	unsigned int	last_issue_tag;	 /* tag of the most recently issued cmd */

	/* fifo circular queue to store deferred commands */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis: which tags have seen which FIS */
	u32		dhfis_bits;
	u32		dmafis_bits;
	u32		sdbfis_bits;

	unsigned int	ncq_flags;	 /* enum ncq_saw_flag_list */
};
277
278
/* Test the per-port ADMA interrupt bit in the general-control register:
 * bit 19 for port 0, bit 31 (19 + 12) for port 1. */
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
280
/* Forward declarations for the driver entry points referenced by the
 * operations tables and port-info structures below. */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif
331
/* Controller flavors; the values index nv_port_info[] and are stored in
 * the pci_device_id driver_data of nv_pci_tbl. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as this driver is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};
342
/* PCI IDs handled by this driver; driver_data selects the nv_host_type. */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};
361
/* PCI driver glue; suspend uses the generic libata helper while resume
 * needs the chip-specific nv_pci_device_resume. */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
372
/* SCSI host template for the plain BMDMA (non-ADMA, non-SWNCQ) flavors. */
static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
376
/* SCSI host template for ADMA: queue depth and S/G limits match the CPB
 * geometry; slave_configure switches limits per-device (ATAPI fallback). */
static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};
384
/* SCSI host template for software NCQ on MCP5x. */
static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE - 1,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
/* Base port operations shared by all flavors: standard BMDMA with
 * chip-specific SCR access and hardreset. */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};
458
/* nForce2/3: generic ops plus chip-specific freeze/thaw. */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};
464
/* CK804: generic ops plus CK804 freeze/thaw and host_stop. */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};
471
/* ADMA: builds on CK804 ops, replacing command issue, interrupt
 * handling and port lifecycle with the ADMA-aware versions. */
static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
495
/* Software NCQ (MCP5x): generic ops with SWNCQ queueing and MCP55
 * freeze/thaw. */
static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};
513
/* Per-flavor probe information carried in ata_port_info.private_data:
 * the interrupt handler to register and the SHT to use. */
struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

/* Build an anonymous nv_pi_priv compound literal for nv_port_info[]. */
#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
521
/* Port info per nv_host_type; array order must match enum nv_host_type. */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
578
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* Runtime feature switches; the corresponding module_param declarations
 * are presumably elsewhere in this file (outside this view). SWNCQ is on
 * by default, ADMA and MSI are off. */
static bool adma_enabled;
static bool swncq_enabled = true;
static bool msi_enabled;
588
/* Switch a port from ADMA mode back to legacy register mode.
 * Waits (bounded: 20 polls, 50ns apart) for the engine to go idle, clears
 * the GO bit, then waits for the LEGACY status bit before marking the port
 * as being in register mode.  No-op if already in register mode. */
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
			      status);

	/* stop the ADMA engine */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			      status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
626
/* Switch a port from legacy register mode into ADMA mode (inverse of
 * nv_adma_register_mode): set GO and wait (bounded) for LEGACY to clear
 * and IDLE to assert.  Must not be called once the ATAPI workaround has
 * disabled ADMA on this port (WARN_ON below). */
static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
656
657static int nv_adma_slave_config(struct scsi_device *sdev)
658{
659 struct ata_port *ap = ata_shost_to_port(sdev->host);
660 struct nv_adma_port_priv *pp = ap->private_data;
661 struct nv_adma_port_priv *port0, *port1;
662 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
663 unsigned long segment_boundary, flags;
664 unsigned short sg_tablesize;
665 int rc;
666 int adma_enable;
667 u32 current_reg, new_reg, config_mask;
668
669 rc = ata_scsi_slave_config(sdev);
670
671 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
672
673 return rc;
674
675 spin_lock_irqsave(ap->lock, flags);
676
677 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
678
679
680
681
682
683
684
685 segment_boundary = ATA_DMA_BOUNDARY;
686
687
688 sg_tablesize = LIBATA_MAX_PRD - 1;
689
690
691
692 adma_enable = 0;
693 nv_adma_register_mode(ap);
694 } else {
695 segment_boundary = NV_ADMA_DMA_BOUNDARY;
696 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
697 adma_enable = 1;
698 }
699
700 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, ¤t_reg);
701
702 if (ap->port_no == 1)
703 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
704 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
705 else
706 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
707 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
708
709 if (adma_enable) {
710 new_reg = current_reg | config_mask;
711 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
712 } else {
713 new_reg = current_reg & ~config_mask;
714 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
715 }
716
717 if (current_reg != new_reg)
718 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
719
720 port0 = ap->host->ports[0]->private_data;
721 port1 = ap->host->ports[1]->private_data;
722 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
723 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
724
725
726
727
728
729
730
731 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
732 } else {
733 rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
734 }
735
736 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
737 blk_queue_max_segments(sdev->request_queue, sg_tablesize);
738 ata_port_info(ap,
739 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
740 (unsigned long long)*ap->host->dev->dma_mask,
741 segment_boundary, sg_tablesize);
742
743 spin_unlock_irqrestore(ap->lock, flags);
744
745 return rc;
746}
747
748static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
749{
750 struct nv_adma_port_priv *pp = qc->ap->private_data;
751 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
752}
753
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/*
	 * The shadow taskfile registers are only readable in legacy
	 * register mode, so force the port out of ADMA mode before
	 * delegating to the standard SFF taskfile read.
	 */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}
767
768static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
769{
770 unsigned int idx = 0;
771
772 if (tf->flags & ATA_TFLAG_ISADDR) {
773 if (tf->flags & ATA_TFLAG_LBA48) {
774 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
775 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
776 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
777 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
778 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
779 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
780 } else
781 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
782
783 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
784 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
785 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
786 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
787 }
788
789 if (tf->flags & ATA_TFLAG_DEVICE)
790 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
791
792 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
793
794 while (idx < 12)
795 cpb[idx++] = cpu_to_le16(IGN);
796
797 return idx;
798}
799
/* Inspect one CPB's hardware response flags.
 * Returns 1 if the command completed OK, 0 if still pending, -1 after an
 * error (in which case EH has been kicked off: the port is frozen for
 * CPB/unknown errors, aborted for device-level errors).  force_err makes
 * an otherwise clean CPB be treated as failed (used when the notifier
 * error register flagged it). */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return -1;
	}

	if (likely(flags & NV_CPB_RESP_DONE))
		return 1;
	return 0;
}
844
/* Handle a legacy (non-ADMA) interrupt for one port.
 * Returns non-zero if the interrupt was handled here. */
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? Clear the status and bail. */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_bmdma_port_intr(ap, qc);
}
868
/* Shared interrupt handler for ADMA hosts.
 * For each port: fall back to legacy handling if the port runs with the
 * ATAPI workaround or in register mode; otherwise read and acknowledge
 * the ADMA notifier and status registers, freeze on hotplug/timeout/
 * SError conditions, and complete the CPBs flagged done.  Notifier bits
 * for both ports are cleared at the end in one pass. */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct nv_adma_port_priv *pp = ap->private_data;
		void __iomem *mmio = pp->ctl_block;
		u16 status;
		u32 gen_ctl;
		u32 notifier, notifier_error;

		notifier_clears[i] = 0;

		/* if ADMA is disabled, use standard ata interrupt handler */
		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}

		/* if in ATA register mode, use standard ata interrupt handler */
		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			if (ata_tag_valid(ap->link.active_tag))
				/*
				 * NV_INT_DEV indication seems unreliable
				 * at times; force it on if a command is
				 * active so the handler looks at the port.
				 */
				irq_stat |= NV_INT_DEV;
			handled += nv_host_intr(ap, irq_stat);
		}

		notifier = readl(mmio + NV_ADMA_NOTIFIER);
		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		notifier_clears[i] = notifier | notifier_error;

		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
		    !notifier_error)
			/* Nothing to do */
			continue;

		status = readw(mmio + NV_ADMA_STAT);

		/*
		 * Clear status. Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in the handler will raise another interrupt.
		 */
		writew(status, mmio + NV_ADMA_STAT);
		readw(mmio + NV_ADMA_STAT); /* flush posted write */
		rmb();

		handled++; /* irq handled if we got here */

		/* freeze if hotplugged or controller error */
		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
				       NV_ADMA_STAT_HOTUNPLUG |
				       NV_ADMA_STAT_TIMEOUT |
				       NV_ADMA_STAT_SERROR))) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
			if (status & NV_ADMA_STAT_TIMEOUT) {
				ehi->err_mask |= AC_ERR_SYSTEM;
				ata_ehi_push_desc(ehi, "timeout");
			} else if (status & NV_ADMA_STAT_HOTPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hotplug");
			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hot unplug");
			} else if (status & NV_ADMA_STAT_SERROR) {
				/* let EH analyze SError and figure out cause */
				ata_ehi_push_desc(ehi, "SError");
			} else
				ata_ehi_push_desc(ehi, "unknown");
			ata_port_freeze(ap);
			continue;
		}

		if (status & (NV_ADMA_STAT_DONE |
			      NV_ADMA_STAT_CPBERR |
			      NV_ADMA_STAT_CMD_COMPLETE)) {
			u32 check_commands = notifier_clears[i];
			u32 done_mask = 0;
			int pos, rc;

			if (status & NV_ADMA_STAT_CPBERR) {
				/* check all active commands */
				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 <<
						ap->link.active_tag;
				else
					check_commands = ap->link.sactive;
			}

			/* check CPBs for completed commands */
			while ((pos = ffs(check_commands))) {
				pos--;
				rc = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
				if (rc > 0)
					done_mask |= 1 << pos;
				else if (unlikely(rc < 0))
					check_commands = 0;
				check_commands &= ~(1 << pos);
			}
			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1004
/* Freeze the port: mask its legacy interrupts via the CK804 path, then —
 * unless ADMA is disabled by the ATAPI workaround — clear any pending
 * legacy status and mask the ADMA interrupt enables. */
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1026
/* Thaw the port: re-enable legacy interrupts via the CK804 path and,
 * unless the ATAPI workaround disabled ADMA, re-enable the ADMA
 * interrupt enables as well. */
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1044
/* Clear all pending interrupt state for the port: legacy CK804 status,
 * the full ADMA status word, and both ports' notifier-clear registers
 * (only this port's bits are set; the other gets zero). */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}
1077
1078static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1079{
1080 struct nv_adma_port_priv *pp = qc->ap->private_data;
1081
1082 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1083 ata_bmdma_post_internal_cmd(qc);
1084}
1085
/* Allocate and initialize per-port ADMA state.
 * The legacy PRD table must be allocated under a 32-bit DMA mask, so that
 * mask is set before ata_bmdma_port_start(); afterwards a 64-bit mask is
 * attempted for the ADMA structures (falling back to 32-bit), and the
 * achieved mask is remembered in pp->adma_dma_mask for slave_config.
 * The port is left in register mode with a freshly reset ADMA engine. */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/*
	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	 * pad buffers.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/*
	 * Now that the legacy PRD and padding buffer are allocated we can
	 * raise the DMA mask to allocate the CPB/APRD table.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			return rc;
	}
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse the channel-reset bit */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1185
/* Stop the port: zero the ADMA control register, which clears GO and all
 * interrupt enables.  DMA memory is devm-managed and freed elsewhere. */
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}
1194
1195#ifdef CONFIG_PM
/* Suspend: drop back to register mode, clear the CPB fetch count and
 * disable the ADMA engine/interrupts before the controller powers down. */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
1212
/* Resume: re-program the CPB base registers (lost across suspend) and
 * re-run the same engine initialization sequence as nv_adma_port_start:
 * clear status, register mode, zero fetch count, enable interrupts and
 * pulse channel reset. */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse the channel-reset bit */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1246#endif
1247
/* Point the port's SFF taskfile register addresses at the shadow
 * registers inside the port's ADMA register block (one 32-bit slot per
 * taskfile register; the control/altstatus register sits at 0x20). */
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
1271
/*
 * Host-wide ADMA initialization: enable both SATA ports (with their
 * posted-write buffers) in PCI config space, then switch each port's
 * taskfile addresses into the ADMA MMIO window.
 * Returns 0 (cannot fail).
 */
static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}
1294
/*
 * Fill one ADMA physical region descriptor (APRD) from scatterlist
 * entry @sg at position @idx within the command.
 */
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		/* idx 4 is the last APRD embedded in the CPB; the chain
		 * continues via the CPB's next_aprd pointer instead of
		 * the CONT flag (see nv_adma_fill_sg). */
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg)));
	aprd->flags = flags;
	aprd->packet_len = 0;
}
1313
/*
 * Build the APRD chain for a command: the first 5 descriptors are
 * embedded in the CPB itself; any further entries go into the per-tag
 * slice of the separate APRD table, linked via cpb->next_aprd.
 */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	/* si == number of entries after the loop; >5 means we spilled
	 * into the external table and must link it in */
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
1333
1334static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1335{
1336 struct nv_adma_port_priv *pp = qc->ap->private_data;
1337
1338
1339
1340 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1341 (qc->tf.flags & ATA_TFLAG_POLLING))
1342 return 1;
1343
1344 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1345 (qc->tf.protocol == ATA_PROT_NODATA))
1346 return 0;
1347
1348 return 1;
1349}
1350
/*
 * Prepare a command for issue: either fall back to the BMDMA path
 * (register mode), or build the CPB for the ADMA engine.  The wmb()
 * ordering around ctl_flags/resp_flags is load-bearing: the hardware
 * may fetch the CPB at any time once it is marked valid.
 */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_bmdma_qc_prep(qc);
		return;
	}

	/* mark done so the interrupt handler ignores it, then invalidate */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->hw_tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	 * until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}
1396
/*
 * Issue a prepared command: reject NCQ+RESULT_TF (the hardware cannot
 * return a result taskfile for NCQ), route register-mode commands to
 * BMDMA, otherwise append the tag to the ADMA engine's work list.
 * Returns 0 on success or an AC_ERR_* value.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_bmdma_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->hw_tag);

	return 0;
}
1441
/*
 * Shared interrupt handler for the generic (non-ADMA, non-SWNCQ)
 * flavors: dispatch to the BMDMA port handler when a non-polling
 * command is active, otherwise just ack by reading the status register.
 */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			handled += ata_bmdma_port_intr(ap, qc);
		} else {
			/*
			 * No request pending?  Clear interrupt status
			 * anyway, in case there's one pending.
			 */
			ap->ops->sff_check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1471
1472static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1473{
1474 int i, handled = 0;
1475
1476 for (i = 0; i < host->n_ports; i++) {
1477 handled += nv_host_intr(host->ports[i], irq_stat);
1478 irq_stat >>= NV_INT_PORT_SHIFT;
1479 }
1480
1481 return IRQ_RETVAL(handled);
1482}
1483
/*
 * nForce2/3 interrupt handler: interrupt status lives in legacy
 * SCR I/O space of port 0.
 */
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1497
/*
 * CK804 interrupt handler: same dispatch as nForce2, but the status
 * register lives in the MMIO BAR rather than legacy I/O space.
 */
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1511
1512static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1513{
1514 if (sc_reg > SCR_CONTROL)
1515 return -EINVAL;
1516
1517 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1518 return 0;
1519}
1520
1521static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1522{
1523 if (sc_reg > SCR_CONTROL)
1524 return -EINVAL;
1525
1526 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1527 return 0;
1528}
1529
/*
 * Hardreset policy: only hardreset an empty port (hotplug timing);
 * for an occupied port just resume the link, since a hardreset could
 * confuse an attached device.  Always returns -EAGAIN so EH follows
 * up with a softreset to actually classify the device.
 */
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* Do hardreset iff it's post-boot probing, please read the
	 * comment above port ops for details.
	 */
	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
	    !ata_dev_enabled(link->device))
		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				    NULL, NULL);
	else {
		const unsigned long *timing = sata_ehc_deb_timing(ehc);
		int rc;

		if (!(ehc->i.flags & ATA_EHI_QUIET))
			ata_link_info(link,
				      "nv: skipping hardreset on occupied port\n");

		/* make sure the link is online */
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link, "failed to resume link (errno=%d)\n",
				      rc);
	}

	/* device signature acquisition is unreliable */
	return -EAGAIN;
}
1561
1562static void nv_nf2_freeze(struct ata_port *ap)
1563{
1564 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1565 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1566 u8 mask;
1567
1568 mask = ioread8(scr_addr + NV_INT_ENABLE);
1569 mask &= ~(NV_INT_ALL << shift);
1570 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1571}
1572
1573static void nv_nf2_thaw(struct ata_port *ap)
1574{
1575 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1576 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1577 u8 mask;
1578
1579 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1580
1581 mask = ioread8(scr_addr + NV_INT_ENABLE);
1582 mask |= (NV_INT_MASK << shift);
1583 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1584}
1585
1586static void nv_ck804_freeze(struct ata_port *ap)
1587{
1588 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1589 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1590 u8 mask;
1591
1592 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1593 mask &= ~(NV_INT_ALL << shift);
1594 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1595}
1596
1597static void nv_ck804_thaw(struct ata_port *ap)
1598{
1599 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1600 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1601 u8 mask;
1602
1603 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1604
1605 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1606 mask |= (NV_INT_MASK << shift);
1607 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1608}
1609
1610static void nv_mcp55_freeze(struct ata_port *ap)
1611{
1612 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1613 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1614 u32 mask;
1615
1616 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1617
1618 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1619 mask &= ~(NV_INT_ALL_MCP55 << shift);
1620 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1621}
1622
1623static void nv_mcp55_thaw(struct ata_port *ap)
1624{
1625 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1626 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1627 u32 mask;
1628
1629 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1630
1631 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1632 mask |= (NV_INT_MASK_MCP55 << shift);
1633 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1634}
1635
/*
 * ADMA error handler: if the engine is in ADMA mode, dump diagnostic
 * state for any active commands, drop back to register mode, invalidate
 * all CPBs and pulse a channel reset, then run the normal BMDMA EH.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_err(ap,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			/* dump CPB state only for tags that are actually in flight */
			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_err(ap,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_error_handler(ap);
}
1691
/*
 * Push a queued command onto the SWNCQ defer queue.  The queue is a
 * ring buffer of ATA_MAX_QUEUE tags; head/tail are free-running and
 * masked on access.  defer_bits mirrors the set of deferred tags.
 */
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->hw_tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
}
1702
/*
 * Pop the oldest deferred command off the SWNCQ defer queue.
 * Returns NULL if the queue is empty, otherwise the qc for the
 * dequeued tag.  The popped slot is poisoned to catch misuse.
 */
static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* null queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}
1719
1720static void nv_swncq_fis_reinit(struct ata_port *ap)
1721{
1722 struct nv_swncq_port_priv *pp = ap->private_data;
1723
1724 pp->dhfis_bits = 0;
1725 pp->dmafis_bits = 0;
1726 pp->sdbfis_bits = 0;
1727 pp->ncq_flags = 0;
1728}
1729
1730static void nv_swncq_pp_reinit(struct ata_port *ap)
1731{
1732 struct nv_swncq_port_priv *pp = ap->private_data;
1733 struct defer_queue *dq = &pp->defer_queue;
1734
1735 dq->head = 0;
1736 dq->tail = 0;
1737 dq->defer_bits = 0;
1738 pp->qc_active = 0;
1739 pp->last_issue_tag = ATA_TAG_POISON;
1740 nv_swncq_fis_reinit(ap);
1741}
1742
/* Ack the given FIS interrupt bits in this port's MCP55 status register. */
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}
1749
/*
 * Stop the BMDMA engine without a real qc in hand.  A stack-local qc
 * with only ->ap filled in is sufficient because ata_bmdma_stop()
 * only dereferences qc->ap.
 */
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}
1757
1758static void nv_swncq_ncq_stop(struct ata_port *ap)
1759{
1760 struct nv_swncq_port_priv *pp = ap->private_data;
1761 unsigned int i;
1762 u32 sactive;
1763 u32 done_mask;
1764
1765 ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
1766 ap->qc_active, ap->link.sactive);
1767 ata_port_err(ap,
1768 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1769 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1770 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1771 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1772
1773 ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1774 ap->ops->sff_check_status(ap),
1775 ioread8(ap->ioaddr.error_addr));
1776
1777 sactive = readl(pp->sactive_block);
1778 done_mask = pp->qc_active ^ sactive;
1779
1780 ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1781 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1782 u8 err = 0;
1783 if (pp->qc_active & (1 << i))
1784 err = 0;
1785 else if (done_mask & (1 << i))
1786 err = 1;
1787 else
1788 continue;
1789
1790 ata_port_err(ap,
1791 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1792 (pp->dhfis_bits >> i) & 0x1,
1793 (pp->dmafis_bits >> i) & 0x1,
1794 (pp->sdbfis_bits >> i) & 0x1,
1795 (sactive >> i) & 0x1,
1796 (err ? "error! tag doesn't exit" : " "));
1797 }
1798
1799 nv_swncq_pp_reinit(ap);
1800 ap->ops->sff_irq_clear(ap);
1801 __ata_bmdma_stop(ap);
1802 nv_swncq_irq_clear(ap, 0xffff);
1803}
1804
1805static void nv_swncq_error_handler(struct ata_port *ap)
1806{
1807 struct ata_eh_context *ehc = &ap->link.eh_context;
1808
1809 if (ap->link.sactive) {
1810 nv_swncq_ncq_stop(ap);
1811 ehc->i.action |= ATA_EH_RESET;
1812 }
1813
1814 ata_bmdma_error_handler(ap);
1815}
1816
1817#ifdef CONFIG_PM
/*
 * Disable SWNCQ operation on this port before system suspend:
 * clear/disable interrupts and turn off the SWNCQ control bits.
 * Returns 0 (cannot fail).
 */
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}
1836
/*
 * Re-enable SWNCQ operation on this port after system resume:
 * clear and re-enable interrupts, then turn the SWNCQ control bits
 * back on.  Returns 0 (cannot fail).
 */
static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
1854#endif
1855
1856static void nv_swncq_host_init(struct ata_host *host)
1857{
1858 u32 tmp;
1859 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1860 struct pci_dev *pdev = to_pci_dev(host->dev);
1861 u8 regval;
1862
1863
1864 pci_read_config_byte(pdev, 0x7f, ®val);
1865 regval &= ~(1 << 7);
1866 pci_write_config_byte(pdev, 0x7f, regval);
1867
1868
1869 tmp = readl(mmio + NV_CTL_MCP55);
1870 VPRINTK("HOST_CTL:0x%X\n", tmp);
1871 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1872
1873
1874 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1875 VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1876 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1877
1878
1879 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1880}
1881
/*
 * Per-device SCSI slave configuration for SWNCQ: on affected chipset
 * revisions (MCP51, and MCP55 up to rev 0xa2), Maxtor drives misbehave
 * with SWNCQ, so force their queue depth to 1.
 * Returns the result of ata_scsi_slave_config().
 */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
			       sdev->queue_depth);
	}

	return rc;
}
1927
/*
 * Per-port SWNCQ setup: run the standard BMDMA port start, allocate
 * the per-port private data and one PRD table per possible tag, and
 * record the MMIO locations of the SActive, interrupt and tag blocks.
 * Returns 0 on success or a negative errno.  All allocations are
 * devres-managed, so no explicit teardown is needed.
 */
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one PRD table per NCQ tag */
	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
1956
1957static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1958{
1959 if (qc->tf.protocol != ATA_PROT_NCQ) {
1960 ata_bmdma_qc_prep(qc);
1961 return;
1962 }
1963
1964 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1965 return;
1966
1967 nv_swncq_fill_sg(qc);
1968}
1969
/*
 * Build the BMDMA PRD table for an NCQ command in its per-tag slice of
 * pp->prd.  Scatterlist entries are split so no PRD crosses a 64KB
 * boundary, per the BMDMA spec.
 *
 * NOTE(review): assumes at least one scatterlist entry (callers only
 * reach this with ATA_QCFLAG_DMAMAP set); with n_elem == 0 the final
 * prd[idx - 1] access would underflow — confirm callers uphold this.
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_bmdma_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* clamp each PRD so it stays within a 64KB page */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			/* len 0x10000 is encoded as 0 by the & 0xffff */
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last entry as end-of-table */
	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
2005
/*
 * Actually issue one NCQ command to the device: set its SActive bit,
 * update the per-port bookkeeping, then load and execute the taskfile.
 * A NULL @qc is tolerated (no-op).  Always returns 0.
 */
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	DPRINTK("Enter\n");

	writel((1 << qc->hw_tag), pp->sactive_block);
	pp->last_issue_tag = qc->hw_tag;
	/* this tag has not yet seen its D2H or DMA-setup FIS */
	pp->dhfis_bits &= ~(1 << qc->hw_tag);
	pp->dmafis_bits &= ~(1 << qc->hw_tag);
	pp->qc_active |= (0x1 << qc->hw_tag);

	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
	ap->ops->sff_exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->hw_tag);

	return 0;
}
2029
2030static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2031{
2032 struct ata_port *ap = qc->ap;
2033 struct nv_swncq_port_priv *pp = ap->private_data;
2034
2035 if (qc->tf.protocol != ATA_PROT_NCQ)
2036 return ata_bmdma_qc_issue(qc);
2037
2038 DPRINTK("Enter\n");
2039
2040 if (!pp->qc_active)
2041 nv_swncq_issue_atacmd(ap, qc);
2042 else
2043 nv_swncq_qc_to_dq(ap, qc);
2044
2045 return 0;
2046}
2047
/*
 * Handle a hotplug/unplug interrupt: record what happened in the EH
 * info, clear SError, and freeze the port so EH takes over.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
2072
/*
 * Handle a Set Device Bits FIS: complete the commands whose SActive
 * bits the device cleared, then decide whether to (re)issue pending
 * commands.  Returns 0 on success, -EINVAL on a BMDMA error (EH will
 * be invoked by the caller).
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	u32 done_mask;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	/* tags the device has finished = previously active XOR now active */
	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	pp->qc_active &= ~done_mask;
	pp->dhfis_bits &= ~done_mask;
	pp->dmafis_bits &= ~done_mask;
	pp->sdbfis_bits |= done_mask;
	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);

	if (!ap->qc_active) {
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return 0;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return 0;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device to host register FIS,
		 * The driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		/* reissue the command that never got its D2H FIS */
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return 0;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return 0;
}
2145
2146static inline u32 nv_swncq_tag(struct ata_port *ap)
2147{
2148 struct nv_swncq_port_priv *pp = ap->private_data;
2149 u32 tag;
2150
2151 tag = readb(pp->tag_block) >> 2;
2152 return (tag & 0x1f);
2153}
2154
/*
 * Handle a DMA-setup FIS: the device has selected a tag to transfer,
 * so point the BMDMA engine at that tag's PRD table, program the
 * direction, and start the transfer.
 */
static void nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear.
	 * Note ATA_DMA_WR set means device-to-memory (a read from the
	 * device's perspective), hence the inversion of rw. */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
2186
/*
 * Per-port SWNCQ interrupt handler.  Decodes the FIS interrupt bits:
 * hotplug events freeze the port, device errors freeze for EH, SDB
 * FISes complete commands, D2H FISes acknowledge issued commands (and
 * may trigger issue of deferred ones), and DMA-setup FISes start the
 * corresponding data transfer.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;

	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);	/* ack first, then decode */
	if (!fis)
		return;

	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		if (nv_swncq_sdbfis(ap) < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		/* if the device hasn't started a DMA transfer yet and is
		 * not busy, we can slip in the next deferred command */
		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2285
/*
 * Top-level SWNCQ interrupt handler: read the shared MCP55 status
 * word and dispatch each port's slice of it, using the SWNCQ path
 * when NCQ commands are in flight and the generic path otherwise.
 */
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->link.sactive) {
			nv_swncq_host_interrupt(ap, (u16)irq_stat);
			handled = 1;
		} else {
			/* ack any stale SWNCQ-only bits before falling back
			 * to the generic per-port handler */
			if (irq_stat)	/* reserve Hotplug */
				nv_swncq_irq_clear(ap, 0xfff0);

			handled += nv_host_intr(ap, (u8)irq_stat);
		}
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
2317
2318static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2319{
2320 const struct ata_port_info *ppi[] = { NULL, NULL };
2321 struct nv_pi_priv *ipriv;
2322 struct ata_host *host;
2323 struct nv_host_priv *hpriv;
2324 int rc;
2325 u32 bar;
2326 void __iomem *base;
2327 unsigned long type = ent->driver_data;
2328
2329
2330
2331
2332 for (bar = 0; bar < 6; bar++)
2333 if (pci_resource_start(pdev, bar) == 0)
2334 return -ENODEV;
2335
2336 ata_print_version_once(&pdev->dev, DRV_VERSION);
2337
2338 rc = pcim_enable_device(pdev);
2339 if (rc)
2340 return rc;
2341
2342
2343 if (type == CK804 && adma_enabled) {
2344 dev_notice(&pdev->dev, "Using ADMA mode\n");
2345 type = ADMA;
2346 } else if (type == MCP5x && swncq_enabled) {
2347 dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2348 type = SWNCQ;
2349 }
2350
2351 ppi[0] = &nv_port_info[type];
2352 ipriv = ppi[0]->private_data;
2353 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2354 if (rc)
2355 return rc;
2356
2357 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2358 if (!hpriv)
2359 return -ENOMEM;
2360 hpriv->type = type;
2361 host->private_data = hpriv;
2362
2363
2364 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2365 if (rc)
2366 return rc;
2367
2368
2369 base = host->iomap[NV_MMIO_BAR];
2370 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2371 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2372
2373
2374 if (type >= CK804) {
2375 u8 regval;
2376
2377 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
2378 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2379 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2380 }
2381
2382
2383 if (type == ADMA) {
2384 rc = nv_adma_host_init(host);
2385 if (rc)
2386 return rc;
2387 } else if (type == SWNCQ)
2388 nv_swncq_host_init(host);
2389
2390 if (msi_enabled) {
2391 dev_notice(&pdev->dev, "Using MSI\n");
2392 pci_enable_msi(pdev);
2393 }
2394
2395 pci_set_master(pdev);
2396 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2397}
2398
2399#ifdef CONFIG_PM_SLEEP
2400static int nv_pci_device_resume(struct pci_dev *pdev)
2401{
2402 struct ata_host *host = pci_get_drvdata(pdev);
2403 struct nv_host_priv *hpriv = host->private_data;
2404 int rc;
2405
2406 rc = ata_pci_device_do_resume(pdev);
2407 if (rc)
2408 return rc;
2409
2410 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2411 if (hpriv->type >= CK804) {
2412 u8 regval;
2413
2414 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
2415 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2416 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2417 }
2418 if (hpriv->type == ADMA) {
2419 u32 tmp32;
2420 struct nv_adma_port_priv *pp;
2421
2422 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2423
2424 pp = host->ports[0]->private_data;
2425 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2426 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2427 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2428 else
2429 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
2430 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2431 pp = host->ports[1]->private_data;
2432 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2433 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2434 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2435 else
2436 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
2437 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2438
2439 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2440 }
2441 }
2442
2443 ata_host_resume(host);
2444
2445 return 0;
2446}
2447#endif
2448
2449static void nv_ck804_host_stop(struct ata_host *host)
2450{
2451 struct pci_dev *pdev = to_pci_dev(host->dev);
2452 u8 regval;
2453
2454
2455 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
2456 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2457 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2458}
2459
2460static void nv_adma_host_stop(struct ata_host *host)
2461{
2462 struct pci_dev *pdev = to_pci_dev(host->dev);
2463 u32 tmp32;
2464
2465
2466 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2467 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2468 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2469 NV_MCP_SATA_CFG_20_PORT1_EN |
2470 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2471
2472 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2473
2474 nv_ck804_host_stop(host);
2475}
2476
module_pci_driver(nv_pci_driver);

/* Module parameters: all read-only after load (mode 0444). */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
module_param_named(msi, msi_enabled, bool, 0444);
MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2485