/*
 * sata_nv.c - NVIDIA nForce SATA support driver
 *
 * Low-level libata driver for the SATA controllers found on NVIDIA
 * nForce-family chipsets (nForce2/3, CK804/MCP04, MCP5x, MCP61).
 * Supports legacy BMDMA operation on all parts, hardware ADMA with
 * NCQ on CK804/MCP04, and software NCQ (SWNCQ) on MCP51/55.
 *
 * NOTE(review): the original license/copyright header of this file was
 * lost in extraction — restore it from the upstream kernel tree.
 */
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/gfp.h>
26#include <linux/pci.h>
27#include <linux/blkdev.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30#include <linux/device.h>
31#include <scsi/scsi_host.h>
32#include <scsi/scsi_device.h>
33#include <linux/libata.h>
34
35#define DRV_NAME "sata_nv"
36#define DRV_VERSION "3.5"
37
38#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
39
enum {
	NV_MMIO_BAR			= 5,	/* PCI BAR holding the MMIO region (host->iomap index) */

	NV_PORTS			= 2,
	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE register offsets */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01,	/* 0 = INT, 1 = SMI */

	/* PCI config register 0x50: ADMA/port enable bits */
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	/* ADMA queue geometry: 32 CPBs of 128 bytes, each followed by an
	 * APRD scatter table; 5 APRDs live inline in the CPB itself, hence
	 * the "+ 5" in the total SG table length. */
	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers (offsets from the port base) */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags (nv_adma_port_priv.flags) */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 register offsets */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 interrupt layout: 16 bits per port */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ enable bits in NV_CTL_MCP55 */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SWNCQ interrupt bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
186
187
/* ADMA Physical Region Descriptor - one DMA scatter/gather segment,
 * 16 bytes (NV_ADMA_APRD_SZ).  Filled by nv_adma_fill_aprd(). */
struct nv_adma_prd {
	__le64			addr;		/* segment DMA address */
	__le32			len;		/* segment length in bytes */
	u8			flags;		/* NV_APRD_* flags */
	u8			packet_len;	/* always written as 0 here */
	__le16			reserved;
};
195
/* Bits OR'd into the __le16 taskfile entries of a CPB
 * (see nv_adma_tf_to_cpb()); the low byte is the register value,
 * the high byte selects the register. */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA CS1n (alternate) addressing */
	DA2	= (1 << (2 + 8)),	/* std. PATA device address bit 2 */
	DA1	= (1 << (1 + 8)),	/* std. PATA device address bit 1 */
	DA0	= (1 << (0 + 8)),	/* std. PATA device address bit 0 */
};
205
206
207
208
209
/* ADMA Command Parameter Block - one hardware command slot,
 * 128 bytes (NV_ADMA_CPB_SZ).  The controller writes resp_flags;
 * the driver fills the rest. */
struct nv_adma_cpb {
	u8			resp_flags;    /* NV_CPB_RESP_*, written by hw */
	u8			reserved1;
	u8			ctl_flags;     /* NV_CPB_CTL_* */
	/* len is length of taskfile in 64 bit words */
	u8			len;
	u8			tag;           /* libata hw_tag of the command */
	u8			next_cpb_idx;  /* next CPB to execute */
	__le16			reserved2;
	__le16			tf[12];        /* taskfile registers, see nv_adma_tf_to_cpb() */
	struct nv_adma_prd	aprd[5];       /* first 5 SG segments, inline */
	__le64			next_aprd;     /* DMA address of remaining APRD table */
	__le64			reserved3;
};
224
225
/* Per-port private state for the ADMA path. */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB ring (CPU address) */
	dma_addr_t		cpb_dma;	/* CPB ring (DMA address) */
	struct nv_adma_prd	*aprd;		/* overflow APRD tables (CPU) */
	dma_addr_t		aprd_dma;	/* overflow APRD tables (DMA) */
	void __iomem		*ctl_block;	/* this port's ADMA registers */
	void __iomem		*gen_block;	/* shared ADMA general registers */
	void __iomem		*notifier_clear_block;	/* per-port notifier clear reg */
	u64			adma_dma_mask;	/* DMA mask to use while in ADMA mode */
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE etc. */
	int			last_issue_ncq;	/* was the last issued cmd NCQ? */
};
238
/* Per-host private data; type holds the nv_host_type board id. */
struct nv_host_priv {
	unsigned long		type;
};
242
/* FIFO of deferred NCQ tags for the SWNCQ path; defer_bits mirrors
 * the queued tags as a bitmap. */
struct defer_queue {
	u32		defer_bits;
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};
249
/* Events observed during a SWNCQ transaction, tracked in
 * nv_swncq_port_priv.ncq_flags. */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),	/* saw D2H register FIS */
	ncq_saw_dmas	= (1U << 1),	/* saw DMA setup FIS */
	ncq_saw_sdb	= (1U << 2),	/* saw Set Device Bits FIS */
	ncq_saw_backout	= (1U << 3),	/* saw backout event */
};
256
/* Per-port private state for the software-NCQ (SWNCQ) path. */
struct nv_swncq_port_priv {
	struct ata_bmdma_prd *prd;	 /* BMDMA PRD tables, one per tag */
	dma_addr_t	prd_dma;	 /* DMA address of the PRD tables */
	void __iomem	*sactive_block;	 /* SActive register */
	void __iomem	*irq_block;	 /* interrupt status register */
	void __iomem	*tag_block;	 /* tag register */
	u32		qc_active;	 /* bitmap of active commands */

	unsigned int	last_issue_tag;	 /* tag of the most recently issued cmd */

	/* commands deferred while the bus is busy */
	struct defer_queue defer_queue;

	/* FIS-seen bitmaps, one bit per tag */
	u32		dhfis_bits;	 /* tags that got a D2H register FIS */
	u32		dmafis_bits;	 /* tags that got a DMA setup FIS */
	u32		sdbfis_bits;	 /* tags completed via SDB FIS */

	unsigned int	ncq_flags;	 /* enum ncq_saw_flag_list */
};
277
278
/* True if the ADMA general-control register reports an interrupt
 * pending for port PORT (ports are 12 bits apart, starting at bit 19). */
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
280
/* Function prototypes */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif
331
/* Board ids; used as index into nv_port_info[] and stored in
 * nv_host_priv.type.  NFORCE3 is handled identically to NFORCE2. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};
342
/* PCI ids handled by this driver; driver_data is the nv_host_type. */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};
361
/* PCI driver registration. */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
372
/* SCSI host template for the legacy BMDMA paths. */
static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
376
/* SCSI host template for the ADMA path: per-device queue limits are
 * (re)configured at runtime by nv_adma_slave_config(). */
static struct scsi_host_template nv_adma_sht = {
	__ATA_BASE_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.sdev_groups		= ata_ncq_sdev_groups,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
};
387
/* SCSI host template for the software-NCQ path. */
static struct scsi_host_template nv_swncq_sht = {
	__ATA_BASE_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE - 1,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
	.sdev_groups		= ata_ncq_sdev_groups,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
};
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
/* Base port ops: plain BMDMA with nv-specific SCR access and hardreset. */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};
464
/* nForce2/3: generic ops plus chip-specific freeze/thaw. */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};
470
/* CK804/MCP04: generic ops plus chip-specific freeze/thaw/host_stop. */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};
477
/* ADMA (hardware NCQ) port ops, layered on the CK804 ops. */
static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
501
/* Software-NCQ (MCP51/55) port ops, layered on the generic ops. */
static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};
519
/* Per-board pairing of interrupt handler and SCSI host template,
 * stashed in ata_port_info.private_data. */
struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

/* Build an anonymous, statically-allocated nv_pi_priv. */
#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
527
/* Port info table, indexed by enum nv_host_type. */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
584
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* Feature-selection flags; presumably exposed as module parameters
 * later in the file (module_param calls not visible here — verify). */
static bool adma_enabled;
static bool swncq_enabled = true;
static bool msi_enabled;
594
595static void nv_adma_register_mode(struct ata_port *ap)
596{
597 struct nv_adma_port_priv *pp = ap->private_data;
598 void __iomem *mmio = pp->ctl_block;
599 u16 tmp, status;
600 int count = 0;
601
602 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
603 return;
604
605 status = readw(mmio + NV_ADMA_STAT);
606 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
607 ndelay(50);
608 status = readw(mmio + NV_ADMA_STAT);
609 count++;
610 }
611 if (count == 20)
612 ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
613 status);
614
615 tmp = readw(mmio + NV_ADMA_CTL);
616 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
617
618 count = 0;
619 status = readw(mmio + NV_ADMA_STAT);
620 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
621 ndelay(50);
622 status = readw(mmio + NV_ADMA_STAT);
623 count++;
624 }
625 if (count == 20)
626 ata_port_warn(ap,
627 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
628 status);
629
630 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
631}
632
633static void nv_adma_mode(struct ata_port *ap)
634{
635 struct nv_adma_port_priv *pp = ap->private_data;
636 void __iomem *mmio = pp->ctl_block;
637 u16 tmp, status;
638 int count = 0;
639
640 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
641 return;
642
643 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
644
645 tmp = readw(mmio + NV_ADMA_CTL);
646 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
647
648 status = readw(mmio + NV_ADMA_STAT);
649 while (((status & NV_ADMA_STAT_LEGACY) ||
650 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
651 ndelay(50);
652 status = readw(mmio + NV_ADMA_STAT);
653 count++;
654 }
655 if (count == 20)
656 ata_port_warn(ap,
657 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
658 status);
659
660 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
661}
662
663static int nv_adma_slave_config(struct scsi_device *sdev)
664{
665 struct ata_port *ap = ata_shost_to_port(sdev->host);
666 struct nv_adma_port_priv *pp = ap->private_data;
667 struct nv_adma_port_priv *port0, *port1;
668 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
669 unsigned long segment_boundary, flags;
670 unsigned short sg_tablesize;
671 int rc;
672 int adma_enable;
673 u32 current_reg, new_reg, config_mask;
674
675 rc = ata_scsi_slave_config(sdev);
676
677 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
678
679 return rc;
680
681 spin_lock_irqsave(ap->lock, flags);
682
683 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
684
685
686
687
688
689
690
691 segment_boundary = ATA_DMA_BOUNDARY;
692
693
694 sg_tablesize = LIBATA_MAX_PRD - 1;
695
696
697
698 adma_enable = 0;
699 nv_adma_register_mode(ap);
700 } else {
701 segment_boundary = NV_ADMA_DMA_BOUNDARY;
702 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
703 adma_enable = 1;
704 }
705
706 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, ¤t_reg);
707
708 if (ap->port_no == 1)
709 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
710 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
711 else
712 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
713 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
714
715 if (adma_enable) {
716 new_reg = current_reg | config_mask;
717 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
718 } else {
719 new_reg = current_reg & ~config_mask;
720 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
721 }
722
723 if (current_reg != new_reg)
724 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
725
726 port0 = ap->host->ports[0]->private_data;
727 port1 = ap->host->ports[1]->private_data;
728 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
729 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
730
731
732
733
734
735
736
737 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
738 } else {
739 rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
740 }
741
742 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
743 blk_queue_max_segments(sdev->request_queue, sg_tablesize);
744 ata_port_info(ap,
745 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
746 (unsigned long long)*ap->host->dev->dma_mask,
747 segment_boundary, sg_tablesize);
748
749 spin_unlock_irqrestore(ap->lock, flags);
750
751 return rc;
752}
753
754static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
755{
756 struct nv_adma_port_priv *pp = qc->ap->private_data;
757 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
758}
759
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/*
	 * Reading the taskfile requires legacy register access, so force
	 * the port into register mode before using the SFF helper.
	 * NOTE(review): in the upstream driver this is documented as safe
	 * because the taskfile is only read after all commands complete —
	 * confirm against the callers if modifying.
	 */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}
773
774static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
775{
776 unsigned int idx = 0;
777
778 if (tf->flags & ATA_TFLAG_ISADDR) {
779 if (tf->flags & ATA_TFLAG_LBA48) {
780 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
781 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
782 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
783 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
784 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
785 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
786 } else
787 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
788
789 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
790 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
791 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
792 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
793 }
794
795 if (tf->flags & ATA_TFLAG_DEVICE)
796 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
797
798 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
799
800 while (idx < 12)
801 cpb[idx++] = cpu_to_le16(IGN);
802
803 return idx;
804}
805
/*
 * Inspect the response flags of CPB @cpb_num and kick off error
 * handling if needed.
 *
 * Returns 1 if the CPB completed successfully, 0 if it is still
 * pending, and -1 if error handling (freeze or abort) was invoked.
 */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			/* device-level error: recoverable via EH abort */
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			/* controller-level error: freeze the port */
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}

		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return -1;
	}

	if (likely(flags & NV_CPB_RESP_DONE))
		return 1;
	return 0;
}
850
/*
 * Handle a legacy (non-ADMA) interrupt for one port.
 * Returns nonzero if the interrupt was handled.
 */
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? clear status and bail */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt for the active command */
	return ata_bmdma_port_intr(ap, qc);
}
874
/*
 * ADMA interrupt handler.  For each port it either delegates to the
 * legacy nv_host_intr() path (ATAPI workaround or register mode) or
 * processes ADMA notifier/status bits, completing finished CPBs and
 * invoking error handling on fatal status.  Notifier clear registers
 * for both ports are written once at the end.
 */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct nv_adma_port_priv *pp = ap->private_data;
		void __iomem *mmio = pp->ctl_block;
		u16 status;
		u32 gen_ctl;
		u32 notifier, notifier_error;

		notifier_clears[i] = 0;

		/* if ATAPI device is active, handle via legacy path only */
		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}

		/* in register mode, service the legacy interrupt first */
		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			if (ata_tag_valid(ap->link.active_tag))
				/*
				 * NV_INT_DEV indication seems unreliable
				 * at times; force it if a command is active
				 */
				irq_stat |= NV_INT_DEV;
			handled += nv_host_intr(ap, irq_stat);
		}

		notifier = readl(mmio + NV_ADMA_NOTIFIER);
		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		notifier_clears[i] = notifier | notifier_error;

		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
		    !notifier_error)
			/* Nothing to do for this port */
			continue;

		status = readw(mmio + NV_ADMA_STAT);

		/*
		 * Clear status.  Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in time will raise another interrupt.
		 */
		writew(status, mmio + NV_ADMA_STAT);
		readw(mmio + NV_ADMA_STAT); /* flush posted write */
		rmb();

		handled++; /* irq handled if we got here */

		/* freeze if hotplugged or controller error */
		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
				       NV_ADMA_STAT_HOTUNPLUG |
				       NV_ADMA_STAT_TIMEOUT |
				       NV_ADMA_STAT_SERROR))) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
			if (status & NV_ADMA_STAT_TIMEOUT) {
				ehi->err_mask |= AC_ERR_SYSTEM;
				ata_ehi_push_desc(ehi, "timeout");
			} else if (status & NV_ADMA_STAT_HOTPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hotplug");
			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hot unplug");
			} else if (status & NV_ADMA_STAT_SERROR) {
				/* let EH analyze SError and figure out cause */
				ata_ehi_push_desc(ehi, "SError");
			} else
				ata_ehi_push_desc(ehi, "unknown");
			ata_port_freeze(ap);
			continue;
		}

		if (status & (NV_ADMA_STAT_DONE |
			      NV_ADMA_STAT_CPBERR |
			      NV_ADMA_STAT_CMD_COMPLETE)) {
			u32 check_commands = notifier_clears[i];
			u32 done_mask = 0;
			int pos, rc;

			if (status & NV_ADMA_STAT_CPBERR) {
				/* check all active commands */
				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 <<
						ap->link.active_tag;
				else
					check_commands = ap->link.sactive;
			}

			/* check CPBs for completed commands */
			while ((pos = ffs(check_commands))) {
				pos--;
				rc = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
				if (rc > 0)
					done_mask |= 1 << pos;
				else if (unlikely(rc < 0))
					check_commands = 0;
				check_commands &= ~(1 << pos);
			}
			ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/*
		 * Note: both notifier clear registers must be written
		 * if either is set, even if one is zero, according to NVIDIA
		 */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1010
/* Freeze the port: mask interrupts at both the CK804 and ADMA levels. */
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	/* in ATAPI mode only the legacy interrupt machinery is active */
	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1032
/* Thaw the port: re-enable interrupts at both levels. */
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	/* in ATAPI mode only the legacy interrupt machinery is active */
	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1050
/*
 * Clear all pending interrupt sources for this port: CK804 notifier,
 * ADMA status, and both ports' notifier clear registers (NVIDIA
 * requires both to be written together — see nv_adma_interrupt()).
 */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}
1083
1084static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1085{
1086 struct nv_adma_port_priv *pp = qc->ap->private_data;
1087
1088 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1089 ata_bmdma_post_internal_cmd(qc);
1090}
1091
/*
 * Port init for the ADMA path: allocate the CPB/APRD DMA area, map the
 * port's ADMA register blocks, and reset the channel into a known,
 * register-mode state.  Returns 0 on success or a -errno.
 */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/*
	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	 * pad buffers.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/*
	 * Now that the legacy PRD and padding buffer are allocated we can
	 * raise the DMA mask to allocate the CPB/APRD table.
	 */
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1187
/* Port teardown: disable the ADMA channel (DMA memory is devm-managed). */
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}
1196
1197#ifdef CONFIG_PM
/* Suspend: drop to register mode and quiesce the ADMA channel. */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
1214
/*
 * Resume: re-program the CPB base, clear stale status, and repeat the
 * same channel-reset sequence used by nv_adma_port_start().
 */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1248#endif
1249
/*
 * Point the port's SFF taskfile addresses at the ADMA register block,
 * where the legacy registers appear at 4-byte strides.
 */
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
1273
/* Host-wide ADMA bring-up: enable ADMA and posted writes on both ports in
 * PCI config space, then switch every port's taskfile addresses to the
 * ADMA register window.
 */
static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}
1296
/* Fill one ADMA PRD entry from a scatterlist element.
 * Flags: WRITE for host->device transfers, END on the final element,
 * CONT on every other element except index 4 — the fifth entry is the
 * last one held inline in the CPB before chaining to the external APRD
 * table, so it must not carry CONT.
 */
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len = cpu_to_le32(((u32)sg_dma_len(sg)));
	aprd->flags = flags;
	aprd->packet_len = 0;
}
1315
/* Build the PRD list for a command: the first five entries live inline in
 * the CPB itself, any overflow goes into the per-tag region of the shared
 * APRD table, linked in via next_aprd.
 */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	/* si ends past the last element, so > 5 means we spilled over */
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
1335
1336static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1337{
1338 struct nv_adma_port_priv *pp = qc->ap->private_data;
1339
1340
1341
1342 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1343 (qc->tf.flags & ATA_TFLAG_POLLING))
1344 return 1;
1345
1346 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1347 (qc->tf.protocol == ATA_PROT_NODATA))
1348 return 0;
1349
1350 return 1;
1351}
1352
/* Prepare a command: either hand off to the BMDMA path (register mode) or
 * build a CPB for the ADMA engine.  The wmb() barriers enforce the order
 * in which the hardware may observe the CPB fields — ctl_flags must be
 * cleared before the body is rewritten, and set valid only after the body
 * and PRDs are complete.  Do not reorder these stores.
 */
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		/* a DMA-mapped command in register mode is only legal
		 * while ATAPI setup is still in progress */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_bmdma_qc_prep(qc);
		return AC_ERR_OK;
	}

	/* mark CPB done, then invalidate it before rebuilding */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->hw_tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the hardware see CPB_VALID until we are
	 * completely done */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;

	return AC_ERR_OK;
}
1400
/* Issue a prepared command: register-mode commands go through BMDMA,
 * ADMA commands are kicked off by writing the tag to the APPEND register.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* NCQ with RESULT_TF cannot be supported: reading the taskfile
	 * would require dropping out of ADMA mode mid-stream */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		(qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_bmdma_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 5 bits; ensure the
	 * CPB built by qc_prep is globally visible first */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		 * non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->hw_tag);

	return 0;
}
1445
/* Generic (pre-nForce2) interrupt handler: no per-port status register is
 * available, so just poll each port's active command through the normal
 * SFF path.
 */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			handled += ata_bmdma_port_intr(ap, qc);
		} else {
			/*
			 * No request pending?  Clear interrupt status
			 * anyway, in case there's one pending.
			 */
			ap->ops->sff_check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1475
1476static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1477{
1478 int i, handled = 0;
1479
1480 for (i = 0; i < host->n_ports; i++) {
1481 handled += nv_host_intr(host->ports[i], irq_stat);
1482 irq_stat >>= NV_INT_PORT_SHIFT;
1483 }
1484
1485 return IRQ_RETVAL(handled);
1486}
1487
/* nForce2/3 interrupt handler: the status byte lives in the SCR window of
 * port 0 (I/O-mapped, hence ioread8 rather than readb).
 */
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1501
/* CK804 interrupt handler: status byte is in the MMIO BAR at the CK804
 * offset rather than in the SCR window.
 */
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1515
1516static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1517{
1518 if (sc_reg > SCR_CONTROL)
1519 return -EINVAL;
1520
1521 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1522 return 0;
1523}
1524
1525static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1526{
1527 if (sc_reg > SCR_CONTROL)
1528 return -EINVAL;
1529
1530 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1531 return 0;
1532}
1533
/* Hardreset policy: these controllers misbehave when an occupied port is
 * hardreset, so only do a real hardreset during initial probe or when the
 * port looks empty; otherwise just resume the link.  Always returns
 * -EAGAIN so EH falls through to the follow-up softreset for
 * classification.
 */
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* Do hardreset iff it's post-boot probing, please read the
	 * comment above port ops for details.
	 */
	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
	    !ata_dev_enabled(link->device))
		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				    NULL, NULL);
	else {
		const unsigned long *timing = sata_ehc_deb_timing(ehc);
		int rc;

		if (!(ehc->i.flags & ATA_EHI_QUIET))
			ata_link_info(link,
				      "nv: skipping hardreset on occupied port\n");

		/* make sure the link is online */
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link, "failed to resume link (errno=%d)\n",
				      rc);
	}

	/* device signature acquisition is unreliable */
	return -EAGAIN;
}
1565
/* Freeze an nForce2/3 port: mask all of its interrupt bits in the shared
 * enable register (which lives in port 0's SCR window).
 */
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
1576
/* Thaw an nForce2/3 port: ack any pending status bits first, then
 * re-enable the normal interrupt mask for this port.
 */
static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* clear stale interrupt status (write-1-to-clear) */
	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
1589
/* Freeze a CK804 port: same scheme as nForce2 but the registers are in
 * the MMIO BAR.
 */
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1600
/* Thaw a CK804 port: ack pending status, then unmask this port's
 * interrupts.
 */
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* clear stale interrupt status (write-1-to-clear) */
	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1613
/* Freeze an MCP55 port: ack pending status bits, then mask all interrupt
 * sources for this port (MCP55 uses 32-bit registers and a wider
 * per-port shift).
 */
static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}
1626
/* Thaw an MCP55 port: ack pending status, then restore the normal
 * interrupt mask.
 */
static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}
1639
/* ADMA error handler: if the port is still in ADMA mode, dump engine
 * state for the failing commands, drop back to register mode, invalidate
 * every CPB, and pulse a channel reset before handing off to the generic
 * BMDMA error handler.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_err(ap,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			/* log the state of every CPB that was in flight */
			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_err(ap,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		 * being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_error_handler(ap);
}
1695
/* Queue a command onto the SWNCQ defer queue (ring buffer of tags). */
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is never expected to be full: depth is bounded by
	 * ATA_MAX_QUEUE outstanding tags */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->hw_tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
}
1706
/* Pop the oldest deferred command off the SWNCQ defer queue, or NULL if
 * the queue is empty.
 */
static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* null queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	/* poison the slot to catch stale reuse */
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}
1723
/* Reset the per-port FIS bookkeeping bitmaps used by the SWNCQ state
 * machine (D2H / DMA-setup / SDB FIS tracking).
 */
static void nv_swncq_fis_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	pp->dhfis_bits = 0;
	pp->dmafis_bits = 0;
	pp->sdbfis_bits = 0;
	pp->ncq_flags = 0;
}
1733
/* Fully reset SWNCQ per-port state: empty the defer queue, clear the
 * active-command bitmap, and reset FIS tracking.
 */
static void nv_swncq_pp_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	dq->head = 0;
	dq->tail = 0;
	dq->defer_bits = 0;
	pp->qc_active = 0;
	pp->last_issue_tag = ATA_TAG_POISON;
	nv_swncq_fis_reinit(ap);
}
1746
/* Acknowledge SWNCQ interrupt bits by writing them back to this port's
 * slice of the MCP55 status register.
 */
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}
1753
/* Stop BMDMA without a real queued command: ata_bmdma_stop() only
 * dereferences qc->ap, so a stack dummy with just .ap filled in suffices.
 */
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}
1761
1762static void nv_swncq_ncq_stop(struct ata_port *ap)
1763{
1764 struct nv_swncq_port_priv *pp = ap->private_data;
1765 unsigned int i;
1766 u32 sactive;
1767 u32 done_mask;
1768
1769 ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
1770 ap->qc_active, ap->link.sactive);
1771 ata_port_err(ap,
1772 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1773 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1774 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1775 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1776
1777 ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1778 ap->ops->sff_check_status(ap),
1779 ioread8(ap->ioaddr.error_addr));
1780
1781 sactive = readl(pp->sactive_block);
1782 done_mask = pp->qc_active ^ sactive;
1783
1784 ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1785 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1786 u8 err = 0;
1787 if (pp->qc_active & (1 << i))
1788 err = 0;
1789 else if (done_mask & (1 << i))
1790 err = 1;
1791 else
1792 continue;
1793
1794 ata_port_err(ap,
1795 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1796 (pp->dhfis_bits >> i) & 0x1,
1797 (pp->dmafis_bits >> i) & 0x1,
1798 (pp->sdbfis_bits >> i) & 0x1,
1799 (sactive >> i) & 0x1,
1800 (err ? "error! tag doesn't exit" : " "));
1801 }
1802
1803 nv_swncq_pp_reinit(ap);
1804 ap->ops->sff_irq_clear(ap);
1805 __ata_bmdma_stop(ap);
1806 nv_swncq_irq_clear(ap, 0xffff);
1807}
1808
/* SWNCQ error handler: if NCQ commands were active, tear down NCQ state
 * and force a reset, then run the generic BMDMA error handler.
 */
static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_bmdma_error_handler(ap);
}
1820
1821#ifdef CONFIG_PM
/* Suspend hook for SWNCQ: ack and disable all interrupts, then turn the
 * SWNCQ machinery off for both channels.
 */
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}
1840
/* Resume hook for SWNCQ: mirror of suspend — clear stale interrupts,
 * re-enable the interrupt sources, and switch SWNCQ back on.
 */
static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq; 0xfd per port covers all SWNCQ interrupt sources */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
1858#endif
1859
1860static void nv_swncq_host_init(struct ata_host *host)
1861{
1862 u32 tmp;
1863 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1864 struct pci_dev *pdev = to_pci_dev(host->dev);
1865 u8 regval;
1866
1867
1868 pci_read_config_byte(pdev, 0x7f, ®val);
1869 regval &= ~(1 << 7);
1870 pci_write_config_byte(pdev, 0x7f, regval);
1871
1872
1873 tmp = readl(mmio + NV_CTL_MCP55);
1874 VPRINTK("HOST_CTL:0x%X\n", tmp);
1875 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1876
1877
1878 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1879 VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1880 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1881
1882
1883 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1884}
1885
/* SCSI slave_config hook: after generic libata configuration, work around
 * broken NCQ on Maxtor drives attached to MCP51 (all revisions) and early
 * MCP55 (revision <= 0xa2) controllers by dropping queue depth to 1.
 */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
			       sdev->queue_depth);
	}

	return rc;
}
1931
/* Per-port SWNCQ setup: allocate private state plus one PRD table per
 * possible tag, and cache the per-port SActive / interrupt / tag register
 * addresses.  All allocations are devm-managed.
 */
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one PRD table per outstanding tag */
	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	/* each port owns a 2-byte slice of the shared MCP55 registers */
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
1960
1961static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1962{
1963 if (qc->tf.protocol != ATA_PROT_NCQ) {
1964 ata_bmdma_qc_prep(qc);
1965 return AC_ERR_OK;
1966 }
1967
1968 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1969 return AC_ERR_OK;
1970
1971 nv_swncq_fill_sg(qc);
1972
1973 return AC_ERR_OK;
1974}
1975
/* Build the BMDMA PRD table for an NCQ command in its per-tag slot.
 * Each scatterlist segment is split so that no PRD entry crosses a 64KB
 * boundary (a BMDMA hardware restriction); a length field of 0 encodes
 * 64KB.  The last entry gets the EOT marker.
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_bmdma_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
2011
/* Actually send one NCQ command to the device: set its SActive bit,
 * update the SWNCQ bookkeeping, and write the taskfile + command.  A NULL
 * qc is tolerated (nothing to issue).
 */
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	DPRINTK("Enter\n");

	writel((1 << qc->hw_tag), pp->sactive_block);
	pp->last_issue_tag = qc->hw_tag;
	/* this tag has not seen its D2H / DMA-setup FIS yet */
	pp->dhfis_bits &= ~(1 << qc->hw_tag);
	pp->dmafis_bits &= ~(1 << qc->hw_tag);
	pp->qc_active |= (0x1 << qc->hw_tag);

	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
	ap->ops->sff_exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->hw_tag);

	return 0;
}
2035
/* Issue hook for SWNCQ: non-NCQ goes straight to BMDMA; an NCQ command is
 * issued immediately only if the port is idle, otherwise it is deferred
 * until the in-flight commands drain.
 */
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc->tf.protocol != ATA_PROT_NCQ)
		return ata_bmdma_qc_issue(qc);

	DPRINTK("Enter\n");

	if (!pp->qc_active)
		nv_swncq_issue_atacmd(ap, qc);
	else
		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */

	return 0;
}
2053
/* Handle a hotplug interrupt on a SWNCQ port: record SError, note whether
 * it was a plug or unplug event, and freeze the port for EH.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
2078
/* Handle a Set Device Bits FIS: complete every tag the device has cleared
 * from SActive, then decide what to issue next.  Returns 0 on success or
 * -EINVAL on a BMDMA error (caller freezes the port).
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	u32 done_mask;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	/* tags we thought active that the device no longer reports */
	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	pp->qc_active &= ~done_mask;
	pp->dhfis_bits &= ~done_mask;
	pp->dmafis_bits &= ~done_mask;
	pp->sdbfis_bits |= done_mask;
	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);

	if (!ap->qc_active) {
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return 0;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return 0;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device to host register FIS,
		 * The driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%llx,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return 0;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return 0;
}
2151
2152static inline u32 nv_swncq_tag(struct ata_port *ap)
2153{
2154 struct nv_swncq_port_priv *pp = ap->private_data;
2155 u32 tag;
2156
2157 tag = readb(pp->tag_block) >> 2;
2158 return (tag & 0x1f);
2159}
2160
/* Handle a DMA Setup FIS: program the BMDMA engine (PRD table and
 * direction) for the tag the device selected, and start the transfer.
 */
static void nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear.
	 * ATA_DMA_WR means device->host, so set it for reads. */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
2192
/* Per-port SWNCQ interrupt handling: decode the FIS event bits and drive
 * the NCQ state machine — hotplug, device errors, SDB completion, D2H
 * register FIS arrival, and DMA-setup FIS events.  Any protocol violation
 * freezes the port for EH.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;

	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		if (nv_swncq_sdbfis(ap) < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2291
/* Top-level SWNCQ interrupt handler: read the combined MCP55 status word
 * and route each port's slice either to the SWNCQ state machine (when NCQ
 * commands are active) or to the generic per-port handler.
 */
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->link.sactive) {
			nv_swncq_host_interrupt(ap, (u16)irq_stat);
			handled = 1;
		} else {
			/* ack SWNCQ-specific bits we won't process */
			if (irq_stat)	/* reserve Hotplug */
				nv_swncq_irq_clear(ap, 0xfff0);

			handled += nv_host_intr(ap, (u8)irq_stat);
		}
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
2323
2324static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2325{
2326 const struct ata_port_info *ppi[] = { NULL, NULL };
2327 struct nv_pi_priv *ipriv;
2328 struct ata_host *host;
2329 struct nv_host_priv *hpriv;
2330 int rc;
2331 u32 bar;
2332 void __iomem *base;
2333 unsigned long type = ent->driver_data;
2334
2335
2336
2337
2338 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
2339 if (pci_resource_start(pdev, bar) == 0)
2340 return -ENODEV;
2341
2342 ata_print_version_once(&pdev->dev, DRV_VERSION);
2343
2344 rc = pcim_enable_device(pdev);
2345 if (rc)
2346 return rc;
2347
2348
2349 if (type == CK804 && adma_enabled) {
2350 dev_notice(&pdev->dev, "Using ADMA mode\n");
2351 type = ADMA;
2352 } else if (type == MCP5x && swncq_enabled) {
2353 dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2354 type = SWNCQ;
2355 }
2356
2357 ppi[0] = &nv_port_info[type];
2358 ipriv = ppi[0]->private_data;
2359 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2360 if (rc)
2361 return rc;
2362
2363 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2364 if (!hpriv)
2365 return -ENOMEM;
2366 hpriv->type = type;
2367 host->private_data = hpriv;
2368
2369
2370 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2371 if (rc)
2372 return rc;
2373
2374
2375 base = host->iomap[NV_MMIO_BAR];
2376 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2377 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2378
2379
2380 if (type >= CK804) {
2381 u8 regval;
2382
2383 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
2384 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2385 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2386 }
2387
2388
2389 if (type == ADMA) {
2390 rc = nv_adma_host_init(host);
2391 if (rc)
2392 return rc;
2393 } else if (type == SWNCQ)
2394 nv_swncq_host_init(host);
2395
2396 if (msi_enabled) {
2397 dev_notice(&pdev->dev, "Using MSI\n");
2398 pci_enable_msi(pdev);
2399 }
2400
2401 pci_set_master(pdev);
2402 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2403}
2404
2405#ifdef CONFIG_PM_SLEEP
2406static int nv_pci_device_resume(struct pci_dev *pdev)
2407{
2408 struct ata_host *host = pci_get_drvdata(pdev);
2409 struct nv_host_priv *hpriv = host->private_data;
2410 int rc;
2411
2412 rc = ata_pci_device_do_resume(pdev);
2413 if (rc)
2414 return rc;
2415
2416 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2417 if (hpriv->type >= CK804) {
2418 u8 regval;
2419
2420 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
2421 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2422 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2423 }
2424 if (hpriv->type == ADMA) {
2425 u32 tmp32;
2426 struct nv_adma_port_priv *pp;
2427
2428 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2429
2430 pp = host->ports[0]->private_data;
2431 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2432 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2433 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2434 else
2435 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
2436 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2437 pp = host->ports[1]->private_data;
2438 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2439 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2440 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2441 else
2442 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
2443 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2444
2445 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2446 }
2447 }
2448
2449 ata_host_resume(host);
2450
2451 return 0;
2452}
2453#endif
2454
2455static void nv_ck804_host_stop(struct ata_host *host)
2456{
2457 struct pci_dev *pdev = to_pci_dev(host->dev);
2458 u8 regval;
2459
2460
2461 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
2462 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2463 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2464}
2465
/* Host teardown for ADMA: disable ADMA and posted writes on both ports,
 * then fall through to the CK804 teardown (ADMA hardware is a CK804
 * superset).
 */
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
2482
/* Register the PCI driver; module_init/module_exit are generated. */
module_pci_driver(nv_pci_driver);

/* Module parameters — read-only after load (0444). */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
module_param_named(msi, msi_enabled, bool, 0444);
MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2491