/*
 * sata_mv.c - Marvell SATA support
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.28"

static int msi;
#ifdef CONFIG_PCI
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");
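/*
 * Illustrative usage note (added; not from the original sources):
 * coalescing takes effect only when *both* thresholds above are
 * non-zero, e.g.
 *
 *   modprobe sata_mv irq_coalescing_io_count=4 irq_coalescing_usecs=100
 *
 * which requests one interrupt per 4 completed I/Os, or per 100 usecs,
 * whichever occurs first.  See mv_set_irq_coalescing() below.
 */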
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR = 0,	/* offset 0x10: memory space */
	MV_IO_BAR = 2,		/* offset 0x18: IO space */
	MV_MISC_BAR = 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ = 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC" */
	COAL_CLOCKS_PER_USEC = 150,		/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1),
	MAX_COAL_IO_COUNT = 255,

	MV_PCI_REG_BASE = 0,

	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	COAL_REG_BASE = 0x18000,
	IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ = (1 << 4),	/* all ports irq event */

	IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
	IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),

	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
	TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),

	SATAHC0_REG_BASE = 0x20000,
	FLASH_CTL = 0x1046c,
	GPIO_PORT_CTL = 0x104f0,
	RESET_CFG = 0x180d8,

	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH = 32,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT = 256,
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT = 2,
	MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT),	/* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK = (MV_PORTS_PER_HC - 1),		/* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC = (1 << 30),	/* two SATA Host Controllers */

	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
			  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ = (1 << 0),
	CRQB_TAG_SHIFT = 1,
	CRQB_IOID_SHIFT = 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT = 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT = 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),

	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7 = 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL = (1 << 31),

	/* PCI interface registers */

	MV_PCI_COMMAND = 0xc00,
	MV_PCI_COMMAND_MWRCOM = (1 << 4),	/* PCI Master Write Combining */
	MV_PCI_COMMAND_MRDTRIG = (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),

	MV_PCI_MODE = 0xd00,
	MV_PCI_MODE_MASK = 0x30,

	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,

	PCI_IRQ_CAUSE = 0x1d58,
	PCI_IRQ_MASK = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE = 0x1900,
	PCIE_IRQ_MASK = 0x1910,
	PCIE_UNMASK_ALL_IRQS = 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
	SOC_HC_MAIN_IRQ_MASK = 0x20024,
	ERR_IRQ = (1 << 0),	/* shift by (2 * port #) */
	DONE_IRQ = (1 << 1),	/* shift by (2 * port #) */
	HC0_IRQ_PEND = 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT = 9,		/* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3 = 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT),	/* 4,5,6,7 */
	PCI_ERR = (1 << 18),
	TRAN_COAL_LO_DONE = (1 << 19),	/* transaction coalescing */
	TRAN_COAL_HI_DONE = (1 << 20),	/* transaction coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8),	/* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE = (1 << 17),	/* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE = (1 << 21),	/* GEN_II(E) IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19),	/* bits 31-19 */
	HC_MAIN_RSVD_SOC = (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	/* SATAHC registers */
	HC_CFG = 0x00,

	HC_IRQ_CAUSE = 0x14,
	DMA_IRQ = (1 << 0),	/* shift by port # */
	HC_COAL_IRQ = (1 << 4),	/* IRQ coalescing */
	DEV_IRQ = (1 << 8),	/* shift by port # */

	/*
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,

	SOC_LED_CTRL = 0x2c,
	SOC_LED_CTRL_BLINK = (1 << 0),	/* Active LED blink */
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */

	/* Shadow block registers */
	SHD_BLK = 0x100,
	SHD_CTL_AST = 0x20,	/* ofs from SHD_BLK */

	/* SATA registers */
	SATA_STATUS = 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE = 0x350,
	FIS_IRQ_CAUSE = 0x364,
	FIS_IRQ_CAUSE_AN = (1 << 9),	/* async notification */

	LTMODE = 0x30c,	/* requires read-after-write */
	LTMODE_BIT8 = (1 << 8),	/* unknown, but necessary */

	PHY_MODE2 = 0x330,
	PHY_MODE3 = 0x310,

	PHY_MODE4 = 0x314,	/* requires read-after-write */
	PHY_MODE4_CFG_MASK = 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE = 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS = 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES = 0x00000005,	/* Gen2e always write ones */

	SATA_IFCTL = 0x344,
	SATA_TESTCTL = 0x348,
	SATA_IFSTAT = 0x34c,
	VENDOR_UNIQUE_FIS = 0x35c,

	FISCFG = 0x360,
	FISCFG_WAIT_DEV_ERR = (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC = (1 << 16),	/* SYNC on DMA activation */

	PHY_MODE9_GEN2 = 0x398,
	PHY_MODE9_GEN1 = 0x39c,
	PHYCFG_OFS = 0x3a0,	/* only in 65n devices */

	MV5_PHY_MODE = 0x74,
	MV5_LTMODE = 0x30,
	MV5_PHY_CTL = 0x0C,
	SATA_IFCFG = 0x050,

	MV_M2_PREAMP_MASK = 0x7e0,

	/* Port registers */
	EDMA_CFG = 0,
	EDMA_CFG_Q_DEPTH = 0x1f,	/* max device queue depth */
	EDMA_CFG_NCQ = (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS = (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS = (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE = 0x8,
	EDMA_ERR_IRQ_MASK = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR = (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV = (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON = (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON = (1 << 4),	/* device connected */
	EDMA_ERR_SERR = (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS = (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5 = (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC = (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR = (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR = (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR = (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY = (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX = (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX = (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),	/* transient */
	EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),	/* transient */
	EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),	/* transient */
	EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),	/* transient */
	EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),	/* transient */

	EDMA_ERR_LNK_DATA_TX = (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO = (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
				 EDMA_ERR_LNK_CTRL_RX_1 |
				 EDMA_ERR_LNK_CTRL_RX_3 |
				 EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
			 EDMA_ERR_PRD_PAR |
			 EDMA_ERR_DEV_DCON |
			 EDMA_ERR_DEV_CON |
			 EDMA_ERR_SERR |
			 EDMA_ERR_SELF_DIS |
			 EDMA_ERR_CRQB_PAR |
			 EDMA_ERR_CRPB_PAR |
			 EDMA_ERR_INTRL_PAR |
			 EDMA_ERR_IORDY |
			 EDMA_ERR_LNK_CTRL_RX_2 |
			 EDMA_ERR_LNK_DATA_RX |
			 EDMA_ERR_LNK_DATA_TX |
			 EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
			   EDMA_ERR_PRD_PAR |
			   EDMA_ERR_DEV_DCON |
			   EDMA_ERR_DEV_CON |
			   EDMA_ERR_OVERRUN_5 |
			   EDMA_ERR_UNDERRUN_5 |
			   EDMA_ERR_SELF_DIS_5 |
			   EDMA_ERR_CRQB_PAR |
			   EDMA_ERR_CRPB_PAR |
			   EDMA_ERR_INTRL_PAR |
			   EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI = 0x10,
	EDMA_REQ_Q_IN_PTR = 0x14,	/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,

	EDMA_RSP_Q_BASE_HI = 0x1c,
	EDMA_RSP_Q_IN_PTR = 0x20,
	EDMA_RSP_Q_OUT_PTR = 0x24,	/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,

	EDMA_CMD = 0x28,	/* EDMA command register */
	EDMA_EN = (1 << 0),	/* enable EDMA */
	EDMA_DS = (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET = (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS = 0x30,	/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY = (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE = (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT = 0x34,
	EDMA_ARB_CFG = 0x38,

	EDMA_HALTCOND = 0x60,	/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD = 0x6C,	/* GenIIe unknown/reserved */

	BMDMA_CMD = 0x224,	/* bmdma command register */
	BMDMA_STATUS = 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW = 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH = 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_GEN_I = (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II = (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE = (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE = (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH = (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC = (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN = (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN = (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH = (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
#define WINDOW_BASE(i) (0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY = 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};
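/*
 * Worked example (added for clarity; not in the original sources):
 * the queue base masks above follow directly from the queue sizes.
 * The request queue is MV_MAX_Q_DEPTH (32) CRQBs of 32 bytes = 1024
 * bytes, so its base must be 1 KB aligned, leaving the low 10 bits of
 * the base register free for the ring index:
 *
 *   MV_CRQB_Q_SZ = 32 * 32 = 1024  ->  EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00
 *   MV_CRPB_Q_SZ = 32 *  8 =  256  ->  EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00
 */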
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32 sg_addr;
	__le32 sg_addr_hi;
	__le16 ctrl_flags;
	__le16 ata_cmd[11];
};

/* Gen-IIe command request block: 32B */
struct mv_crqb_iie {
	__le32 addr;
	__le32 addr_hi;
	__le32 flags;
	__le32 len;
	__le32 ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16 id;
	__le16 flags;
	__le32 tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); 16B */
struct mv_sg {
	__le32 addr;
	__le32 flags_size;
	__le32 addr_hi;
	__le32 reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32 fiscfg;
	u32 ltmode;
	u32 haltcond;
	u32 unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb *crqb;
	dma_addr_t crqb_dma;
	struct mv_crpb *crpb;
	dma_addr_t crpb_dma;
	struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int req_idx;
	unsigned int resp_idx;

	u32 pp_flags;
	struct mv_cached_regs cached;
	unsigned int delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32 amps;
	u32 pre;
};

struct mv_host_priv {
	u32 hp_flags;
	unsigned int board_idx;
	u32 main_irq_mask;
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;
	int n_ports;
	void __iomem *base;
	void __iomem *main_irq_cause_addr;
	void __iomem *main_irq_mask_addr;
	u32 irq_cause_offset;
	u32 irq_mask_offset;
	u32 unmask_all_irqs;

#if defined(CONFIG_HAVE_CLK)
	struct clk *clk;
#endif
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool *crqb_pool;
	struct dma_pool *crpb_pool;
	struct dma_pool *sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);
/*
 * .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = MV_MAX_Q_DEPTH - 1,
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits = &ata_sff_port_ops,

	.lost_interrupt = ATA_OP_NULL,

	.qc_defer = mv_qc_defer,
	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,

	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,
	.hardreset = mv_hardreset,

	.scr_read = mv5_scr_read,
	.scr_write = mv5_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits = &ata_bmdma_port_ops,

	.lost_interrupt = ATA_OP_NULL,

	.qc_defer = mv_qc_defer,
	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,

	.dev_config = mv6_dev_config,

	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,
	.hardreset = mv_hardreset,
	.softreset = mv_softreset,
	.pmp_hardreset = mv_pmp_hardreset,
	.pmp_softreset = mv_softreset,
	.error_handler = mv_pmp_error_handler,

	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.sff_check_status = mv_sff_check_status,
	.sff_irq_clear = mv_sff_irq_clear,
	.check_atapi_dma = mv_check_atapi_dma,
	.bmdma_setup = mv_bmdma_setup,
	.bmdma_start = mv_bmdma_start,
	.bmdma_stop = mv_bmdma_stop,
	.bmdma_status = mv_bmdma_status,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits = &mv6_ops,
	.dev_config = ATA_OP_NULL,	/* mv6_dev_config does not apply here */
	.qc_prep = mv_qc_prep_iie,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags = MV_GEN_I_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_508x */
		.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_5080 */
		.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_604x */
		.flags = MV_GEN_II_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_608x */
		.flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_6042 */
		.flags = MV_GEN_IIE_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags = MV_GEN_IIE_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags = MV_GEN_IIE_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.read_preamp = mv_soc_read_preamp,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
};

static const struct mv_hw_ops mv_soc_65n_ops = {
	.phy_errata = mv_soc_65n_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
};
/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations:
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
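/*
 * Worked example (added for clarity; not in the original sources):
 * for port 5, mv_hc_from_port(5) = 5 >> 2 = 1 and
 * mv_hardport_from_port(5) = 5 & 3 = 1, so the macro yields
 *
 *   shift    = 1 * HC_SHIFT + 1 * 2 = 9 + 2 = 11
 *   hardport = 1
 *
 * i.e. port 5's ERR_IRQ/DONE_IRQ bits sit at bits 11/12 of the main
 * IRQ cause/mask registers.
 */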
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
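/*
 * Worked example (added for clarity; not in the original sources):
 * for port 5 the port base works out as
 *
 *   mv_port_base(base, 5) = base + 0x20000     (SATAHC0_REG_BASE)
 *                         + 1 * 0x10000        (HC 1)
 *                         + 0x2000             (arbiter register area)
 *                         + 1 * 0x2000         (hardport 1)
 *                         = base + 0x34000
 */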
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 *      mv_save_cached_regs - (re-)initialize cached port registers
 *      @ap: the port whose registers we are caching
 *
 *      Initialize the local cache of port registers,
 *      so that reading them over and over again can
 *      be avoided on the hot path of mv_start_edma().
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG);
	pp->cached.ltmode = readl(port_mmio + LTMODE);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}

/**
 *      mv_write_cached_reg - write to a cached port register
 *      @addr: hardware address of the register
 *      @old: pointer to cached value of the register
 *      @new: new value for the register
 *
 *      Write a new value to a cached register,
 *      but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		unsigned long laddr;
		*old = new;
		/*
		 * Workaround for 88SX60x1-B2 FEr SATA#13:
		 * Read-after-write is needed to prevent generating 64-bit
		 * write cycles on the PCI bus for SATA interface registers
		 * at offsets ending in 0x4 or 0xc.
		 *
		 * Looks like a lot of fuss, but it avoids an unnecessary
		 * +1 usec read-after-write delay for unaffected registers.
		 */
		laddr = (long)addr & 0xffff;
		if (laddr >= 0x300 && laddr <= 0x33c) {
			laddr &= 0x000f;
			if (laddr == 0x4 || laddr == 0xc) {
				writelfl(new, addr);	/* read after write */
				return;
			}
		}
		writel(new, addr);	/* unaffected by the errata */
	}
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
}
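/*
 * Illustrative note (added; not in the original sources): because the
 * queue bases are naturally aligned, the ring index shares the same
 * register as the base address.  E.g. with req_idx = 3:
 *
 *   index = 3 << EDMA_REQ_Q_PTR_SHIFT = 3 << 5 = 0x60
 *
 * which is OR'd into the low 10 bits of EDMA_REQ_Q_IN_PTR, while the
 * 1 KB-aligned crqb_dma supplies the upper bits.
 */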
static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}
static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + FIS_IRQ_CAUSE);

	mv_enable_port_irqs(ap, port_irqs);
}
/*
 *	mv_set_irq_coalescing - Program IRQ coalescing parameters
 *	@host: the ata host
 *	@count: I/O completion count threshold (0 disables coalescing)
 *	@usecs: time threshold in usecs (0 disables coalescing)
 */
static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
							ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		/*
		 * GEN_II/GEN_IIE with dual host controllers:
		 * one set of global thresholds for the entire chip.
		 */
		writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
		if (count)
			coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0;	/* force clearing of regular regs below */
	}

	/*
	 * All chips: independent thresholds for each HC on the chip.
	 */
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
	if (count)
		coal_enable |= PORTS_0_3_COAL_DONE;
	if (is_dual_hc) {
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
		if (count)
			coal_enable |= PORTS_4_7_COAL_DONE;
	}

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
}
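/*
 * Worked example (added for clarity; not in the original sources):
 * with irq_coalescing_io_count=4 and irq_coalescing_usecs=100, the
 * time threshold programmed above is
 *
 *   clks = 100 * COAL_CLOCKS_PER_USEC = 100 * 150 = 15000
 *
 * well under MAX_COAL_TIME_THRESHOLD (2^24 - 1), so no clamping occurs.
 */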
/**
 *      mv_start_edma - Enable eDMA engine
 *      @ap: the port
 *      @port_mmio: the port's MMIO base
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command being started
 *
 *      If EDMA is currently running in the wrong mode (NCQ vs non-NCQ),
 *      stop it first, then (re)configure and enable it.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			  struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
}
/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA; wait to confirm */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}
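/*
 * Illustrative note (added; not in the original sources): the polling
 * loops above bound the total wait at roughly
 *
 *   mv_wait_for_edma_empty_idle:  3000 iterations *  5 usecs =  15 msecs
 *   mv_stop_edma_engine:         10000 iterations * 10 usecs = 100 msecs
 *
 * before mv_stop_edma() gives up and reports -EIO.
 */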
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be max of 8 ports */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
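/*
 * Resulting mapping (added for clarity; not in the original sources),
 * matching the SATA register block at port offset 0x300:
 *
 *   SCR_STATUS  (0) -> 0x300   (SStatus)
 *   SCR_ERROR   (1) -> 0x304   (SError)
 *   SCR_CONTROL (2) -> 0x308   (SControl)
 *   SCR_ACTIVE  (3) -> 0x350   (SActive, not contiguous with the rest)
 */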
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;
		if (sc_reg_in == SCR_CONTROL) {
			/*
			 * Workaround for 88SX60x1 FEr SATA#26:
			 *
			 * COMRESETs have to take care not to accidentally
			 * put the drive to sleep when writing SCR_CONTROL.
			 * Setting bits 12..15 prevents this problem.
			 *
			 * So if we see an outstanding COMRESET, either in
			 * the new value or already present in the register,
			 * set those bits along with it.
			 */
			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
				val |= 0xf000;	/* IPM bits 12..15 */
		}
		writelfl(val, addr);
		return 0;
	} else
		return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (requires command-based switching, which is not supported)
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		}
	}
}
static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;

	/*
	 * If this link is "excl" (exclusive), only the command that
	 * established exclusivity may proceed, and only once all other
	 * active commands on the port have completed.
	 */
	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
			return 0;
		} else
			return ATA_DEFER_PORT;
	}

	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
		if (ata_is_ncq(qc->tf.protocol))
			return 0;
		else {
			ap->excl_link = link;
			return ATA_DEFER_PORT;
		}
	}

	return ATA_DEFER_PORT;
}
static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;

	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	ltmode   = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			haltcond &= ~EDMA_ERR_DEV;
		else
			fiscfg |=  FISCFG_WAIT_DEV_ERR;
	} else {
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	}

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
}
static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + GPIO_PORT_CTL);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + GPIO_PORT_CTL);
}
/*
 * mv_bmdma_enable_iie - enable/disable BMDMA on GEN_IIE chips
 * @ap: the port being configured
 * @enable_bmdma: non-zero to enable BMDMA operation
 *
 * Bit 0 of the otherwise-undocumented EDMA_UNKNOWN_RSVD register
 * appears to gate basic DMA (BMDMA) on these chips: it is set here
 * when EDMA is not in use, and cleared again before enabling EDMA.
 * (Comment reconstructed; the original explanatory text was lost.)
 */
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
	struct mv_port_priv *pp = ap->private_data;
	u32 new, *old = &pp->cached.unknown_rsvd;

	if (enable_bmdma)
		new = *old | 1;
	else
		new = *old & ~1;
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
}
/*
 * SOC chips have an issue whereby the HDD LEDs don't always blink
 * during I/O when NCQ is enabled on a port.  To work around this,
 * set a special bit in the LED control register, which enables
 * "blink mode" for all drives on that controller.  In blink mode,
 * the LEDs no longer indicate drive presence, but instead blink
 * only during active I/O.
 *
 * We turn blink mode on when NCQ is in use on any port of the chip,
 * and off again when no port is using NCQ.
 */
static void mv_soc_led_blink_enable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;

	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
		return;
	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

static void mv_soc_led_blink_disable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;
	unsigned int port;

	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
		return;

	/* disable led-blink only if no ports are using NCQ */
	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *this_ap = host->ports[port];
		struct mv_port_priv *pp = this_ap->private_data;

		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
			return;
	}

	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
	u32 cfg;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow EDMA
		 * to be turned on and off per-command.  Until then,
		 * FBS is used here only together with NCQ.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(ap, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS;	/* FBS enable for EDMA */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		if (want_edma) {
			cfg |= (1 << 22);	/* enab 4-entry host queue cache */
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18);	/* enab early completion */
		}
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17);	/* enab cut-thru (dis stor&forwrd) */
		mv_bmdma_enable_iie(ap, !want_edma);

		if (IS_SOC(hpriv)) {
			if (want_ncq)
				mv_soc_led_blink_enable(ap);
			else
				mv_soc_led_blink_disable(ap);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	}

	writelfl(cfg, port_mmio + EDMA_CFG);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, there's one sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(ap->lock, flags);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);
	spin_unlock_irqrestore(ap->lock, flags);

	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	spin_unlock_irqrestore(ap->lock, flags);
	mv_port_free_dma_mem(ap);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
			mv_sg->reserved = 0;

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	mb(); /* ensure data structure is visible to the chipset */
}
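/*
 * Worked example (added for clarity; not in the original sources):
 * a single 0x8000-byte segment at DMA address 0x1fff0 crosses a 64 KB
 * boundary, so the loop above emits two ePRDs:
 *
 *   ePRD 0: addr = 0x1fff0, len = 0x0010   (up to the 64 KB boundary)
 *   ePRD 1: addr = 0x20000, len = 0x7ff0   (remainder; END_OF_TBL set)
 *
 * This worst-case splitting is why the scsi_host_templates above use
 * sg_tablesize = MV_MAX_SG_CT / 2.
 */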
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
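/*
 * Worked example (added for clarity; not in the original sources):
 * packing the command-register write for READ DMA EXT (0x25) as the
 * final word of a CRQB:
 *
 *   mv_crqb_pack_cmd(cw, 0x25, ATA_REG_CMD, 1);
 *   tmp = 0x25 | (0x7 << 8) | (0x2 << 11) | (1 << 15) = 0x9725
 *
 * (ATA_REG_CMD is 0x7; CRQB_CMD_CS supplies the fixed control bits
 * used for every packed word, and CRQB_CMD_LAST marks the end of the
 * register list.)
 */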
/**
 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
 *	@ap: Port associated with this ATA transaction.
 *
 *	We need this only for ATAPI bmdma transactions,
 *	as otherwise we experience spurious interrupts
 *	after libata-sff handles the bmdma interrupts.
 */
static void mv_sff_irq_clear(struct ata_port *ap)
{
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}
/**
 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
 *	@qc: queued command to check for chipset/DMA compatibility.
 *
 *	The bmdma engines cannot handle speculative data sizes
 *	(bytecount under/over flow).  So only allow DMA for
 *	data transfer commands with known data sizes.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (scmd) {
		switch (scmd->cmnd[0]) {
		case READ_6:
		case READ_10:
		case READ_12:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case GPCMD_READ_CD:
		case GPCMD_SEND_DVD_STRUCTURE:
		case GPCMD_SEND_CUE_SHEET:
			return 0;	/* DMA is safe */
		}
	}
	return -EOPNOTSUPP;	/* use PIO instead */
}
/**
 *	mv_bmdma_setup - Set up BMDMA transaction
 *	@qc: queued command to prepare DMA for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	mv_fill_sg(qc);

	/* clear all DMA cmd bits */
	writel(0, port_mmio + BMDMA_CMD);

	/* load PRD table addr. */
	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
		port_mmio + BMDMA_PRD_HIGH);
	writelfl(pp->sg_tbl_dma[qc->tag],
		port_mmio + BMDMA_PRD_LOW);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}
/**
 *	mv_bmdma_start - Start a BMDMA transaction
 *	@qc: queued command to start DMA on.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;

	/* start host DMA transaction */
	writelfl(cmd, port_mmio + BMDMA_CMD);
}
/**
 *	mv_bmdma_stop_ap - Stop BMDMA transfer
 *	@ap: port on which to stop DMA
 *
 *	Clears the ATA_DMA_START flag in the bmdma control register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_stop_ap(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 cmd;

	/* clear start/stop bit */
	cmd = readl(port_mmio + BMDMA_CMD);
	if (cmd & ATA_DMA_START) {
		cmd &= ~ATA_DMA_START;
		writelfl(cmd, port_mmio + BMDMA_CMD);

		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
		ata_sff_dma_pause(ap);
	}
}

static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
	mv_bmdma_stop_ap(qc->ap);
}
/**
 *	mv_bmdma_status - Read BMDMA status
 *	@ap: port for which to retrieve DMA status.
 *
 *	Read and return equivalent of the sff BMDMA status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 mv_bmdma_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 reg, status;

	/*
	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
	 * and the ATA_DMA_INTR bit doesn't exist.
	 */
	reg = readl(port_mmio + BMDMA_STATUS);
	if (reg & ATA_DMA_ACTIVE)
		status = ATA_DMA_ACTIVE;
	else if (reg & ATA_DMA_ERR)
		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
	else {
		/*
		 * Just because DMA_ACTIVE is 0 (DMA completed),
		 * this does _not_ mean the device is "done".
		 * So we should not yet be signalling ATA_DMA_INTR
		 * in some cases.  Eg. DSM/TRIM, and perhaps others.
		 */
		mv_bmdma_stop_ap(ap);
		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
			status = 0;
		else
			status = ATA_DMA_INTR;
	}
	return status;
}
static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	/*
	 * Workaround for 88SX60x1 FEr SATA#24.
	 *
	 * Chip may corrupt WRITEs if multi_count >= 4kB.
	 * Note that READs are unaffected.
	 *
	 * It's not clear if this errata really means "4K bytes",
	 * or if it always happens for multi_count > 7
	 * regardless of device sector size.
	 *
	 * So, for safety, any write with multi_count > 7
	 * gets converted here into a regular PIO write instead:
	 */
	if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
		if (qc->dev->multi_count > 7) {
			switch (tf->command) {
			case ATA_CMD_WRITE_MULTI:
				tf->command = ATA_CMD_PIO_WRITE;
				break;
			case ATA_CMD_WRITE_MULTI_FUA_EXT:
				tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
				/* fall through */
			case ATA_CMD_WRITE_MULTI_EXT:
				tf->command = ATA_CMD_PIO_WRITE_EXT;
				break;
			}
		}
	}
}
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf = &qc->tf;
	u16 flags = 0;
	unsigned in_index;

	switch (tf->protocol) {
	case ATA_PROT_DMA:
		if (tf->command == ATA_CMD_DSM)
			return;
		/* fall through */
	case ATA_PROT_NCQ:
		break;	/* continue below */
	case ATA_PROT_PIO:
		mv_rw_multi_errata_sata24(qc);
		return;
	default:
		return;
	}

	/* Fill in command request block
	 */
	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/*
		 * Only the DMA/NCQ commands handled above are prepared
		 * through this path; reaching here with any other command
		 * indicates a driver bug.
		 * (Comment reconstructed; the original text was lost.)
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
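/*
 * Illustrative CRQB layout (added for clarity; not in the original
 * sources): for ATA_CMD_READ_EXT the code above packs, in order,
 *
 *   hob_nsect, nsect, hob_lbal, lbal, hob_lbam, lbam,
 *   hob_lbah, lbah, device, command (flagged CRQB_CMD_LAST)
 *
 * i.e. ten of the eleven words available in crqb->ata_cmd[], which is
 * why feature/hob_feature had to be dropped for non-NCQ DMA commands.
 */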
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf = &qc->tf;
	unsigned in_index;
	u32 flags = 0;

	if ((tf->protocol != ATA_PROT_DMA) &&
	    (tf->protocol != ATA_PROT_NCQ))
		return;
	if (tf->command == ATA_CMD_DSM)
		return;	/* use bmdma for this */

	/* Fill in Gen IIE command request block */
	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_sff_check_status - fetch device status, if valid
 *	@ap: ATA port to fetch status from
 *
 *	When using command issue via mv_qc_issue_fis(),
 *	the initial ATA_BUSY state does not show up in the
 *	ATA status (shadow) register.  This can confuse libata!
 *
 *	So we have a hook here to fake ATA_BUSY for that situation,
 *	until the first time a BUSY, DRQ, or ERR bit is seen.
 *
 *	The rest of the time, it simply returns the ATA status register.
 */
static u8 mv_sff_check_status(struct ata_port *ap)
{
	u8 stat = ioread8(ap->ioaddr.status_addr);
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
		else
			stat = ATA_BUSY;
	}
	return stat;
}
/**
 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *	@ap: ATA port to send a FIS on
 *	@fis: fis to be sent
 *	@nwords: number of 32-bit words in the fis
 */
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 ifctl, old_ifctl, ifstat;
	int i, timeout = 200, final_word = nwords - 1;

	/* Initiate FIS transmission mode */
	old_ifctl = readl(port_mmio + SATA_IFCTL);
	ifctl = 0x100 | (old_ifctl & 0xf);
	writelfl(ifctl, port_mmio + SATA_IFCTL);

	/* Send all words of the FIS except for the final word */
	for (i = 0; i < final_word; ++i)
		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);

	/* Flag end-of-transmission, and then send the final word */
	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);

	/*
	 * Wait for FIS transmission to complete.
	 * This typically takes just a single iteration.
	 */
	do {
		ifstat = readl(port_mmio + SATA_IFSTAT);
	} while (!(ifstat & 0x1000) && --timeout);

	/* Restore original port configuration */
	writelfl(old_ifctl, port_mmio + SATA_IFCTL);

	/* See if it worked */
	if ((ifstat & 0x3000) != 0x1000) {
		ata_port_printk(ap, KERN_WARNING,
				"%s transmission error, ifstat=%08x\n",
				__func__, ifstat);
		return AC_ERR_OTHER;
	}
	return 0;
}
/**
 *	mv_qc_issue_fis - Issue a command directly as a FIS
 *	@qc: queued command to start
 *
 *	Note that the ATA shadow registers are not updated
 *	after command issue, so the device will appear "READY"
 *	if polled, even while it is BUSY processing the command.
 *
 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
 *
 *	Note: we don't get updated shadow regs on *completion*
 *	of non-data commands.  So avoid sending them via this function,
 *	as they will appear to have completed immediately.
 *
 *	GEN_IIE has special registers that we could get the result tf from,
 *	but earlier chipsets do not.  For now, we ignore those registers.
 */
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_link *link = qc->dev->link;
	u32 fis[5];
	int err = 0;

	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
	if (err)
		return err;

	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		/* fall through */
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_FIRST;
		break;
	case ATA_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			ap->hsm_task_state = HSM_ST_FIRST;
		else
			ap->hsm_task_state = HSM_ST;
		break;
	default:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		ata_sff_queue_pio_task(link, 0);
	return 0;
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	static int limit_warnings = 10;
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;
	unsigned int port_irqs;

	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		if (qc->tf.command == ATA_CMD_DSM) {
			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
				return AC_ERR_OTHER;
			break;  /* use bmdma for this */
		}
		/* fall through */
	case ATA_PROT_NCQ:
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

		/* Write the request in pointer to kick the EDMA to life */
		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
					port_mmio + EDMA_REQ_Q_IN_PTR);
		return 0;

	case ATA_PROT_PIO:
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
					": attempting PIO w/multiple DRQ: "
					"this may fail due to h/w errata\n");
		}
		/* fall through */
	case ATA_PROT_NODATA:
	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (ap->flags & ATA_FLAG_PIO_POLLING)
			qc->tf.flags |= ATA_TFLAG_POLLING;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		port_irqs = ERR_IRQ;	/* leave ERR_IRQ enabled for hotplug */
	else
		port_irqs = ERR_IRQ | DONE_IRQ;	/* both enabled */

	/*
	 * We're about to send a non-EDMA capable command to the
	 * port.  Turn off EDMA so there won't be problems accessing
	 * shadow block, etc registers.
	 */
	mv_stop_edma(ap);
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
	mv_pmp_select(ap, qc->dev->link->pmp);

	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		/*
		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
		 *
		 * After any NCQ error, the READ_LOG_EXT command
		 * from libata-eh *must* use mv_qc_issue_fis().
		 * Otherwise it might fail, due to chip errata.
		 *
		 * Rather than special-casing it, we'll just *always*
		 * use this method here for READ_LOG_EXT, making for
		 * easier testing.
		 */
		if (IS_GEN_II(hpriv))
			return mv_qc_issue_fis(qc);
	}
	return ata_bmdma_qc_issue(qc);
}
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
		return qc;
	return NULL;
}
static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];
				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}
static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	return readl(port_mmio + SATA_TESTCTL) >> 16;
}

static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	struct ata_eh_info *ehi;
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];

			pmp_map &= ~this_pmp;
			ehi = &link->eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}
static int mv_req_q_empty(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);	/* nothing left to process? */
}
2475
2476static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2477{
2478 struct mv_port_priv *pp = ap->private_data;
2479 int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.6).
	 */
2490 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2491 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2492 pp->delayed_eh_pmp_map = 0;
2493 }
2494 old_map = pp->delayed_eh_pmp_map;
2495 new_map = old_map | mv_get_err_pmp_map(ap);
2496
2497 if (old_map != new_map) {
2498 pp->delayed_eh_pmp_map = new_map;
2499 mv_pmp_eh_prep(ap, new_map & ~old_map);
2500 }
2501 failed_links = hweight16(new_map);
2502
2503 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
2504 "failed_links=%d nr_active_links=%d\n",
2505 __func__, pp->delayed_eh_pmp_map,
2506 ap->qc_active, failed_links,
2507 ap->nr_active_links);
2508
2509 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2510 mv_process_crpb_entries(ap, pp);
2511 mv_stop_edma(ap);
2512 mv_eh_freeze(ap);
2513 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
		return 1;	/* handled */
2515 }
2516 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
	return 1;	/* handled */
2518}
2519
2520static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2521{
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533 return 0;
2534}
2535
2536static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2537{
2538 struct mv_port_priv *pp = ap->private_data;
2539
2540 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2541 return 0;
2542 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2543 return 0;
2544
2545 if (!(edma_err_cause & EDMA_ERR_DEV))
2546 return 0;
2547 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2548 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2549 return 0;
2550
	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
2557 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2558 ata_port_printk(ap, KERN_WARNING,
2559 "%s: err_cause=0x%x pp_flags=0x%x\n",
2560 __func__, edma_err_cause, pp->pp_flags);
2561 return 0;
2562 }
2563 return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
2570 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2571 ata_port_printk(ap, KERN_WARNING,
2572 "%s: err_cause=0x%x pp_flags=0x%x\n",
2573 __func__, edma_err_cause, pp->pp_flags);
2574 return 0;
2575 }
2576 return mv_handle_fbs_non_ncq_dev_err(ap);
2577 }
	return 0;	/* not reached: both branches above return */
2579}
2580
2581static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2582{
2583 struct ata_eh_info *ehi = &ap->link.eh_info;
2584 char *when = "idle";
2585
2586 ata_ehi_clear_desc(ehi);
2587 if (edma_was_enabled) {
2588 when = "EDMA enabled";
2589 } else {
2590 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2591 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2592 when = "polling";
2593 }
2594 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2595 ehi->err_mask |= AC_ERR_OTHER;
2596 ehi->action |= ATA_EH_RESET;
2597 ata_port_freeze(ap);
2598}
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611static void mv_err_intr(struct ata_port *ap)
2612{
2613 void __iomem *port_mmio = mv_ap_base(ap);
2614 u32 edma_err_cause, eh_freeze_mask, serr = 0;
2615 u32 fis_cause = 0;
2616 struct mv_port_priv *pp = ap->private_data;
2617 struct mv_host_priv *hpriv = ap->host->private_data;
2618 unsigned int action = 0, err_mask = 0;
2619 struct ata_eh_info *ehi = &ap->link.eh_info;
2620 struct ata_queued_cmd *qc;
	int abort = 0;

	/*
	 * Read and clear the SError and err_cause bits.
	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
	 */
2628 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2629 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2630
2631 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2632 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2633 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2634 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2635 }
2636 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2637
	if (edma_err_cause & EDMA_ERR_DEV) {
		/*
		 * Device errors during FIS-based switching operation
		 * require special handling.
		 */
2643 if (mv_handle_dev_err(ap, edma_err_cause))
2644 return;
2645 }
2646
2647 qc = mv_get_active_qc(ap);
2648 ata_ehi_clear_desc(ehi);
2649 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2650 edma_err_cause, pp->pp_flags);
2651
2652 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2653 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2654 if (fis_cause & FIS_IRQ_CAUSE_AN) {
2655 u32 ec = edma_err_cause &
2656 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2657 sata_async_notification(ap);
2658 if (!ec)
2659 return;
2660 ata_ehi_push_desc(ehi, "SDB notify");
2661 }
2662 }
2663
2664
2665
2666 if (edma_err_cause & EDMA_ERR_DEV) {
2667 err_mask |= AC_ERR_DEV;
2668 action |= ATA_EH_RESET;
2669 ata_ehi_push_desc(ehi, "dev error");
2670 }
2671 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2672 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2673 EDMA_ERR_INTRL_PAR)) {
2674 err_mask |= AC_ERR_ATA_BUS;
2675 action |= ATA_EH_RESET;
2676 ata_ehi_push_desc(ehi, "parity error");
2677 }
2678 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2679 ata_ehi_hotplugged(ehi);
2680 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2681 "dev disconnect" : "dev connect");
2682 action |= ATA_EH_RESET;
2683 }
2684
2685
2686
2687
2688
2689 if (IS_GEN_I(hpriv)) {
2690 eh_freeze_mask = EDMA_EH_FREEZE_5;
2691 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2692 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2693 ata_ehi_push_desc(ehi, "EDMA self-disable");
2694 }
2695 } else {
2696 eh_freeze_mask = EDMA_EH_FREEZE;
2697 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2698 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2699 ata_ehi_push_desc(ehi, "EDMA self-disable");
2700 }
2701 if (edma_err_cause & EDMA_ERR_SERR) {
2702 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2703 err_mask |= AC_ERR_ATA_BUS;
2704 action |= ATA_EH_RESET;
2705 }
2706 }
2707
2708 if (!err_mask) {
2709 err_mask = AC_ERR_OTHER;
2710 action |= ATA_EH_RESET;
2711 }
2712
2713 ehi->serror |= serr;
2714 ehi->action |= action;
2715
2716 if (qc)
2717 qc->err_mask |= err_mask;
2718 else
2719 ehi->err_mask |= err_mask;
2720
	if (err_mask == AC_ERR_DEV) {
		/*
		 * Cannot do ata_port_freeze() here,
		 * because it would kill PIO access,
		 * which is needed for further diagnosis.
		 */
2727 mv_eh_freeze(ap);
2728 abort = 1;
	} else if (edma_err_cause & eh_freeze_mask) {
		/*
		 * Note to self: ata_port_freeze() calls ata_port_abort()
		 */
2733 ata_port_freeze(ap);
2734 } else {
2735 abort = 1;
2736 }
2737
2738 if (abort) {
2739 if (qc)
2740 ata_link_abort(qc->dev->link);
2741 else
2742 ata_port_abort(ap);
2743 }
2744}
2745
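/*
 * mv_process_crpb_response - examine one response-queue (CRPB) entry.
 *
 * Returns true when the command completed cleanly and can be marked done.
 * Returns false on error, leaving the cleanup to mv_err_intr().
 */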
2746static bool mv_process_crpb_response(struct ata_port *ap,
2747 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2748{
2749 u8 ata_status;
	u16 edma_status = le16_to_cpu(response->flags);

	/*
	 * edma_status from a response queue entry:
	 *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
	 *   MSB is saved ATA status from command completion.
	 */
2757 if (!ncq_enabled) {
2758 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
		if (err_cause) {
			/*
			 * Error will be seen/handled by
			 * mv_err_intr().  So do nothing at all here.
			 */
2764 return false;
2765 }
2766 }
2767 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2768 if (!ac_err_mask(ata_status))
2769 return true;
2770
2771 return false;
2772}
2773
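/*
 * mv_process_crpb_entries - reap completed commands from the response queue.
 *
 * Walks the CRPB ring from our cached out-index up to the hardware's
 * in-index, builds a bitmask of cleanly completed tags, completes them
 * in one batch, and then writes the new out-pointer back to the chip.
 */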
2774static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2775{
2776 void __iomem *port_mmio = mv_ap_base(ap);
2777 struct mv_host_priv *hpriv = ap->host->private_data;
2778 u32 in_index;
2779 bool work_done = false;
2780 u32 done_mask = 0;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
2784 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses from since the last time we looked */
2788 while (in_index != pp->resp_idx) {
2789 unsigned int tag;
2790 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2791
2792 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2793
		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ; only one command is active at a time */
2796 tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
2799 tag = le16_to_cpu(response->id) & 0x1f;
2800 }
2801 if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2802 done_mask |= 1 << tag;
2803 work_done = true;
2804 }
2805
2806 if (work_done) {
		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);

		/* Update the software queue position index in hardware */
2810 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2811 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2812 port_mmio + EDMA_RSP_Q_OUT_PTR);
2813 }
2814}
2815
2816static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2817{
2818 struct mv_port_priv *pp;
	int edma_was_enabled;

	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if one of the routines we call below changes it.
	 */
2826 pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	/* Process completed CRPB responses, if any */
2831 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2832 mv_process_crpb_entries(ap, pp);
2833 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2834 mv_handle_fbs_ncq_dev_err(ap);
2835 }
2836
2837
2838
2839 if (unlikely(port_cause & ERR_IRQ)) {
2840 mv_err_intr(ap);
2841 } else if (!edma_was_enabled) {
2842 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2843 if (qc)
2844 ata_bmdma_port_intr(ap, qc);
2845 else
2846 mv_unexpected_intr(ap, edma_was_enabled);
2847 }
2848}
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2859{
2860 struct mv_host_priv *hpriv = host->private_data;
2861 void __iomem *mmio = hpriv->base, *hc_mmio;
	unsigned int handled = 0, port;

	/* If asserted, clear the "all ports" IRQ coalescing bit */
2865 if (main_irq_cause & ALL_PORTS_COAL_DONE)
2866 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2867
2868 for (port = 0; port < hpriv->n_ports; port++) {
2869 struct ata_port *ap = host->ports[port];
2870 unsigned int p, shift, hardport, port_cause;
2871
		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
		/*
		 * Each hc within the host has its own hc_irq_cause register,
		 * where the interrupting ports bits get ack'd.
		 */
		if (hardport == 0) {	/* first port on this hc ? */
2878 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
			u32 port_mask, ack_irqs;
			/*
			 * Skip this entire hc if nothing pending for any ports
			 */
2883 if (!hc_cause) {
2884 port += MV_PORTS_PER_HC - 1;
2885 continue;
2886 }
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899 ack_irqs = 0;
2900 if (hc_cause & PORTS_0_3_COAL_DONE)
2901 ack_irqs = HC_COAL_IRQ;
2902 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2903 if ((port + p) >= hpriv->n_ports)
2904 break;
2905 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2906 if (hc_cause & port_mask)
2907 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2908 }
2909 hc_mmio = mv_hc_base_from_port(mmio, port);
2910 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2911 handled = 1;
2912 }
2913
2914
2915
2916 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2917 if (port_cause)
2918 mv_port_intr(ap, port_cause);
2919 }
2920 return handled;
2921}
2922
2923static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2924{
2925 struct mv_host_priv *hpriv = host->private_data;
2926 struct ata_port *ap;
2927 struct ata_queued_cmd *qc;
2928 struct ata_eh_info *ehi;
2929 unsigned int i, err_mask, printed = 0;
2930 u32 err_cause;
2931
2932 err_cause = readl(mmio + hpriv->irq_cause_offset);
2933
2934 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
2935 err_cause);
2936
2937 DPRINTK("All regs @ PCI error\n");
2938 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2939
2940 writelfl(0, mmio + hpriv->irq_cause_offset);
2941
2942 for (i = 0; i < host->n_ports; i++) {
2943 ap = host->ports[i];
2944 if (!ata_link_offline(&ap->link)) {
2945 ehi = &ap->link.eh_info;
2946 ata_ehi_clear_desc(ehi);
2947 if (!printed++)
2948 ata_ehi_push_desc(ehi,
2949 "PCI err cause 0x%08x", err_cause);
2950 err_mask = AC_ERR_HOST_BUS;
2951 ehi->action = ATA_EH_RESET;
2952 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2953 if (qc)
2954 qc->err_mask |= err_mask;
2955 else
2956 ehi->err_mask |= err_mask;
2957
2958 ata_port_freeze(ap);
2959 }
2960 }
2961 return 1;
2962}
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978static irqreturn_t mv_interrupt(int irq, void *dev_instance)
2979{
2980 struct ata_host *host = dev_instance;
2981 struct mv_host_priv *hpriv = host->private_data;
2982 unsigned int handled = 0;
2983 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
2984 u32 main_irq_cause, pending_irqs;
2985
	spin_lock(&host->lock);

	/* for MSI: block new interrupts while in here */
2989 if (using_msi)
2990 mv_write_main_irq_mask(0, hpriv);
2991
2992 main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate hw removal or PCI fault.
	 */
2998 if (pending_irqs && main_irq_cause != 0xffffffffU) {
2999 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
3000 handled = mv_pci_error(host, hpriv->base);
3001 else
3002 handled = mv_host_intr(host, pending_irqs);
3003 }
3004
3005
3006 if (using_msi)
3007 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3008
3009 spin_unlock(&host->lock);
3010
3011 return IRQ_RETVAL(handled);
3012}
3013
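/*
 * mv5_scr_offset - map an SCR register index to its 50xx register offset.
 *
 * Only SCR_STATUS, SCR_ERROR, and SCR_CONTROL exist on these chips;
 * anything else returns 0xffffffffU, which callers turn into -EINVAL.
 */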
3014static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3015{
3016 unsigned int ofs;
3017
3018 switch (sc_reg_in) {
3019 case SCR_STATUS:
3020 case SCR_ERROR:
3021 case SCR_CONTROL:
3022 ofs = sc_reg_in * sizeof(u32);
3023 break;
3024 default:
3025 ofs = 0xffffffffU;
3026 break;
3027 }
3028 return ofs;
3029}
3030
3031static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3032{
3033 struct mv_host_priv *hpriv = link->ap->host->private_data;
3034 void __iomem *mmio = hpriv->base;
3035 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3036 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3037
3038 if (ofs != 0xffffffffU) {
3039 *val = readl(addr + ofs);
3040 return 0;
3041 } else
3042 return -EINVAL;
3043}
3044
3045static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3046{
3047 struct mv_host_priv *hpriv = link->ap->host->private_data;
3048 void __iomem *mmio = hpriv->base;
3049 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3050 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3051
3052 if (ofs != 0xffffffffU) {
3053 writelfl(val, addr + ofs);
3054 return 0;
3055 } else
3056 return -EINVAL;
3057}
3058
3059static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3060{
3061 struct pci_dev *pdev = to_pci_dev(host->dev);
3062 int early_5080;
3063
3064 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3065
3066 if (!early_5080) {
3067 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3068 tmp |= (1 << 0);
3069 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3070 }
3071
3072 mv_reset_pci_bus(host, mmio);
3073}
3074
3075static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3076{
3077 writel(0x0fcfffff, mmio + FLASH_CTL);
3078}
3079
3080static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3081 void __iomem *mmio)
3082{
3083 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3084 u32 tmp;
3085
3086 tmp = readl(phy_mmio + MV5_PHY_MODE);
3087
3088 hpriv->signal[idx].pre = tmp & 0x1800;
3089 hpriv->signal[idx].amps = tmp & 0xe0;
3090}
3091
3092static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3093{
3094 u32 tmp;
3095
3096 writel(0, mmio + GPIO_PORT_CTL);
3097
3098
3099
3100 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3101 tmp |= ~(1 << 0);
3102 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3103}
3104
3105static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3106 unsigned int port)
3107{
3108 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3109 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3110 u32 tmp;
3111 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3112
3113 if (fix_apm_sq) {
3114 tmp = readl(phy_mmio + MV5_LTMODE);
3115 tmp |= (1 << 19);
3116 writel(tmp, phy_mmio + MV5_LTMODE);
3117
3118 tmp = readl(phy_mmio + MV5_PHY_CTL);
3119 tmp &= ~0x3;
3120 tmp |= 0x1;
3121 writel(tmp, phy_mmio + MV5_PHY_CTL);
3122 }
3123
3124 tmp = readl(phy_mmio + MV5_PHY_MODE);
3125 tmp &= ~mask;
3126 tmp |= hpriv->signal[port].pre;
3127 tmp |= hpriv->signal[port].amps;
3128 writel(tmp, phy_mmio + MV5_PHY_MODE);
3129}
3130
3131
3132#undef ZERO
3133#define ZERO(reg) writel(0, port_mmio + (reg))
3134static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3135 unsigned int port)
3136{
3137 void __iomem *port_mmio = mv_port_base(mmio, port);
3138
3139 mv_reset_channel(hpriv, mmio, port);
3140
3141 ZERO(0x028);
3142 writel(0x11f, port_mmio + EDMA_CFG);
3143 ZERO(0x004);
3144 ZERO(0x008);
3145 ZERO(0x00c);
3146 ZERO(0x010);
3147 ZERO(0x014);
3148 ZERO(0x018);
3149 ZERO(0x01c);
3150 ZERO(0x024);
3151 ZERO(0x020);
3152 ZERO(0x02c);
3153 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3154}
3155#undef ZERO
3156
3157#define ZERO(reg) writel(0, hc_mmio + (reg))
3158static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3159 unsigned int hc)
3160{
3161 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3162 u32 tmp;
3163
3164 ZERO(0x00c);
3165 ZERO(0x010);
3166 ZERO(0x014);
3167 ZERO(0x018);
3168
3169 tmp = readl(hc_mmio + 0x20);
3170 tmp &= 0x1c1c1c1c;
3171 tmp |= 0x03030303;
3172 writel(tmp, hc_mmio + 0x20);
3173}
3174#undef ZERO
3175
3176static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3177 unsigned int n_hc)
3178{
3179 unsigned int hc, port;
3180
3181 for (hc = 0; hc < n_hc; hc++) {
3182 for (port = 0; port < MV_PORTS_PER_HC; port++)
3183 mv5_reset_hc_port(hpriv, mmio,
3184 (hc * MV_PORTS_PER_HC) + port);
3185
3186 mv5_reset_one_hc(hpriv, mmio, hc);
3187 }
3188
3189 return 0;
3190}
3191
3192#undef ZERO
3193#define ZERO(reg) writel(0, mmio + (reg))
3194static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3195{
3196 struct mv_host_priv *hpriv = host->private_data;
3197 u32 tmp;
3198
3199 tmp = readl(mmio + MV_PCI_MODE);
3200 tmp &= 0xff00ffff;
3201 writel(tmp, mmio + MV_PCI_MODE);
3202
3203 ZERO(MV_PCI_DISC_TIMER);
3204 ZERO(MV_PCI_MSI_TRIGGER);
3205 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3206 ZERO(MV_PCI_SERR_MASK);
3207 ZERO(hpriv->irq_cause_offset);
3208 ZERO(hpriv->irq_mask_offset);
3209 ZERO(MV_PCI_ERR_LOW_ADDRESS);
3210 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3211 ZERO(MV_PCI_ERR_ATTRIBUTE);
3212 ZERO(MV_PCI_ERR_COMMAND);
3213}
3214#undef ZERO
3215
3216static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3217{
3218 u32 tmp;
3219
3220 mv5_reset_flash(hpriv, mmio);
3221
3222 tmp = readl(mmio + GPIO_PORT_CTL);
3223 tmp &= 0x3;
3224 tmp |= (1 << 5) | (1 << 6);
3225 writel(tmp, mmio + GPIO_PORT_CTL);
3226}
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3238 unsigned int n_hc)
3239{
3240 void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3241 int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
3247 t = readl(reg);
3248 writel(t | STOP_PCI_MASTER, reg);
3249
3250 for (i = 0; i < 1000; i++) {
3251 udelay(1);
3252 t = readl(reg);
3253 if (PCI_MASTER_EMPTY & t)
3254 break;
3255 }
3256 if (!(PCI_MASTER_EMPTY & t)) {
3257 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3258 rc = 1;
3259 goto done;
3260 }
3261
3262
3263 i = 5;
3264 do {
3265 writel(t | GLOB_SFT_RST, reg);
3266 t = readl(reg);
3267 udelay(1);
3268 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3269
3270 if (!(GLOB_SFT_RST & t)) {
3271 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3272 rc = 1;
3273 goto done;
3274 }
3275
3276
3277 i = 5;
3278 do {
3279 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3280 t = readl(reg);
3281 udelay(1);
3282 } while ((GLOB_SFT_RST & t) && (i-- > 0));
3283
3284 if (GLOB_SFT_RST & t) {
3285 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3286 rc = 1;
3287 }
3288done:
3289 return rc;
3290}
3291
3292static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3293 void __iomem *mmio)
3294{
3295 void __iomem *port_mmio;
3296 u32 tmp;
3297
3298 tmp = readl(mmio + RESET_CFG);
3299 if ((tmp & (1 << 0)) == 0) {
3300 hpriv->signal[idx].amps = 0x7 << 8;
3301 hpriv->signal[idx].pre = 0x1 << 5;
3302 return;
3303 }
3304
3305 port_mmio = mv_port_base(mmio, idx);
3306 tmp = readl(port_mmio + PHY_MODE2);
3307
3308 hpriv->signal[idx].amps = tmp & 0x700;
3309 hpriv->signal[idx].pre = tmp & 0xe0;
3310}
3311
3312static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3313{
3314 writel(0x00000060, mmio + GPIO_PORT_CTL);
3315}
3316
3317static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3318 unsigned int port)
3319{
3320 void __iomem *port_mmio = mv_port_base(mmio, port);
3321
3322 u32 hp_flags = hpriv->hp_flags;
3323 int fix_phy_mode2 =
3324 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3325 int fix_phy_mode4 =
3326 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3327 u32 m2, m3;
3328
3329 if (fix_phy_mode2) {
3330 m2 = readl(port_mmio + PHY_MODE2);
3331 m2 &= ~(1 << 16);
3332 m2 |= (1 << 31);
3333 writel(m2, port_mmio + PHY_MODE2);
3334
3335 udelay(200);
3336
3337 m2 = readl(port_mmio + PHY_MODE2);
3338 m2 &= ~((1 << 16) | (1 << 31));
3339 writel(m2, port_mmio + PHY_MODE2);
3340
3341 udelay(200);
3342 }
3343
3344
3345
3346
3347
3348 m3 = readl(port_mmio + PHY_MODE3);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	/* Guideline 88F5182 (GL# SATA-S11) */
3352 if (IS_SOC(hpriv))
3353 m3 &= ~0x1c;
3354
3355 if (fix_phy_mode4) {
		u32 m4 = readl(port_mmio + PHY_MODE4);

		/*
		 * Enforce reserved-bit restrictions on GenIIe devices only.
		 * For earlier chipsets, force only the internal config field
		 * (workaround for errata FEr SATA#10 part 1).
		 */
3362 if (IS_GEN_IIE(hpriv))
3363 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3364 else
3365 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3366 writel(m4, port_mmio + PHY_MODE4);
3367 }
3368
3369
3370
3371
3372
3373
	writel(m3, port_mmio + PHY_MODE3);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
3377 m2 = readl(port_mmio + PHY_MODE2);
3378
3379 m2 &= ~MV_M2_PREAMP_MASK;
3380 m2 |= hpriv->signal[port].amps;
3381 m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* According to mvSata 3.6.1, some IIE values are fixed */
3385 if (IS_GEN_IIE(hpriv)) {
3386 m2 &= ~0xC30FF01F;
3387 m2 |= 0x0000900F;
3388 }
3389
3390 writel(m2, port_mmio + PHY_MODE2);
3391}
3392
3393
3394
3395static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3396 void __iomem *mmio)
3397{
3398 return;
3399}
3400
3401static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3402 void __iomem *mmio)
3403{
3404 void __iomem *port_mmio;
3405 u32 tmp;
3406
3407 port_mmio = mv_port_base(mmio, idx);
3408 tmp = readl(port_mmio + PHY_MODE2);
3409
3410 hpriv->signal[idx].amps = tmp & 0x700;
3411 hpriv->signal[idx].pre = tmp & 0xe0;
3412}
3413
3414#undef ZERO
3415#define ZERO(reg) writel(0, port_mmio + (reg))
3416static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3417 void __iomem *mmio, unsigned int port)
3418{
3419 void __iomem *port_mmio = mv_port_base(mmio, port);
3420
3421 mv_reset_channel(hpriv, mmio, port);
3422
3423 ZERO(0x028);
3424 writel(0x101f, port_mmio + EDMA_CFG);
3425 ZERO(0x004);
3426 ZERO(0x008);
3427 ZERO(0x00c);
3428 ZERO(0x010);
3429 ZERO(0x014);
3430 ZERO(0x018);
3431 ZERO(0x01c);
3432 ZERO(0x024);
3433 ZERO(0x020);
3434 ZERO(0x02c);
3435 writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3436}
3437
3438#undef ZERO
3439
3440#define ZERO(reg) writel(0, hc_mmio + (reg))
3441static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3442 void __iomem *mmio)
3443{
3444 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3445
3446 ZERO(0x00c);
3447 ZERO(0x010);
	ZERO(0x014);
3450}
3451
3452#undef ZERO
3453
3454static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3455 void __iomem *mmio, unsigned int n_hc)
3456{
3457 unsigned int port;
3458
3459 for (port = 0; port < hpriv->n_ports; port++)
3460 mv_soc_reset_hc_port(hpriv, mmio, port);
3461
3462 mv_soc_reset_one_hc(hpriv, mmio);
3463
3464 return 0;
3465}
3466
3467static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3468 void __iomem *mmio)
3469{
3470 return;
3471}
3472
3473static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3474{
3475 return;
3476}
3477
3478static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3479 void __iomem *mmio, unsigned int port)
3480{
3481 void __iomem *port_mmio = mv_port_base(mmio, port);
3482 u32 reg;
3483
3484 reg = readl(port_mmio + PHY_MODE3);
3485 reg &= ~(0x3 << 27);
3486 reg |= (0x1 << 27);
3487 reg &= ~(0x3 << 29);
3488 reg |= (0x1 << 29);
3489 writel(reg, port_mmio + PHY_MODE3);
3490
3491 reg = readl(port_mmio + PHY_MODE4);
3492 reg &= ~0x1;
3493 reg |= (0x1 << 16);
3494 writel(reg, port_mmio + PHY_MODE4);
3495
3496 reg = readl(port_mmio + PHY_MODE9_GEN2);
3497 reg &= ~0xf;
3498 reg |= 0x8;
3499 reg &= ~(0x1 << 14);
3500 writel(reg, port_mmio + PHY_MODE9_GEN2);
3501
3502 reg = readl(port_mmio + PHY_MODE9_GEN1);
3503 reg &= ~0xf;
3504 reg |= 0x8;
3505 reg &= ~(0x1 << 14);
3506 writel(reg, port_mmio + PHY_MODE9_GEN1);
3507}
3508
3509
3510
3511
3512
3513
3514
3515
3516static bool soc_is_65n(struct mv_host_priv *hpriv)
3517{
3518 void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3519
3520 if (readl(port0_mmio + PHYCFG_OFS))
3521 return true;
3522 return false;
3523}
3524
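/*
 * mv_setup_ifcfg - program the SATA interface configuration register.
 *
 * Setting bit 7 of SATA_IFCFG selects Gen2i (3.0 Gb/s) operation;
 * clearing it restricts the link to 1.5 Gb/s, which mv_hardreset()
 * uses as a fallback for devices that refuse to establish a link.
 */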
3525static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3526{
3527 u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3528
3529 ifcfg = (ifcfg & 0xf7f) | 0x9b1000;
3530 if (want_gen2i)
3531 ifcfg |= (1 << 7);
3532 writelfl(ifcfg, port_mmio + SATA_IFCFG);
3533}
3534
3535static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3536 unsigned int port_no)
3537{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/* Halt the EDMA engine before asserting EDMA_RESET */
3545 mv_stop_edma_engine(port_mmio);
3546 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3547
	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3550 mv_setup_ifcfg(port_mmio, 1);
3551 }
3552
3553
3554
3555
3556
3557 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3558 udelay(25);
3559 writelfl(0, port_mmio + EDMA_CMD);
3560
3561 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3562
3563 if (IS_GEN_I(hpriv))
3564 mdelay(1);
3565}
3566
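/*
 * mv_pmp_select - route register-based commands to a port-multiplier port.
 *
 * The low nibble of SATA_IFCTL selects which device behind the PMP
 * receives non-EDMA traffic; rewrite it only when the target changes.
 */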
3567static void mv_pmp_select(struct ata_port *ap, int pmp)
3568{
3569 if (sata_pmp_supported(ap)) {
3570 void __iomem *port_mmio = mv_ap_base(ap);
3571 u32 reg = readl(port_mmio + SATA_IFCTL);
3572 int old = reg & 0xf;
3573
3574 if (old != pmp) {
3575 reg = (reg & ~0xf) | pmp;
3576 writelfl(reg, port_mmio + SATA_IFCTL);
3577 }
3578 }
3579}
3580
3581static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3582 unsigned long deadline)
3583{
3584 mv_pmp_select(link->ap, sata_srst_pmp(link));
3585 return sata_std_hardreset(link, class, deadline);
3586}
3587
3588static int mv_softreset(struct ata_link *link, unsigned int *class,
3589 unsigned long deadline)
3590{
3591 mv_pmp_select(link->ap, sata_srst_pmp(link));
3592 return ata_sff_softreset(link, class, deadline);
3593}
3594
3595static int mv_hardreset(struct ata_link *link, unsigned int *class,
3596 unsigned long deadline)
3597{
3598 struct ata_port *ap = link->ap;
3599 struct mv_host_priv *hpriv = ap->host->private_data;
3600 struct mv_port_priv *pp = ap->private_data;
3601 void __iomem *mmio = hpriv->base;
3602 int rc, attempts = 0, extra = 0;
3603 u32 sstatus;
3604 bool online;
3605
3606 mv_reset_channel(hpriv, mmio, ap->port_no);
3607 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3608 pp->pp_flags &=
		~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	/* Workaround for errata FEr SATA#10 (part 2) */
3612 do {
3613 const unsigned long *timing =
3614 sata_ehc_deb_timing(&link->eh_context);
3615
3616 rc = sata_link_hardreset(link, timing, deadline + extra,
3617 &online, NULL);
3618 rc = online ? -EAGAIN : rc;
3619 if (rc)
3620 return rc;
3621 sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
3627 }
3628 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
3629 mv_save_cached_regs(ap);
3630 mv_edma_cfg(ap, 0, 0);
3631
3632 return rc;
3633}
3634
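/*
 * mv_eh_freeze / mv_eh_thaw - libata freeze/thaw hooks for this port.
 *
 * Freeze halts EDMA and masks all per-port interrupts.  Thaw clears any
 * latched EDMA error and host-controller interrupt causes for the port,
 * then re-enables error interrupts only.
 */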
3635static void mv_eh_freeze(struct ata_port *ap)
3636{
3637 mv_stop_edma(ap);
3638 mv_enable_port_irqs(ap, 0);
3639}
3640
3641static void mv_eh_thaw(struct ata_port *ap)
3642{
3643 struct mv_host_priv *hpriv = ap->host->private_data;
3644 unsigned int port = ap->port_no;
3645 unsigned int hardport = mv_hardport_from_port(port);
3646 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3647 void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
3654 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3655 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3656
3657 mv_enable_port_irqs(ap, ERR_IRQ);
3658}
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3673{
	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;

	/* PIO related setup */
3678 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3679 port->error_addr =
3680 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3681 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3682 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3683 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3684 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3685 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3686 port->status_addr =
3687 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3688
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;

	/* Clear any currently outstanding port interrupt conditions */
3692 serr = port_mmio + mv_scr_offset(SCR_ERROR);
3693 writelfl(readl(serr), serr);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* unmask all non-transient EDMA error interrupts */
3697 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3698
3699 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3700 readl(port_mmio + EDMA_CFG),
3701 readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3702 readl(port_mmio + EDMA_ERR_IRQ_MASK));
3703}
3704
3705static unsigned int mv_in_pcix_mode(struct ata_host *host)
3706{
3707 struct mv_host_priv *hpriv = host->private_data;
3708 void __iomem *mmio = hpriv->base;
3709 u32 reg;
3710
3711 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3712 return 0;
3713 reg = readl(mmio + MV_PCI_MODE);
3714 if ((reg & MV_PCI_MODE_MASK) == 0)
3715 return 0;
3716 return 1;
3717}
3718
3719static int mv_pci_cut_through_okay(struct ata_host *host)
3720{
3721 struct mv_host_priv *hpriv = host->private_data;
3722 void __iomem *mmio = hpriv->base;
3723 u32 reg;
3724
3725 if (!mv_in_pcix_mode(host)) {
3726 reg = readl(mmio + MV_PCI_COMMAND);
3727 if (reg & MV_PCI_COMMAND_MRDTRIG)
3728 return 0;
3729 }
3730 return 1;
3731}
3732
3733static void mv_60x1b2_errata_pci7(struct ata_host *host)
3734{
3735 struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	/* workaround for 60x1-B2 errata PCI#7 */
3739 if (mv_in_pcix_mode(host)) {
3740 u32 reg = readl(mmio + MV_PCI_COMMAND);
3741 writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3742 }
3743}
3744
3745static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3746{
3747 struct pci_dev *pdev = to_pci_dev(host->dev);
3748 struct mv_host_priv *hpriv = host->private_data;
3749 u32 hp_flags = hpriv->hp_flags;
3750
3751 switch (board_idx) {
3752 case chip_5080:
3753 hpriv->ops = &mv5xxx_ops;
3754 hp_flags |= MV_HP_GEN_I;
3755
3756 switch (pdev->revision) {
3757 case 0x1:
3758 hp_flags |= MV_HP_ERRATA_50XXB0;
3759 break;
3760 case 0x3:
3761 hp_flags |= MV_HP_ERRATA_50XXB2;
3762 break;
3763 default:
3764 dev_printk(KERN_WARNING, &pdev->dev,
3765 "Applying 50XXB2 workarounds to unknown rev\n");
3766 hp_flags |= MV_HP_ERRATA_50XXB2;
3767 break;
3768 }
3769 break;
3770
3771 case chip_504x:
3772 case chip_508x:
3773 hpriv->ops = &mv5xxx_ops;
3774 hp_flags |= MV_HP_GEN_I;
3775
3776 switch (pdev->revision) {
3777 case 0x0:
3778 hp_flags |= MV_HP_ERRATA_50XXB0;
3779 break;
3780 case 0x3:
3781 hp_flags |= MV_HP_ERRATA_50XXB2;
3782 break;
3783 default:
3784 dev_printk(KERN_WARNING, &pdev->dev,
3785 "Applying B2 workarounds to unknown rev\n");
3786 hp_flags |= MV_HP_ERRATA_50XXB2;
3787 break;
3788 }
3789 break;
3790
3791 case chip_604x:
3792 case chip_608x:
3793 hpriv->ops = &mv6xxx_ops;
3794 hp_flags |= MV_HP_GEN_II;
3795
3796 switch (pdev->revision) {
3797 case 0x7:
3798 mv_60x1b2_errata_pci7(host);
3799 hp_flags |= MV_HP_ERRATA_60X1B2;
3800 break;
3801 case 0x9:
3802 hp_flags |= MV_HP_ERRATA_60X1C0;
3803 break;
3804 default:
3805 dev_printk(KERN_WARNING, &pdev->dev,
3806 "Applying B2 workarounds to unknown rev\n");
3807 hp_flags |= MV_HP_ERRATA_60X1B2;
3808 break;
3809 }
3810 break;
3811
3812 case chip_7042:
3813 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3814 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3815 (pdev->device == 0x2300 || pdev->device == 0x2310))
3816 {
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3835 " BIOS CORRUPTS DATA on all attached drives,"
3836 " regardless of if/how they are configured."
3837 " BEWARE!\n");
3838 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3839 " use sectors 8-9 on \"Legacy\" drives,"
3840 " and avoid the final two gigabytes on"
3841 " all RocketRAID BIOS initialized drives.\n");
3842 }
3843
3844 case chip_6042:
3845 hpriv->ops = &mv6xxx_ops;
3846 hp_flags |= MV_HP_GEN_IIE;
3847 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3848 hp_flags |= MV_HP_CUT_THROUGH;
3849
3850 switch (pdev->revision) {
3851 case 0x2:
3852 hp_flags |= MV_HP_ERRATA_60X1C0;
3853 break;
3854 default:
3855 dev_printk(KERN_WARNING, &pdev->dev,
3856 "Applying 60X1C0 workarounds to unknown rev\n");
3857 hp_flags |= MV_HP_ERRATA_60X1C0;
3858 break;
3859 }
3860 break;
3861 case chip_soc:
3862 if (soc_is_65n(hpriv))
3863 hpriv->ops = &mv_soc_65n_ops;
3864 else
3865 hpriv->ops = &mv_soc_ops;
3866 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3867 MV_HP_ERRATA_60X1C0;
3868 break;
3869
3870 default:
3871 dev_printk(KERN_ERR, host->dev,
3872 "BUG: invalid board index %u\n", board_idx);
3873 return 1;
3874 }
3875
3876 hpriv->hp_flags = hp_flags;
3877 if (hp_flags & MV_HP_PCIE) {
3878 hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3879 hpriv->irq_mask_offset = PCIE_IRQ_MASK;
3880 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3881 } else {
3882 hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3883 hpriv->irq_mask_offset = PCI_IRQ_MASK;
3884 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
3885 }
3886
3887 return 0;
3888}
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900static int mv_init_host(struct ata_host *host)
3901{
3902 int rc = 0, n_hc, port, hc;
3903 struct mv_host_priv *hpriv = host->private_data;
3904 void __iomem *mmio = hpriv->base;
3905
3906 rc = mv_chip_id(host, hpriv->board_idx);
3907 if (rc)
3908 goto done;
3909
3910 if (IS_SOC(hpriv)) {
3911 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3912 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
3913 } else {
3914 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3915 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
3916 }
3917
3918
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
3922 mv_set_main_irq_mask(host, ~0, 0);
3923
3924 n_hc = mv_get_hc_count(host->ports[0]->flags);
3925
3926 for (port = 0; port < host->n_ports; port++)
3927 if (hpriv->ops->read_preamp)
3928 hpriv->ops->read_preamp(hpriv, port, mmio);
3929
3930 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3931 if (rc)
3932 goto done;
3933
3934 hpriv->ops->reset_flash(hpriv, mmio);
3935 hpriv->ops->reset_bus(host, mmio);
3936 hpriv->ops->enable_leds(hpriv, mmio);
3937
3938 for (port = 0; port < host->n_ports; port++) {
3939 struct ata_port *ap = host->ports[port];
3940 void __iomem *port_mmio = mv_port_base(mmio, port);
3941
3942 mv_port_init(&ap->ioaddr, port_mmio);
3943 }
3944
3945 for (hc = 0; hc < n_hc; hc++) {
3946 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3947
3948 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3949 "(before clear)=0x%08x\n", hc,
3950 readl(hc_mmio + HC_CFG),
			readl(hc_mmio + HC_IRQ_CAUSE));

		/* Clear any currently outstanding hc interrupt conditions */
3954 writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3955 }
3956
	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_offset);

		/* and unmask interrupt generation for host regs */
3962 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3963 }
3964
3965
3966
3967
3968
3969 mv_set_main_irq_mask(host, 0, PCI_ERR);
3970 mv_set_irq_coalescing(host, irq_coalescing_io_count,
3971 irq_coalescing_usecs);
3972done:
3973 return rc;
3974}
3975
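/*
 * mv_create_dma_pools - allocate the managed DMA pools used by each port.
 *
 * One pool each for the command request queue (CRQB), the response
 * queue (CRPB), and the scatter/gather tables.  dmam_pool_create()
 * ties their lifetime to the device, so no explicit destroy is needed.
 */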
3976static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3977{
3978 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3979 MV_CRQB_Q_SZ, 0);
3980 if (!hpriv->crqb_pool)
3981 return -ENOMEM;
3982
3983 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3984 MV_CRPB_Q_SZ, 0);
3985 if (!hpriv->crpb_pool)
3986 return -ENOMEM;
3987
3988 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3989 MV_SG_TBL_SZ, 0);
3990 if (!hpriv->sg_tbl_pool)
3991 return -ENOMEM;
3992
3993 return 0;
3994}
3995
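/*
 * mv_conf_mbus_windows - program the SoC's MBUS address decode windows.
 *
 * Clears all four windows, then opens one per DRAM chip-select so the
 * SATA controller's DMA can reach each region of system memory.
 */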
3996static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
3997 struct mbus_dram_target_info *dram)
3998{
3999 int i;
4000
4001 for (i = 0; i < 4; i++) {
4002 writel(0, hpriv->base + WINDOW_CTRL(i));
4003 writel(0, hpriv->base + WINDOW_BASE(i));
4004 }
4005
4006 for (i = 0; i < dram->num_cs; i++) {
4007 struct mbus_dram_window *cs = dram->cs + i;
4008
4009 writel(((cs->size - 1) & 0xffff0000) |
4010 (cs->mbus_attr << 8) |
4011 (dram->mbus_dram_target_id << 4) | 1,
4012 hpriv->base + WINDOW_CTRL(i));
4013 writel(cs->base, hpriv->base + WINDOW_BASE(i));
4014 }
4015}
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025static int mv_platform_probe(struct platform_device *pdev)
4026{
4027 static int printed_version;
4028 const struct mv_sata_platform_data *mv_platform_data;
4029 const struct ata_port_info *ppi[] =
4030 { &mv_port_info[chip_soc], NULL };
4031 struct ata_host *host;
4032 struct mv_host_priv *hpriv;
4033 struct resource *res;
4034 int n_ports, rc;
4035
4036 if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
4042 if (unlikely(pdev->num_resources != 2)) {
4043 dev_err(&pdev->dev, "invalid number of resources\n");
4044 return -EINVAL;
4045 }
4046
4047
4048
4049
4050 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4051 if (res == NULL)
		return -EINVAL;

	/* allocate host */
4055 mv_platform_data = pdev->dev.platform_data;
4056 n_ports = mv_platform_data->n_ports;
4057
4058 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4059 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4060
4061 if (!host || !hpriv)
4062 return -ENOMEM;
4063 host->private_data = hpriv;
4064 hpriv->n_ports = n_ports;
4065 hpriv->board_idx = chip_soc;
4066
4067 host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   resource_size(res));
	if (!hpriv->base)
		return -ENOMEM;

	hpriv->base -= SATAHC0_REG_BASE;
4071
4072#if defined(CONFIG_HAVE_CLK)
4073 hpriv->clk = clk_get(&pdev->dev, NULL);
4074 if (IS_ERR(hpriv->clk))
4075 dev_notice(&pdev->dev, "cannot get clkdev\n");
4076 else
4077 clk_enable(hpriv->clk);
#endif

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
4083 if (mv_platform_data->dram != NULL)
4084 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
4085
4086 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4087 if (rc)
		goto err;

	/* initialize adapter */
4091 rc = mv_init_host(host);
4092 if (rc)
4093 goto err;
4094
4095 dev_printk(KERN_INFO, &pdev->dev,
4096 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
4097 host->n_ports);
4098
4099 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
4100 IRQF_SHARED, &mv6_sht);
4101err:
4102#if defined(CONFIG_HAVE_CLK)
4103 if (!IS_ERR(hpriv->clk)) {
4104 clk_disable(hpriv->clk);
4105 clk_put(hpriv->clk);
4106 }
4107#endif
4108
4109 return rc;
4110}
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120static int __devexit mv_platform_remove(struct platform_device *pdev)
4121{
4122 struct device *dev = &pdev->dev;
4123 struct ata_host *host = dev_get_drvdata(dev);
4124#if defined(CONFIG_HAVE_CLK)
4125 struct mv_host_priv *hpriv = host->private_data;
4126#endif
4127 ata_host_detach(host);
4128
4129#if defined(CONFIG_HAVE_CLK)
4130 if (!IS_ERR(hpriv->clk)) {
4131 clk_disable(hpriv->clk);
4132 clk_put(hpriv->clk);
4133 }
4134#endif
4135 return 0;
4136}
4137
4138#ifdef CONFIG_PM
4139static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4140{
4141 struct ata_host *host = dev_get_drvdata(&pdev->dev);
4142 if (host)
4143 return ata_host_suspend(host, state);
4144 else
4145 return 0;
4146}
4147
4148static int mv_platform_resume(struct platform_device *pdev)
4149{
4150 struct ata_host *host = dev_get_drvdata(&pdev->dev);
4151 int ret;
4152
4153 if (host) {
4154 struct mv_host_priv *hpriv = host->private_data;
		const struct mv_sata_platform_data *mv_platform_data =
			pdev->dev.platform_data;

		/*
		 * (Re-)program MBUS remapping windows if we are asked to.
		 */
4160 if (mv_platform_data->dram != NULL)
			mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

		/* initialize adapter */
4164 ret = mv_init_host(host);
4165 if (ret) {
4166 printk(KERN_ERR DRV_NAME ": Error during HW init\n");
4167 return ret;
4168 }
4169 ata_host_resume(host);
4170 }
4171
4172 return 0;
4173}
4174#else
4175#define mv_platform_suspend NULL
4176#define mv_platform_resume NULL
4177#endif
4178
4179static struct platform_driver mv_platform_driver = {
4180 .probe = mv_platform_probe,
4181 .remove = __devexit_p(mv_platform_remove),
4182 .suspend = mv_platform_suspend,
4183 .resume = mv_platform_resume,
4184 .driver = {
4185 .name = DRV_NAME,
4186 .owner = THIS_MODULE,
4187 },
4188};
4189
4190
4191#ifdef CONFIG_PCI
4192static int mv_pci_init_one(struct pci_dev *pdev,
4193 const struct pci_device_id *ent);
4194#ifdef CONFIG_PM
4195static int mv_pci_device_resume(struct pci_dev *pdev);
4196#endif
4197
4198
4199static struct pci_driver mv_pci_driver = {
4200 .name = DRV_NAME,
4201 .id_table = mv_pci_tbl,
4202 .probe = mv_pci_init_one,
4203 .remove = ata_pci_remove_one,
4204#ifdef CONFIG_PM
4205 .suspend = ata_pci_device_suspend,
4206 .resume = mv_pci_device_resume,
4207#endif
4209};
4210
4211
4212static int pci_go_64(struct pci_dev *pdev)
4213{
4214 int rc;
4215
4216 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4217 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4218 if (rc) {
4219 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4220 if (rc) {
4221 dev_printk(KERN_ERR, &pdev->dev,
4222 "64-bit DMA enable failed\n");
4223 return rc;
4224 }
4225 }
4226 } else {
4227 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4228 if (rc) {
4229 dev_printk(KERN_ERR, &pdev->dev,
4230 "32-bit DMA enable failed\n");
4231 return rc;
4232 }
4233 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4234 if (rc) {
4235 dev_printk(KERN_ERR, &pdev->dev,
4236 "32-bit consistent DMA enable failed\n");
4237 return rc;
4238 }
4239 }
4240
4241 return rc;
4242}
4243
4244
4245
4246
4247
4248
4249
4250
4251
4252
4253static void mv_print_info(struct ata_host *host)
4254{
4255 struct pci_dev *pdev = to_pci_dev(host->dev);
4256 struct mv_host_priv *hpriv = host->private_data;
4257 u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
4263 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4264 if (scc == 0)
4265 scc_s = "SCSI";
4266 else if (scc == 0x01)
4267 scc_s = "RAID";
4268 else
4269 scc_s = "?";
4270
4271 if (IS_GEN_I(hpriv))
4272 gen = "I";
4273 else if (IS_GEN_II(hpriv))
4274 gen = "II";
4275 else if (IS_GEN_IIE(hpriv))
4276 gen = "IIE";
4277 else
4278 gen = "?";
4279
4280 dev_printk(KERN_INFO, &pdev->dev,
4281 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4282 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
4283 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4284}
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294static int mv_pci_init_one(struct pci_dev *pdev,
4295 const struct pci_device_id *ent)
4296{
4297 static int printed_version;
4298 unsigned int board_idx = (unsigned int)ent->driver_data;
4299 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4300 struct ata_host *host;
4301 struct mv_host_priv *hpriv;
4302 int n_ports, port, rc;
4303
4304 if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
4308 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4309
4310 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4311 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4312 if (!host || !hpriv)
4313 return -ENOMEM;
4314 host->private_data = hpriv;
4315 hpriv->n_ports = n_ports;
	hpriv->board_idx = board_idx;

	/* acquire resources */
4319 rc = pcim_enable_device(pdev);
4320 if (rc)
4321 return rc;
4322
4323 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4324 if (rc == -EBUSY)
4325 pcim_pin_device(pdev);
4326 if (rc)
4327 return rc;
4328 host->iomap = pcim_iomap_table(pdev);
4329 hpriv->base = host->iomap[MV_PRIMARY_BAR];
4330
4331 rc = pci_go_64(pdev);
4332 if (rc)
4333 return rc;
4334
4335 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4336 if (rc)
4337 return rc;
4338
4339 for (port = 0; port < host->n_ports; port++) {
4340 struct ata_port *ap = host->ports[port];
4341 void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4342 unsigned int offset = port_mmio - hpriv->base;
4343
4344 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4345 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4346 }
4347
4348
4349 rc = mv_init_host(host);
4350 if (rc)
		return rc;

	/* Enable message-signaled interrupts (MSI), if requested */
4354 if (msi && pci_enable_msi(pdev) == 0)
4355 hpriv->hp_flags |= MV_HP_FLAG_MSI;
4356
4357 mv_dump_pci_cfg(pdev, 0x68);
4358 mv_print_info(host);
4359
4360 pci_set_master(pdev);
4361 pci_try_set_mwi(pdev);
4362 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
4363 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
4364}
4365
4366#ifdef CONFIG_PM
4367static int mv_pci_device_resume(struct pci_dev *pdev)
4368{
4369 struct ata_host *host = dev_get_drvdata(&pdev->dev);
4370 int rc;
4371
4372 rc = ata_pci_device_do_resume(pdev);
4373 if (rc)
		return rc;

	/* initialize adapter */
4377 rc = mv_init_host(host);
4378 if (rc)
4379 return rc;
4380
4381 ata_host_resume(host);
4382
4383 return 0;
4384}
4385#endif
4386#endif
4387
4391static int __init mv_init(void)
4392{
4393 int rc = -ENODEV;
4394#ifdef CONFIG_PCI
4395 rc = pci_register_driver(&mv_pci_driver);
4396 if (rc < 0)
4397 return rc;
4398#endif
4399 rc = platform_driver_register(&mv_platform_driver);
4400
4401#ifdef CONFIG_PCI
4402 if (rc < 0)
4403 pci_unregister_driver(&mv_pci_driver);
4404#endif
4405 return rc;
4406}
4407
4408static void __exit mv_exit(void)
4409{
4410#ifdef CONFIG_PCI
4411 pci_unregister_driver(&mv_pci_driver);
4412#endif
4413 platform_driver_unregister(&mv_platform_driver);
4414}
4415
4416MODULE_AUTHOR("Brett Russ");
4417MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4418MODULE_LICENSE("GPL");
4419MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4420MODULE_VERSION(DRV_VERSION);
4421MODULE_ALIAS("platform:" DRV_NAME);
4422
4423module_init(mv_init);
4424module_exit(mv_exit);
4425