/*
 * sata_mv.c - Marvell SATA support
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.28"

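/*
 * Module parameters: all three are read-only (S_IRUGO), so they can be
 * set only at module load time (or on the kernel command line), not
 * changed via sysfs at runtime.
 */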
static int msi;
#ifdef CONFIG_PCI
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,
	MV_IO_BAR		= 2,
	MV_MISC_BAR		= 3,

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC" */
	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */

	MV_PCI_REG_BASE		= 0,

	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	COAL_REG_BASE		= 0x18000,
	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */

	IRQ_COAL_IO_THRESHOLD	= (COAL_REG_BASE + 0xcc),
	IRQ_COAL_TIME_THRESHOLD	= (COAL_REG_BASE + 0xd0),

	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),

	SATAHC0_REG_BASE	= 0x20000,
	FLASH_CTL		= 0x1046c,
	GPIO_PORT_CTL		= 0x104f0,
	RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	MV_PCI_COMMAND		= 0xc00,
	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE		= 0x1d58,
	PCI_IRQ_MASK		= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE		= 0x1900,
	PCIE_IRQ_MASK		= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
	PCI_ERR			= (1 << 18),
	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG			= 0x00,

	HC_IRQ_CAUSE		= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/*
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	HC_IRQ_COAL_IO_THRESHOLD	= 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD	= 0x0010,

	SOC_LED_CTRL		= 0x2c,
	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */

	/* Shadow block registers */
	SHD_BLK			= 0x100,
	SHD_CTL_AST		= 0x20,		/* ofs from SHD_BLK */

	/* SATA registers */
	SATA_STATUS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE		= 0x350,
	FIS_IRQ_CAUSE		= 0x364,
	FIS_IRQ_CAUSE_AN	= (1 << 9),	/* async notification */

	LTMODE			= 0x30c,	/* requires read-after-write */
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE2		= 0x330,
	PHY_MODE3		= 0x310,

	PHY_MODE4		= 0x314,	/* requires read-after-write */
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	SATA_IFCTL		= 0x344,
	SATA_TESTCTL		= 0x348,
	SATA_IFSTAT		= 0x34c,
	VENDOR_UNIQUE_FIS	= 0x35c,

	FISCFG			= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	PHY_MODE9_GEN2		= 0x398,
	PHY_MODE9_GEN1		= 0x39c,
	PHYCFG_OFS		= 0x3a0,	/* only in 65n devices */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_IFCFG		= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE	= 0x8,
	EDMA_ERR_IRQ_MASK	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI	= 0x10,
	EDMA_REQ_Q_IN_PTR	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI	= 0x1c,
	EDMA_RSP_Q_IN_PTR	= 0x20,
	EDMA_RSP_Q_OUT_PTR	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */

	BMDMA_CMD		= 0x224,	/* bmdma command register */
	BMDMA_STATUS		= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
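
/*
 * Each WINDOW_CTRL/WINDOW_BASE pair addresses one chip address-decode
 * window, at a 16-byte register stride starting at 0x20030.  On the SoC
 * variants these windows are programmed to map DRAM regions for DMA.
 */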

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. 'SG' */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	unsigned int		board_idx;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_offset;
	u32			irq_mask_offset;
	u32			unmask_all_irqs;

#if defined(CONFIG_HAVE_CLK)
	struct clk		*clk;
#endif
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below,
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.dev_config		= mv6_dev_config,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.softreset		= mv_softreset,
	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

static const struct mv_hw_ops mv_soc_65n_ops = {
	.phy_errata		= mv_soc_65n_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
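
/*
 * Worked example for the macro above: port 5 gives
 * mv_hc_from_port(5) == 1 and mv_hardport_from_port(5) == 1, so
 * shift = 1 * HC_SHIFT + 1 * 2 = 11.  That port's ERR_IRQ and DONE_IRQ
 * bits then sit at positions 11 and 12 of the main cause/mask registers.
 */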

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
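
/*
 * Putting the constants together, a port's register block lives at:
 *	base + 0x20000 + (hc * 0x10000) + 0x2000 + (hardport * 0x2000)
 * e.g. port 0 decodes to base + 0x22000, and port 5 (hc 1, hardport 1)
 * decodes to base + 0x34000.
 */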

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/*
 *	mv_save_cached_regs - (re-)initialize cached port registers
 *	@ap: the port whose registers we are caching
 *
 *	Initialize the local cache of port registers,
 *	so that reading them over and over again can
 *	be avoided on the hotter paths of this driver.
 *	This saves a few microseconds each time we switch
 *	to/from EDMA mode to perform (eg.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG);
	pp->cached.ltmode = readl(port_mmio + LTMODE);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}

/*
 *	mv_write_cached_reg - write to a cached port register
 *	@addr: hardware address of the register
 *	@old: pointer to this register's cached value
 *	@new: new value for this register
 *
 *	Write a new value to a cached register,
 *	but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		unsigned long laddr;
		*old = new;
		/*
		 * Workaround for 88SX60x1-B2 FEr SATA#13:
		 * Read-after-write is needed to prevent generating 64-bit
		 * write cycles on the PCI bus for SATA interface registers
		 * at offsets ending in 0x4 or 0xc.
		 *
		 * Looks like a lot of fuss, but it avoids an unnecessary
		 * +1 usec read-after-write delay for unaffected registers.
		 */
		laddr = (long)addr & 0xffff;
		if (laddr >= 0x300 && laddr <= 0x33c) {
			laddr &= 0x000f;
			if (laddr == 0x4 || laddr == 0xc) {
				writelfl(new, addr); /* read after write */
				return;
			}
		}
		writel(new, addr); /* unaffected by the errata */
	}
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
}
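
/*
 * Queue geometry: the request ring holds MV_MAX_Q_DEPTH (32) CRQBs of
 * 32 bytes each, so a queue index maps directly onto address bits 5..9;
 * hence index = req_idx << EDMA_REQ_Q_PTR_SHIFT (5), OR'd into the same
 * register as the 1KB-aligned ring base.  The response ring uses 8-byte
 * CRPBs, so its shift is 3 and its base is 256-byte aligned.
 */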

static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}
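
/*
 * For example, enabling ALL_PORTS_COAL_DONE while the per-port DONE_IRQ
 * bits for ports 0-3 (mask 0x000000aa) remain set could signal each
 * completion twice, once coalesced and once per-port; the filtering
 * above keeps only the coalesced source enabled.
 */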

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + FIS_IRQ_CAUSE);

	mv_enable_port_irqs(ap, port_irqs);
}

/*
 *	mv_set_irq_coalescing - configure IRQ coalescing
 *	@host: host to configure
 *	@count: threshold count for completed I/Os
 *	@usecs: threshold time, in microseconds
 *
 *	Configure the IRQ coalescing parameters
 *	("1 IRQ per N completions" or "1 IRQ per N usecs").
 */
static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
							ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		/*
		 * GEN_II/GEN_IIE with dual host controllers:
		 * one set of global thresholds for the entire chip.
		 */
		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
		if (count)
			coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0; /* global thresholds used; zero per-HC below */
	}

	/*
	 * All chips: independent thresholds for each HC on the chip.
	 */
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
	if (count)
		coal_enable |= PORTS_0_3_COAL_DONE;
	if (is_dual_hc) {
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
		if (count)
			coal_enable |= PORTS_4_7_COAL_DONE;
	}

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
}
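
/*
 * Worked example: irq_coalescing_usecs=100 with irq_coalescing_io_count=4
 * programs clks = 100 * 150 = 15000 chip clocks (well under the 24-bit
 * cap of 16777215), i.e. roughly one interrupt per 4 completions, or
 * after ~100 usecs with completions pending, whichever comes first.
 */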

/*
 *	mv_start_edma - Enable eDMA engine
 *	@ap: ATA channel to manipulate
 *	@port_mmio: port base address
 *	@pp: port private data
 *	@protocol: taskfile protocol about to be issued
 *
 *	If EDMA is already running, but in the wrong mode (NCQ vs non-NCQ),
 *	stop it first; then (re)configure and enable it.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			  struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}
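
/*
 * Ordering matters above: the EDMA configuration and queue pointers are
 * programmed, and stale port IRQ causes cleared, before EDMA_EN is set.
 * Once EDMA owns the port, the SFF shadow registers must not be touched
 * until mv_stop_edma() has been called.
 */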

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
}

/*
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@port_mmio: io base address
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
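
/*
 * The poll above allows up to 10000 iterations of 10 usecs each, i.e. a
 * worst-case wait of roughly 100 msecs before giving up with -EIO.
 */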

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_err(ap, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;
		if (sc_reg_in == SCR_CONTROL) {
			/*
			 * Workaround for 88SX60x1 FEr SATA#26:
			 *
			 * COMRESETs must not accidentally put the drive
			 * to sleep when writing SCR_CONTROL.  Setting
			 * bits 12..15 of SCR_CONTROL blocks HIPM requests
			 * for this port.
			 *
			 * So if we see an outgoing COMRESET (low nibble
			 * transitioning to or from value 1), set those
			 * bits along with the write.
			 */
			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
				val |= 0xf000;	/* disable IPM */
		}
		writelfl(val, addr);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_info(adev,
				"NCQ disabled for command-based switching\n");
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;

	/* PIO commands need exclusive link: no other commands [DMA or PIO]
	 * can run concurrently.
	 * set excl_link when we want to send a PIO command in DMA mode
	 * or a non-NCQ command in NCQ mode.
	 * When we receive a command from that link, and there are no
	 * outstanding commands, mark a flag to clear excl_link and let
	 * the command go through.
	 */
	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
			return 0;
		} else
			return ATA_DEFER_PORT;
	}

	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
		if (ata_is_ncq(qc->tf.protocol))
			return 0;
		else {
			ap->excl_link = link;
			return ATA_DEFER_PORT;
		}
	}

	return ATA_DEFER_PORT;
}

static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;

	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	ltmode   = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			haltcond &= ~EDMA_ERR_DEV;
		else
			fiscfg |=  FISCFG_WAIT_DEV_ERR;
	} else {
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	}

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
}
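
/*
 * FBS (FIS-based switching) lets EDMA keep commands to multiple PMP
 * devices in flight at once: LTMODE bit 8 plus FISCFG_SINGLE_SYNC turn
 * the mode on, and with NCQ the "halt on device error" condition is
 * relaxed so that one drive's error does not stall the other links.
 */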

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + GPIO_PORT_CTL);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + GPIO_PORT_CTL);
}

/*
 *	mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
 *	@ap: Port being initialized
 *
 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
 *
 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
 *	of basic DMA on the GEN_IIE versions of the chips.
 *
 *	This bit survives EDMA resets, and must be set for basic DMA
 *	to function, and should be cleared when EDMA is active.
 */
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
	struct mv_port_priv *pp = ap->private_data;
	u32 new, *old = &pp->cached.unknown_rsvd;

	if (enable_bmdma)
		new = *old | 1;
	else
		new = *old & ~1;
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
}

/*
 * SOC chips have an issue whereby the HDD LEDs don't always blink
 * during I/O when NCQ is enabled.  Enabling a special "LED blink" mode
 * on the SOC takes care of it, producing a steady blink rate when
 * any drive on the chip is active.
 *
 * Note that this blink mode is a global chip setting, not per-port,
 * so we only disable it again once no port still needs it (see
 * mv_soc_led_blink_disable below).
 */
static void mv_soc_led_blink_enable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;

	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
		return;
	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

static void mv_soc_led_blink_disable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;
	unsigned int port;

	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
		return;

	/* disable led-blink only if no ports are using NCQ */
	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *this_ap = host->ports[port];
		struct mv_port_priv *pp = this_ap->private_data;

		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
			return;
	}

	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * But first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(ap, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS;
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		if (want_edma) {
			cfg |= (1 << 22); /* enab 4-entry host queue cache */
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18); /* enab early completion */
		}
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
		mv_bmdma_enable_iie(ap, !want_edma);

		if (IS_SOC(hpriv)) {
			if (want_ncq)
				mv_soc_led_blink_enable(ap);
			else
				mv_soc_led_blink_disable(ap);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	}

	writelfl(cfg, port_mmio + EDMA_CFG);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/*
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(ap->lock, flags);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);
	spin_unlock_irqrestore(ap->lock, flags);

	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/*
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	spin_unlock_irqrestore(ap->lock, flags);
	mv_port_free_dma_mem(ap);
}

/*
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
			mv_sg->reserved = 0;

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	mb(); /* ensure data structure is visible to the chipset */
}
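
/*
 * The inner loop splits any segment that would cross a 64KB boundary,
 * which is why sg_tablesize is MV_MAX_SG_CT / 2.  Example: a 0x3000-byte
 * segment at bus address 0x1f000 becomes two ePRDs, 0x1000 bytes at
 * 0x1f000 (up to the 64KB line) and 0x2000 bytes at 0x20000.
 */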

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/*
 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
 *	@ap: Port associated with this ATA transaction.
 *
 *	We need this only for ATAPI bmdma transactions,
 *	as otherwise we experience spurious interrupts
 *	after libata-sff handles the bmdma interrupts.
 */
static void mv_sff_irq_clear(struct ata_port *ap)
{
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}

/*
 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
 *	@qc: queued command to check for chipset/DMA compatibility.
 *
 *	The bmdma engines cannot handle speculative data sizes
 *	(bytecount under/over flow).  So only allow DMA for
 *	data transfer commands with known data sizes.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (scmd) {
		switch (scmd->cmnd[0]) {
		case READ_6:
		case READ_10:
		case READ_12:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case GPCMD_READ_CD:
		case GPCMD_SEND_DVD_STRUCTURE:
		case GPCMD_SEND_CUE_SHEET:
			return 0; /* DMA is safe */
		}
	}
	return -EOPNOTSUPP; /* use PIO instead */
}

/*
 *	mv_bmdma_setup - Set up BMDMA transaction
 *	@qc: queued command to prepare DMA for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	mv_fill_sg(qc);

	/* clear all DMA cmd bits */
	writel(0, port_mmio + BMDMA_CMD);

	/* load PRD table addr. */
	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
		port_mmio + BMDMA_PRD_HIGH);
	writelfl(pp->sg_tbl_dma[qc->tag],
		port_mmio + BMDMA_PRD_LOW);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

/*
 *	mv_bmdma_start - Start a BMDMA transaction
 *	@qc: queued command to start DMA on.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;

	/* start host DMA transaction */
	writelfl(cmd, port_mmio + BMDMA_CMD);
}

/*
 *	mv_bmdma_stop_ap - Stop BMDMA transfer
 *	@ap: port to halt DMA on
 *
 *	Clears the ATA_DMA_START flag in the bmdma control register
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_stop_ap(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 cmd;

	/* clear start/stop bit */
	cmd = readl(port_mmio + BMDMA_CMD);
	if (cmd & ATA_DMA_START) {
		cmd &= ~ATA_DMA_START;
		writelfl(cmd, port_mmio + BMDMA_CMD);

		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
		ata_sff_dma_pause(ap);
	}
}

static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
	mv_bmdma_stop_ap(qc->ap);
}

/*
 *	mv_bmdma_status - Read BMDMA status
 *	@ap: port for which to retrieve DMA status.
 *
 *	Read and return equivalent of the sff BMDMA status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 mv_bmdma_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 reg, status;

	/*
	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
	 * and the ATA_DMA_INTR bit doesn't exist.
	 */
	reg = readl(port_mmio + BMDMA_STATUS);
	if (reg & ATA_DMA_ACTIVE)
		status = ATA_DMA_ACTIVE;
	else if (reg & ATA_DMA_ERR)
		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
	else {
		/*
		 * Just because DMA_ACTIVE is 0 (DMA completed),
		 * this does _not_ mean the device is "done".
		 * So we should not yet be signalling ATA_DMA_INTR
		 * in some cases.  Eg. DSM/TRIM, and perhaps others.
		 */
		mv_bmdma_stop_ap(ap);
		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
			status = 0; /* device still busy: not done yet */
		else
			status = ATA_DMA_INTR;
	}
	return status;
}

static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	/*
	 * Workaround for 88SX60x1 FEr SATA#24.
	 *
	 * Chip may corrupt WRITEs if multi_count >= 4kB.
	 * Note that READs are unaffected.
	 *
	 * It's not clear if this errata really means "4K bytes",
	 * or if it always happens for multi_count > 7
	 * regardless of device sector size.
	 *
	 * So, for safety, any write with multi_count > 7
	 * gets converted here into a regular PIO write instead:
	 */
	if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
		if (qc->dev->multi_count > 7) {
			switch (tf->command) {
			case ATA_CMD_WRITE_MULTI:
				tf->command = ATA_CMD_PIO_WRITE;
				break;
			case ATA_CMD_WRITE_MULTI_FUA_EXT:
				tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
				/* fall through */
			case ATA_CMD_WRITE_MULTI_EXT:
				tf->command = ATA_CMD_PIO_WRITE_EXT;
				break;
			}
		}
	}
}

/*
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf = &qc->tf;
	u16 flags = 0;
	unsigned in_index;

	switch (tf->protocol) {
	case ATA_PROT_DMA:
		if (tf->command == ATA_CMD_DSM)
			return;
		/* fall through */
	case ATA_PROT_NCQ:
		break;	/* continue below */
	case ATA_PROT_PIO:
		mv_rw_multi_errata_sata24(qc);
		return;
	default:
		return;
	}

	/* Fill in command request block
	 */
	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
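
/*
 * CRQB recap: each 32-byte CRQB carries the SG-table bus address, a
 * flags word (read/write, tag, PMP), and up to 11 register writes packed
 * as 16-bit words by mv_crqb_pack_cmd(); the final word carries
 * CRQB_CMD_LAST so the EDMA engine knows where the taskfile ends.
 */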

/*
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf = &qc->tf;
	unsigned in_index;
	u32 flags = 0;

	if ((tf->protocol != ATA_PROT_DMA) &&
	    (tf->protocol != ATA_PROT_NCQ))
		return;
	if (tf->command == ATA_CMD_DSM)
		return;	/* use bmdma for this */

	/* Fill in Gen IIE command request block */
	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/*
 *	mv_sff_check_status - fetch device status, if valid
 *	@ap: ATA port to fetch status from
 *
 *	When using command issue via mv_qc_issue_fis(),
 *	the initial ATA_BUSY state does not show up in the
 *	ATA status (shadow) register.  This can confuse libata!
 *
 *	So we have a hook here to fake ATA_BUSY for that situation,
 *	until the first time a BUSY, DRQ, or ERR bit is seen.
 */
static u8 mv_sff_check_status(struct ata_port *ap)
{
	u8 stat = ioread8(ap->ioaddr.status_addr);
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
		else
			stat = ATA_BUSY;
	}
	return stat;
}

/*
 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *	@ap: ATA port to send the FIS on
 *	@fis: fis to be sent
 *	@nwords: number of 32-bit words in the fis
 */
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 ifctl, old_ifctl, ifstat;
	int i, timeout = 200, final_word = nwords - 1;

	/* Enter FIS transmission mode */
	old_ifctl = readl(port_mmio + SATA_IFCTL);
	ifctl = 0x100 | (old_ifctl & 0xf);
	writelfl(ifctl, port_mmio + SATA_IFCTL);

	/* Send all words of the FIS except for the final word */
	for (i = 0; i < final_word; ++i)
		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);

	/* Flag end-of-transmission, and then send the final word */
	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);

	/*
	 * Wait for FIS transmission to complete.
	 * This typically takes just a single iteration.
	 */
	do {
		ifstat = readl(port_mmio + SATA_IFSTAT);
	} while (!(ifstat & 0x1000) && --timeout);

	/* Restore original port configuration */
	writelfl(old_ifctl, port_mmio + SATA_IFCTL);

	/* See if it worked */
	if ((ifstat & 0x3000) != 0x1000) {
		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
			      __func__, ifstat);
		return AC_ERR_OTHER;
	}
	return 0;
}
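
/*
 * Bit-banging a FIS this way bypasses the shadow taskfile registers
 * entirely, which is what mv_qc_issue_fis() below relies upon when the
 * normal register path is unusable, e.g. READ LOG EXT after an NCQ
 * error on Gen-II chips (see mv_qc_issue()).
 */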

/*
 *	mv_qc_issue_fis - Issue a command directly as a FIS
 *	@qc: queued command to start
 *
 *	Note that the ATA shadow registers are not updated
 *	after command issue, so the device will appear "READY"
 *	if polled, even while it is BUSY processing the command.
 *
 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
 *
 *	Note: we don't get updated shadow regs on *completion*
 *	of non-data commands.  So avoid sending them via this function,
 *	as they will appear to have completed immediately.
 *
 *	GEN_IIE has special registers that we could get the result tf from,
 *	but earlier chipsets do not.  For now, we ignore those registers.
 */
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_link *link = qc->dev->link;
	u32 fis[5];
	int err = 0;

	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
	if (err)
		return err;

	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		/* fall through */
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_FIRST;
		break;
	case ATA_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			ap->hsm_task_state = HSM_ST_FIRST;
		else
			ap->hsm_task_state = HSM_ST;
		break;
	default:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		ata_sff_queue_pio_task(link, 0);
	return 0;
}

/*
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	static int limit_warnings = 10;
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;
	unsigned int port_irqs;

	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		if (qc->tf.command == ATA_CMD_DSM) {
			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
				return AC_ERR_OTHER;
			break;  /* use bmdma for this */
		}
		/* fall through */
	case ATA_PROT_NCQ:
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

		/* Write the request in pointer to kick the EDMA to life */
		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
					port_mmio + EDMA_REQ_Q_IN_PTR);
		return 0;

	case ATA_PROT_PIO:
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_warn(qc->dev->link, DRV_NAME
				      ": attempting PIO w/multiple DRQ: "
				      "this may fail due to h/w errata\n");
		}
		/* fall through */
	case ATA_PROT_NODATA:
	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (ap->flags & ATA_FLAG_PIO_POLLING)
			qc->tf.flags |= ATA_TFLAG_POLLING;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
	else
		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */

	/*
	 * We're about to send a non-EDMA capable command to the
	 * port.  Turn off EDMA so there won't be problems accessing
	 * shadow block, etc registers.
	 */
	mv_stop_edma(ap);
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
	mv_pmp_select(ap, qc->dev->link->pmp);

	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		/*
		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
		 *
		 * After any NCQ error, the READ_LOG_EXT command
		 * from libata-eh *must* use mv_qc_issue_fis().
		 * Otherwise it might fail, due to chip errata.
		 *
		 * Rather than special-case it, we'll just *always*
		 * use this method here for READ_LOG_EXT, making for
		 * easier testing.
		 */
		if (IS_GEN_II(hpriv))
			return mv_qc_issue_fis(qc);
	}
	return ata_bmdma_qc_issue(qc);
}
2389
2390static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2391{
2392 struct mv_port_priv *pp = ap->private_data;
2393 struct ata_queued_cmd *qc;
2394
2395 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2396 return NULL;
2397 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2398 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2399 return qc;
2400 return NULL;
2401}
2402
2403static void mv_pmp_error_handler(struct ata_port *ap)
2404{
2405 unsigned int pmp, pmp_map;
2406 struct mv_port_priv *pp = ap->private_data;
2407
2408 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2409
2410
2411
2412
2413
2414
2415 pmp_map = pp->delayed_eh_pmp_map;
2416 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2417 for (pmp = 0; pmp_map != 0; pmp++) {
2418 unsigned int this_pmp = (1 << pmp);
2419 if (pmp_map & this_pmp) {
2420 struct ata_link *link = &ap->pmp_link[pmp];
2421 pmp_map &= ~this_pmp;
2422 ata_eh_analyze_ncq_error(link);
2423 }
2424 }
2425 ata_port_freeze(ap);
2426 }
2427 sata_pmp_error_handler(ap);
2428}
2429
2430static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2431{
2432 void __iomem *port_mmio = mv_ap_base(ap);
2433
2434 return readl(port_mmio + SATA_TESTCTL) >> 16;
2435}
2436
2437static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2438{
2439 struct ata_eh_info *ehi;
2440 unsigned int pmp;
2441
2442
2443
2444
2445 ehi = &ap->link.eh_info;
2446 for (pmp = 0; pmp_map != 0; pmp++) {
2447 unsigned int this_pmp = (1 << pmp);
2448 if (pmp_map & this_pmp) {
2449 struct ata_link *link = &ap->pmp_link[pmp];
2450
2451 pmp_map &= ~this_pmp;
2452 ehi = &link->eh_info;
2453 ata_ehi_clear_desc(ehi);
2454 ata_ehi_push_desc(ehi, "dev err");
2455 ehi->err_mask |= AC_ERR_DEV;
2456 ehi->action |= ATA_EH_RESET;
2457 ata_link_abort(link);
2458 }
2459 }
2460}
2461
2462static int mv_req_q_empty(struct ata_port *ap)
2463{
2464 void __iomem *port_mmio = mv_ap_base(ap);
2465 u32 in_ptr, out_ptr;
2466
	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);
}

static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.6).
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
	failed_links = hweight16(new_map);

	ata_port_info(ap,
		      "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
		      __func__, pp->delayed_eh_pmp_map,
		      ap->qc_active, failed_links,
		      ap->nr_active_links);

	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_info(ap, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_info(ap, "%s: waiting\n", __func__);
	return 1;	/* handled */
}

static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.5).
	 */
	return 0;	/* not handled */
}

static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */

	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
				      __func__, edma_err_cause, pp->pp_flags);
			return 0;	/* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
				      __func__, edma_err_cause, pp->pp_flags);
			return 0;	/* not handled */
		}
		return mv_handle_fbs_non_ncq_dev_err(ap);
	}
	return 0;	/* not handled */
}

static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "idle";

	ata_ehi_clear_desc(ehi);
	if (edma_was_enabled) {
		when = "EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = "polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action |= ATA_EH_RESET;
	ata_port_freeze(ap);
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *
 *      Most cases require a full reset of the chip's state machine,
 *      which also performs a COMRESET.
 *      Also, if the chip is in fully NCQ mode, then set EDMA for new I/O.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	u32 fis_cause = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc;
	int abort = 0;

	/*
	 * Read and clear the SError and err_cause bits.
	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, also read and
	 * clear the latched "FIS IRQ Cause" bits for this port.
	 */
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
		writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
	}
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);

	if (edma_err_cause & EDMA_ERR_DEV) {
		/*
		 * Device errors during FIS-based switching operation
		 * require special handling.
		 */
		if (mv_handle_dev_err(ap, edma_err_cause))
			return;
	}

	qc = mv_get_active_qc(ap);
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
			edma_err_cause, pp->pp_flags);

	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
		if (fis_cause & FIS_IRQ_CAUSE_AN) {
			u32 ec = edma_err_cause &
			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
			sata_async_notification(ap);
			if (!ec)
				return;	/* Just an AN; no need for the nukes */
			ata_ehi_push_desc(ehi, "SDB notify");
		}
	}
	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV) {
		err_mask |= AC_ERR_DEV;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "dev error");
	}
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (err_mask == AC_ERR_DEV) {
		/*
		 * Cause of the error was a plain device error:
		 * just freeze the port and abort the failed command,
		 * so that EH can retry it without a full chip reset.
		 */
		mv_eh_freeze(ap);
		abort = 1;
	} else if (edma_err_cause & eh_freeze_mask) {
		/*
		 * Note to self: ata_port_freeze() calls ata_port_abort()
		 */
		ata_port_freeze(ap);
	} else {
		abort = 1;
	}

	if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}

static bool mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	u8 ata_status;
	u16 edma_status = le16_to_cpu(response->flags);

	/*
	 * edma_status from a response queue entry:
	 *   LSB is from EDMA_ERR_IRQ_CAUSE register (non-NCQ only).
	 *   MSB is saved ATA status from command completion.
	 */
	if (!ncq_enabled) {
		u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
		if (err_cause) {
			/*
			 * Error will be seen/handled by
			 * mv_err_intr().  So do nothing at all here.
			 */
			return false;
		}
	}
	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
	if (!ac_err_mask(ata_status))
		return true;
	/* else: leave it for mv_err_intr() */
	return false;
}

static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	u32 done_mask = 0;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process any new responses since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
			done_mask |= 1 << tag;
		work_done = true;
	}

	if (work_done) {
		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);

		/* Update the software queue position index in hardware */
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR);
	}
}

static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if one of the routines we call below changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
	 */
	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
			mv_handle_fbs_ncq_dev_err(ap);
	}
	/*
	 * Handle chip-reported errors, or continue on to handle PIO.
	 */
	if (unlikely(port_cause & ERR_IRQ)) {
		mv_err_intr(ap);
	} else if (!edma_was_enabled) {
		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
		if (qc)
			ata_bmdma_port_intr(ap, qc);
		else
			mv_unexpected_intr(ap, edma_was_enabled);
	}
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @main_irq_cause: Main interrupt cause register for the chip.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	unsigned int handled = 0, port;

	/* If asserted, clear the "all ports" IRQ coalescing bit */
	if (main_irq_cause & ALL_PORTS_COAL_DONE)
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		unsigned int p, shift, hardport, port_cause;

		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
		/*
		 * Each hc within the host has its own hc_irq_cause register,
		 * where the interrupting ports bits get ack'd.
		 */
		if (hardport == 0) {	/* first port on this hc ? */
			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
			u32 port_mask, ack_irqs;
			/*
			 * Skip this entire hc if nothing pending for any ports
			 */
			if (!hc_cause) {
				port += MV_PORTS_PER_HC - 1;
				continue;
			}
			/*
			 * We don't need/want to read the hc_irq_cause register,
			 * because doing so hurts performance, and
			 * main_irq_cause already gives us everything we need.
			 *
			 * But we do have to *write* to hc_irq_cause to ack
			 * the ports that we are handling this time through.
			 *
			 * This requires that we create a bitmap for those
			 * ports which interrupted us, and use that bitmap
			 * to ack (only) those ports via hc_irq_cause.
			 */
			ack_irqs = 0;
			if (hc_cause & PORTS_0_3_COAL_DONE)
				ack_irqs = HC_COAL_IRQ;
			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
				if ((port + p) >= hpriv->n_ports)
					break;
				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
				if (hc_cause & port_mask)
					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
			}
			hc_mmio = mv_hc_base_from_port(mmio, port);
			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
			handled = 1;
		}
		/*
		 * Handle interrupts signalled for this port:
		 */
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (port_cause)
			mv_port_intr(ap, port_cause);
	}
	return handled;
}

static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_offset);

	dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_offset);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;
}

/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read-only register to determine if any host
 *      controllers have pending interrupts.  If so, call the lower-level
 *      routine to handle it.  Also check for PCI errors, which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);

	/* for MSI: block new interrupts while in here */
	if (using_msi)
		mv_write_main_irq_mask(0, hpriv);

	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
	}

	/* for MSI: unmask; interrupt cause bits will retrigger now */
	if (using_msi)
		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

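	/* On 50xx, the SCR registers sit at consecutive u32 offsets */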
	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

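	/* All but the earliest 5080 rev-0 parts get the EXP ROM BAR tweak */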
	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + GPIO_PORT_CTL);

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LTMODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LTMODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_offset);
	ZERO(hpriv->irq_mask_offset);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + GPIO_PORT_CTL);
}

/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m3;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/*
	 * Gen-II/IIe PHY_MODE3 errata RM#2:
	 * Achieves better receiver noise performance than the h/w default:
	 */
	m3 = readl(port_mmio + PHY_MODE3);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	/* Guideline 88F5182 (GL# SATA-S11) */
	if (IS_SOC(hpriv))
		m3 &= ~0x1c;

	if (fix_phy_mode4) {
		u32 m4 = readl(port_mmio + PHY_MODE4);
		/*
		 * Enforce reserved-bit restrictions on GenIIe devices only.
		 * For earlier chipsets, force only the internal config field
		 *  (workaround for errata FEr SATA#10 part 1).
		 */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		else
			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
		writel(m4, port_mmio + PHY_MODE4);
	}
	/*
	 * Workaround for 60x1-B2 errata SATA#13:
	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
	 * Or ensure we use writelfl() when writing PHY_MODE4.
	 */
	writel(m3, port_mmio + PHY_MODE3);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
}

static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 reg;

	reg = readl(port_mmio + PHY_MODE3);
	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
	reg |= (0x1 << 27);
	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
	reg |= (0x1 << 29);
	writel(reg, port_mmio + PHY_MODE3);

	reg = readl(port_mmio + PHY_MODE4);
	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
	reg |= (0x1 << 16);
	writel(reg, port_mmio + PHY_MODE4);

	reg = readl(port_mmio + PHY_MODE9_GEN2);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN2);

	reg = readl(port_mmio + PHY_MODE9_GEN1);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN1);
}

/*
 *	soc_is_65n - check if the soc is a 65 nano device
 *
 *	Detect the SoC type by reading the PHYCFG_OFS register: it exists
 *	only on the 65-nano devices and reads back non-zero there, whereas
 *	reading it on older devices returns 0.
 */
static bool soc_is_65n(struct mv_host_priv *hpriv)
{
	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);

	if (readl(port0_mmio + PHYCFG_OFS))
		return true;
	return false;
}

static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_IFCFG);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_IFCFG);
}

static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

static void mv_pmp_select(struct ata_port *ap, int pmp)
{
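	/* Route subsequent commands to the selected PMP device number */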
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);

	return rc;
}

static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	mv_enable_port_irqs(ap, ERR_IRQ);
}

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);

	/* special case: control/altstatus doesn't have an ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;

	/* Clear any currently outstanding port interrupt conditions */
	serr = port_mmio + mv_scr_offset(SCR_ERROR);
	writelfl(readl(serr), serr);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
		readl(port_mmio + EDMA_ERR_IRQ_MASK));
}

static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
		return 0;
	reg = readl(mmio + MV_PCI_MODE);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* not in PCI-X mode */
	return 1;	/* chip is in PCI-X mode */
}

static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + MV_PCI_COMMAND);
		if (reg & MV_PCI_COMMAND_MRDTRIG)
			return 0; /* not okay */
	}
	return 1; /* okay */
}

static void mv_60x1b2_errata_pci7(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	/* workaround for 60x1-B2 errata PCI#7 */
	if (mv_in_pcix_mode(host)) {
		u32 reg = readl(mmio + MV_PCI_COMMAND);
		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
	}
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

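	/* Select chip-specific ops and errata flags by board type and revision */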
	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			mv_60x1b2_errata_pci7(host);
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		if (soc_is_65n(hpriv))
			hpriv->ops = &mv_soc_65n_ops;
		else
			hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, hpriv->board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
	}

	/* initialize shadow irq mask with register's value */
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		if (hpriv->ops->read_preamp)
			hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG),
			readl(hc_mmio + HC_IRQ_CAUSE));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_offset);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
	}

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later, after ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
				    irq_coalescing_usecs);
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
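	/* One pool per queue type; each allocation covers a whole port queue */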
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 const struct mbus_dram_target_info *dram)
{
	int i;

	/* First, disable all DRAM windows */
	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	/* Then program one window per DRAM chip-select */
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}

/**
 *      mv_platform_probe - handle a positive probe of an soc Marvell
 *      device
 *      @pdev: platform device found
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	const struct mv_sata_platform_data *mv_platform_data;
	const struct mbus_dram_target_info *dram;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;
	hpriv->board_idx = chip_soc;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   resource_size(res));
	if (!hpriv->base)
		return -ENOMEM;
	hpriv->base -= SATAHC0_REG_BASE;

#if defined(CONFIG_HAVE_CLK)
	hpriv->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(hpriv->clk))
		dev_notice(&pdev->dev, "cannot get clkdev\n");
	else
		clk_enable(hpriv->clk);
#endif

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_conf_mbus_windows(hpriv, dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		goto err;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		goto err;

	dev_info(&pdev->dev, "slots %u ports %d\n",
		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);

	rc = ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
			       IRQF_SHARED, &mv6_sht);
	if (!rc)
		return 0;

err:
#if defined(CONFIG_HAVE_CLK)
	if (!IS_ERR(hpriv->clk)) {
		clk_disable(hpriv->clk);
		clk_put(hpriv->clk);
	}
#endif

	return rc;
}

/**
 *      mv_platform_remove - unplug a platform interface
 *      @pdev: platform device
 *
 *      A platform bus SATA device has been unplugged. Perform the needed
 *      cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
#if defined(CONFIG_HAVE_CLK)
	struct mv_host_priv *hpriv = host->private_data;
#endif
	ata_host_detach(host);

#if defined(CONFIG_HAVE_CLK)
	if (!IS_ERR(hpriv->clk)) {
		clk_disable(hpriv->clk);
		clk_put(hpriv->clk);
	}
#endif
	return 0;
}

#ifdef CONFIG_PM
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	if (host)
		return ata_host_suspend(host, state);
	else
		return 0;
}

static int mv_platform_resume(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	const struct mbus_dram_target_info *dram;
	int ret;

	if (host) {
		struct mv_host_priv *hpriv = host->private_data;

		/*
		 * (Re-)program MBUS remapping windows if we are asked to.
		 */
		dram = mv_mbus_dram_info();
		if (dram)
			mv_conf_mbus_windows(hpriv, dram);

		/* initialize adapter */
		ret = mv_init_host(host);
		if (ret) {
			printk(KERN_ERR DRV_NAME ": Error during HW init\n");
			return ret;
		}
		ata_host_resume(host);
	}

	return 0;
}
#else
#define mv_platform_suspend NULL
#define mv_platform_resume NULL
#endif

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.suspend	= mv_platform_suspend,
	.resume		= mv_platform_resume,
	.driver		= {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= mv_pci_device_resume,
#endif
};
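/* Set DMA masks: prefer fully 64-bit DMA, fall back to 32-bit otherwise */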
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev,
					"64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/*
	 * Read the PCI class code, which tells us whether the chip
	 * presents itself as a plain SCSI or as a RAID controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, port, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;
	hpriv->board_idx = board_idx;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
		unsigned int offset = port_mmio - hpriv->base;

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	/* Enable message-signaled interrupts, if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}

#ifdef CONFIG_PM
static int mv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	ata_host_resume(host);

	return 0;
}
#endif
#endif
static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);