/*
 * sata_mv.c - Marvell SATA support
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.28"
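
/*
 * module options
 */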

#ifdef CONFIG_PCI
static int msi;
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");

enum {
	MV_PRIMARY_BAR = 0,
	MV_IO_BAR = 2,
	MV_MISC_BAR = 3,

	MV_MAJOR_REG_AREA_SZ = 0x10000,
	MV_MINOR_REG_AREA_SZ = 0x2000,

	COAL_CLOCKS_PER_USEC = 150,
	MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1),
	MAX_COAL_IO_COUNT = 255,

	MV_PCI_REG_BASE = 0,

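	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */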
	COAL_REG_BASE = 0x18000,
	IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ = (1 << 4),

	IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
	IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),

	TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
	TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),

	SATAHC0_REG_BASE = 0x20000,
	FLASH_CTL = 0x1046c,
	GPIO_PORT_CTL = 0x104f0,
	RESET_CFG = 0x180d8,

	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH = 32,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT = 256,
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),

	MV_PORT_HC_SHIFT = 2,
	MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT),

	MV_PORT_MASK = (MV_PORTS_PER_HC - 1),

	MV_FLAG_DUAL_HC = (1 << 30),

	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
			  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ = (1 << 0),
	CRQB_TAG_SHIFT = 1,
	CRQB_IOID_SHIFT = 6,
	CRQB_PMP_SHIFT = 12,
	CRQB_HOSTQ_SHIFT = 17,
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),

	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5,
	CRPB_IOID_SHIFT_7 = 7,

	EPRD_FLAG_END_OF_TBL = (1 << 31),

	MV_PCI_COMMAND = 0xc00,
	MV_PCI_COMMAND_MWRCOM = (1 << 4),
	MV_PCI_COMMAND_MRDTRIG = (1 << 7),

	PCI_MAIN_CMD_STS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),

	MV_PCI_MODE = 0xd00,
	MV_PCI_MODE_MASK = 0x30,

	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,

	PCI_IRQ_CAUSE = 0x1d58,
	PCI_IRQ_MASK = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,

	PCIE_IRQ_CAUSE = 0x1900,
	PCIE_IRQ_MASK = 0x1910,
	PCIE_UNMASK_ALL_IRQS = 0x40a,

	PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
	SOC_HC_MAIN_IRQ_MASK = 0x20024,
	ERR_IRQ = (1 << 0),
	DONE_IRQ = (1 << 1),
	HC0_IRQ_PEND = 0x1ff,
	HC_SHIFT = 9,
	DONE_IRQ_0_3 = 0x000000aa,
	DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT),
	PCI_ERR = (1 << 18),
	TRAN_COAL_LO_DONE = (1 << 19),
	TRAN_COAL_HI_DONE = (1 << 20),
	PORTS_0_3_COAL_DONE = (1 << 8),
	PORTS_4_7_COAL_DONE = (1 << 17),
	ALL_PORTS_COAL_DONE = (1 << 21),
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),
	HC_MAIN_RSVD_5 = (0x1fff << 19),
	HC_MAIN_RSVD_SOC = (0x3fffffb << 6),

	HC_CFG = 0x00,

	HC_IRQ_CAUSE = 0x14,
	DMA_IRQ = (1 << 0),
	HC_COAL_IRQ = (1 << 4),
	DEV_IRQ = (1 << 8),

	HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,

	SOC_LED_CTRL = 0x2c,
	SOC_LED_CTRL_BLINK = (1 << 0),
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),

	SHD_BLK = 0x100,
	SHD_CTL_AST = 0x20,

	SATA_STATUS = 0x300,
	SATA_ACTIVE = 0x350,
	FIS_IRQ_CAUSE = 0x364,
	FIS_IRQ_CAUSE_AN = (1 << 9),

	LTMODE = 0x30c,
	LTMODE_BIT8 = (1 << 8),

	PHY_MODE2 = 0x330,
	PHY_MODE3 = 0x310,

	PHY_MODE4 = 0x314,
	PHY_MODE4_CFG_MASK = 0x00000003,
	PHY_MODE4_CFG_VALUE = 0x00000001,
	PHY_MODE4_RSVD_ZEROS = 0x5de3fffa,
	PHY_MODE4_RSVD_ONES = 0x00000005,

	SATA_IFCTL = 0x344,
	SATA_TESTCTL = 0x348,
	SATA_IFSTAT = 0x34c,
	VENDOR_UNIQUE_FIS = 0x35c,

	FISCFG = 0x360,
	FISCFG_WAIT_DEV_ERR = (1 << 8),
	FISCFG_SINGLE_SYNC = (1 << 16),

	PHY_MODE9_GEN2 = 0x398,
	PHY_MODE9_GEN1 = 0x39c,
	PHYCFG_OFS = 0x3a0,

	MV5_PHY_MODE = 0x74,
	MV5_LTMODE = 0x30,
	MV5_PHY_CTL = 0x0C,
	SATA_IFCFG = 0x050,
	LP_PHY_CTL = 0x058,
	LP_PHY_CTL_PIN_PU_PLL = (1 << 0),
	LP_PHY_CTL_PIN_PU_RX = (1 << 1),
	LP_PHY_CTL_PIN_PU_TX = (1 << 2),
	LP_PHY_CTL_GEN_TX_3G = (1 << 5),
	LP_PHY_CTL_GEN_RX_3G = (1 << 9),

	MV_M2_PREAMP_MASK = 0x7e0,

	EDMA_CFG = 0,
	EDMA_CFG_Q_DEPTH = 0x1f,
	EDMA_CFG_NCQ = (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),
	EDMA_CFG_RD_BRST_EXT = (1 << 11),
	EDMA_CFG_WR_BUFF_LEN = (1 << 13),
	EDMA_CFG_EDMA_FBS = (1 << 16),
	EDMA_CFG_FBS = (1 << 26),

	EDMA_ERR_IRQ_CAUSE = 0x8,
	EDMA_ERR_IRQ_MASK = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),
	EDMA_ERR_PRD_PAR = (1 << 1),
	EDMA_ERR_DEV = (1 << 2),
	EDMA_ERR_DEV_DCON = (1 << 3),
	EDMA_ERR_DEV_CON = (1 << 4),
	EDMA_ERR_SERR = (1 << 5),
	EDMA_ERR_SELF_DIS = (1 << 7),
	EDMA_ERR_SELF_DIS_5 = (1 << 8),
	EDMA_ERR_BIST_ASYNC = (1 << 8),
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8),
	EDMA_ERR_CRQB_PAR = (1 << 9),
	EDMA_ERR_CRPB_PAR = (1 << 10),
	EDMA_ERR_INTRL_PAR = (1 << 11),
	EDMA_ERR_IORDY = (1 << 12),

	EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),
	EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
	EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),

	EDMA_ERR_LNK_DATA_RX = (0xf << 17),

	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
	EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),
	EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),
	EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),
	EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),
	EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),

	EDMA_ERR_LNK_DATA_TX = (0x1f << 26),

	EDMA_ERR_TRANS_PROTO = (1 << 31),
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
				 EDMA_ERR_LNK_CTRL_RX_1 |
				 EDMA_ERR_LNK_CTRL_RX_3 |
				 EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
			 EDMA_ERR_PRD_PAR |
			 EDMA_ERR_DEV_DCON |
			 EDMA_ERR_DEV_CON |
			 EDMA_ERR_SERR |
			 EDMA_ERR_SELF_DIS |
			 EDMA_ERR_CRQB_PAR |
			 EDMA_ERR_CRPB_PAR |
			 EDMA_ERR_INTRL_PAR |
			 EDMA_ERR_IORDY |
			 EDMA_ERR_LNK_CTRL_RX_2 |
			 EDMA_ERR_LNK_DATA_RX |
			 EDMA_ERR_LNK_DATA_TX |
			 EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
			   EDMA_ERR_PRD_PAR |
			   EDMA_ERR_DEV_DCON |
			   EDMA_ERR_DEV_CON |
			   EDMA_ERR_OVERRUN_5 |
			   EDMA_ERR_UNDERRUN_5 |
			   EDMA_ERR_SELF_DIS_5 |
			   EDMA_ERR_CRQB_PAR |
			   EDMA_ERR_CRPB_PAR |
			   EDMA_ERR_INTRL_PAR |
			   EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI = 0x10,
	EDMA_REQ_Q_IN_PTR = 0x14,

	EDMA_REQ_Q_OUT_PTR = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,

	EDMA_RSP_Q_BASE_HI = 0x1c,
	EDMA_RSP_Q_IN_PTR = 0x20,
	EDMA_RSP_Q_OUT_PTR = 0x24,
	EDMA_RSP_Q_PTR_SHIFT = 3,

	EDMA_CMD = 0x28,
	EDMA_EN = (1 << 0),
	EDMA_DS = (1 << 1),
	EDMA_RESET = (1 << 2),

	EDMA_STATUS = 0x30,
	EDMA_STATUS_CACHE_EMPTY = (1 << 6),
	EDMA_STATUS_IDLE = (1 << 7),

	EDMA_IORDY_TMOUT = 0x34,
	EDMA_ARB_CFG = 0x38,

	EDMA_HALTCOND = 0x60,
	EDMA_UNKNOWN_RSVD = 0x6C,

	BMDMA_CMD = 0x224,
	BMDMA_STATUS = 0x228,
	BMDMA_PRD_LOW = 0x22c,
	BMDMA_PRD_HIGH = 0x230,

	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_GEN_I = (1 << 6),
	MV_HP_GEN_II = (1 << 7),
	MV_HP_GEN_IIE = (1 << 8),
	MV_HP_PCIE = (1 << 9),
	MV_HP_CUT_THROUGH = (1 << 10),
	MV_HP_FLAG_SOC = (1 << 11),
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),
	MV_HP_FIX_LP_PHY_CTL = (1 << 13),

	MV_PP_FLAG_EDMA_EN = (1 << 0),
	MV_PP_FLAG_NCQ_EN = (1 << 1),
	MV_PP_FLAG_FBS_EN = (1 << 2),
	MV_PP_FLAG_DELAYED_EH = (1 << 3),
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
#define WINDOW_BASE(i) (0x20034 + ((i) << 4))

enum {
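	/*
	 * DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */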
	MV_DMA_BOUNDARY = 0xffffU,

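	/*
	 * Mask of register bits containing the lower 32 bits
	 * of the EDMA request queue base address (1KB aligned).
	 */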
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

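	/* ditto, for the (256-byte aligned) response queue */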
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

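/* Command ReQuest Block: 32B */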
struct mv_crqb {
	__le32 sg_addr;
	__le32 sg_addr_hi;
	__le16 ctrl_flags;
	__le16 ata_cmd[11];
};

struct mv_crqb_iie {
	__le32 addr;
	__le32 addr_hi;
	__le32 flags;
	__le32 len;
	__le32 ata_cmd[4];
};

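/* Command ResPonse Block: 8B */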
struct mv_crpb {
	__le16 id;
	__le16 flags;
	__le32 tmstmp;
};

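/* EDMA Physical Region Descriptor (ePRD); a.k.a. SG list entry: 16B */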
struct mv_sg {
	__le32 addr;
	__le32 flags_size;
	__le32 addr_hi;
	__le32 reserved;
};

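/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */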
struct mv_cached_regs {
	u32 fiscfg;
	u32 ltmode;
	u32 haltcond;
	u32 unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb *crqb;
	dma_addr_t crqb_dma;
	struct mv_crpb *crpb;
	dma_addr_t crpb_dma;
	struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int req_idx;
	unsigned int resp_idx;

	u32 pp_flags;
	struct mv_cached_regs cached;
	unsigned int delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32 amps;
	u32 pre;
};

struct mv_host_priv {
	u32 hp_flags;
	unsigned int board_idx;
	u32 main_irq_mask;
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;
	int n_ports;
	void __iomem *base;
	void __iomem *main_irq_cause_addr;
	void __iomem *main_irq_mask_addr;
	u32 irq_cause_offset;
	u32 irq_mask_offset;
	u32 unmask_all_irqs;

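	/*
	 * Optional clocks (SoC platforms): if the platform device
	 * has no clocks, or some are missing, they simply aren't used.
	 */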
	struct clk *clk;
	struct clk **port_clks;

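	/*
	 * Optional per-port SATA PHYs, which can be powered up/down
	 * on platforms that provide them.
	 */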
	struct phy **port_phys;

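	/*
	 * These consistent DMA memory pools give us guaranteed
	 * byte alignment for the hardware-accessed data structures,
	 * in accordance with the chipset's 32-bit DMA mask.
	 */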
	struct dma_pool *crqb_pool;
	struct dma_pool *crpb_pool;
	struct dma_pool *sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

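/*
 * .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */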
#ifdef CONFIG_PCI
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
};
#endif
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = MV_MAX_Q_DEPTH - 1,
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits = &ata_sff_port_ops,

	.lost_interrupt = ATA_OP_NULL,

	.qc_defer = mv_qc_defer,
	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,

	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,
	.hardreset = mv_hardreset,

	.scr_read = mv5_scr_read,
	.scr_write = mv5_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits = &ata_bmdma_port_ops,

	.lost_interrupt = ATA_OP_NULL,

	.qc_defer = mv_qc_defer,
	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,

	.dev_config = mv6_dev_config,

	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,
	.hardreset = mv_hardreset,
	.softreset = mv_softreset,
	.pmp_hardreset = mv_pmp_hardreset,
	.pmp_softreset = mv_softreset,
	.error_handler = mv_pmp_error_handler,

	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.sff_check_status = mv_sff_check_status,
	.sff_irq_clear = mv_sff_irq_clear,
	.check_atapi_dma = mv_check_atapi_dma,
	.bmdma_setup = mv_bmdma_setup,
	.bmdma_start = mv_bmdma_start,
	.bmdma_stop = mv_bmdma_stop,
	.bmdma_status = mv_bmdma_status,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits = &mv6_ops,
	.dev_config = ATA_OP_NULL,
	.qc_prep = mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{
		.flags = MV_GEN_I_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{
		.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{
		.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{
		.flags = MV_GEN_II_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{
		.flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{
		.flags = MV_GEN_IIE_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{
		.flags = MV_GEN_IIE_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{
		.flags = MV_GEN_IIE_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.read_preamp = mv_soc_read_preamp,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
};

static const struct mv_hw_ops mv_soc_65n_ops = {
	.phy_errata = mv_soc_65n_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
};

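/*
 * writelfl() does a writel() followed by a dummy readl() of the same
 * register, forcing any posted PCI write to complete before continuing.
 */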
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

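/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */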
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift += hardport * 2;					\
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

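/**
 *      mv_save_cached_regs - (re-)initialize cached port registers
 *      @ap: the port whose registers we are caching
 *
 *      Initialize the local cache of port registers,
 *      so that reading them over and over again can
 *      be avoided on the hotter paths of this driver.
 */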
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG);
	pp->cached.ltmode = readl(port_mmio + LTMODE);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}

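/**
 *      mv_write_cached_reg - write to a cached port register
 *      @addr: hardware address of the register
 *      @old: pointer to cached value of the register
 *      @new: new value for the register
 *
 *      Write a new value to a cached register,
 *      but only if the value is different from before.
 */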
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		unsigned long laddr;
		*old = new;
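		/*
		 * Workaround for 88SX60x1-B2 FEr SATA#13:
		 * Read-after-write is needed to prevent generating 64-bit
		 * write cycles on the PCI bus for SATA interface registers
		 * at offsets ending in 0x4 or 0xc.
		 *
		 * Looks like a lot of fuss, but it avoids an unnecessary
		 * posted-write flush w/ writelfl() on the hotter paths.
		 */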
		laddr = (unsigned long)addr & 0xffff;
		if (laddr >= 0x300 && laddr <= 0x33c) {
			laddr &= 0x000f;
			if (laddr == 0x4 || laddr == 0xc) {
				writelfl(new, addr);
				return;
			}
		}
		writel(new, addr);
	}
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

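	/*
	 * initialize request queue
	 */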
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);

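	/*
	 * initialize response queue
	 */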
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
}

static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
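	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register in Marvell terminology.
	 */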
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + FIS_IRQ_CAUSE);

	mv_enable_port_irqs(ap, port_irqs);
}

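/*
 *	mv_set_irq_coalescing - configure IRQ coalescing
 *	@host: the ata host
 *	@count: IRQ coalescing I/O count threshold (0..255)
 *	@usecs: IRQ coalescing time threshold in usecs
 *
 *	A zero value for either threshold disables coalescing.
 *	On dual-HC (non-GEN_I) chips, the chip-wide "all ports"
 *	mechanism is used instead of the per-HC thresholds.
 */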
static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				 ALL_PORTS_COAL_DONE;

	if (!usecs || !count) {
		clks = count = 0;
	} else {
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);

		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
		if (count)
			coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0;
	}

	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
	if (count)
		coal_enable |= PORTS_0_3_COAL_DONE;
	if (is_dual_hc) {
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
		if (count)
			coal_enable |= PORTS_4_7_COAL_DONE;
	}

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
}

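/*
 *	mv_start_edma - Enable eDMA engine
 *	@ap: ATA channel to manipulate
 *	@port_mmio: port base address
 *	@pp: port private data
 *	@protocol: taskfile protocol of the command being issued
 *
 *	If the current EDMA/NCQ mode doesn't match what the new
 *	command needs, stop EDMA first.  Then (re)configure and
 *	enable the EDMA engine if it isn't already running.
 *
 *	LOCKING:
 *	Inherited from caller.
 */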
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			  struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

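	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess.
	 */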
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
}

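/*
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@port_mmio: io base address
 *
 *	LOCKING:
 *	Inherited from caller.
 */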
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	writelfl(EDMA_DS, port_mmio + EDMA_CMD);

	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_err(ap, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
#endif
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE;
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;
		struct mv_host_priv *hpriv = link->ap->host->private_data;
		if (sc_reg_in == SCR_CONTROL) {
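			/*
			 * Workaround for 88SX60x1 FEr SATA#26:
			 *
			 * COMRESETs have to take care not to accidentally
			 * put the drive to sleep when writing SCR_CONTROL.
			 * Setting bits 12..15 prevents this problem.
			 *
			 * So if we see an outbound COMRESET, set those bits.
			 * Ditto for the followup write that clears the reset.
			 */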
			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
				val |= 0xf000;

			if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
				void __iomem *lp_phy_addr =
					mv_ap_base(link->ap) + LP_PHY_CTL;

				u32 lp_phy_val =
					LP_PHY_CTL_PIN_PU_PLL |
					LP_PHY_CTL_PIN_PU_RX  |
					LP_PHY_CTL_PIN_PU_TX;

				if ((val & 0xf0) != 0x10)
					lp_phy_val |=
						LP_PHY_CTL_GEN_TX_3G |
						LP_PHY_CTL_GEN_RX_3G;

				writelfl(lp_phy_val, lp_phy_addr);
			}
		}
		writelfl(val, addr);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
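	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */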
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_info(adev,
				"NCQ disabled for command-based switching\n");
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

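	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */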
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;

	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
			return 0;
		} else
			return ATA_DEFER_PORT;
	}

	if (ap->nr_active_links == 0)
		return 0;

	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
		if (ata_is_ncq(qc->tf.protocol))
			return 0;
		else {
			ap->excl_link = link;
			return ATA_DEFER_PORT;
		}
	}

	return ATA_DEFER_PORT;
}

static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;

	u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
	u32 ltmode, *old_ltmode = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	ltmode = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			haltcond &= ~EDMA_ERR_DEV;
		else
			fiscfg |= FISCFG_WAIT_DEV_ERR;
	} else {
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	}

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	old = readl(hpriv->base + GPIO_PORT_CTL);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + GPIO_PORT_CTL);
}

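/*
 *	mv_bmdma_enable_iie - enable/disable basic DMA on GEN_IIE chips
 *	@ap: port being configured
 *	@enable_bmdma: nonzero to enable, zero to disable
 *
 *	There are two DMA modes on these chips: basic DMA, and EDMA.
 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
 *	of basic DMA on the GEN_IIE versions of the chips.
 *	This bit survives EDMA resets, and must be set for basic DMA
 *	to function, and should be cleared when EDMA is active.
 */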
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
	struct mv_port_priv *pp = ap->private_data;
	u32 new, *old = &pp->cached.unknown_rsvd;

	if (enable_bmdma)
		new = *old | 1;
	else
		new = *old & ~1;
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
}

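/*
 * SOC chips have an issue whereby the HDD LEDs don't always blink
 * during I/O when NCQ is enabled.  Enable the blink quirk lazily
 * whenever NCQ is used, and only disable it once no port on the
 * host is using NCQ anymore.
 */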
static void mv_soc_led_blink_enable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;

	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
		return;
	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

static void mv_soc_led_blink_disable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;
	unsigned int port;

	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
		return;

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *this_ap = host->ports[port];
		struct mv_port_priv *pp = this_ap->private_data;

		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
			return;
	}

	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
	u32 cfg;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);

	cfg = EDMA_CFG_Q_DEPTH;
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);

		want_fbs &= want_ncq;

		mv_config_fbs(ap, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS;
		}

		cfg |= (1 << 23);
		if (want_edma) {
			cfg |= (1 << 22);
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18);
		}
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17);
		mv_bmdma_enable_iie(ap, !want_edma);

		if (IS_SOC(hpriv)) {
			if (want_ncq)
				mv_soc_led_blink_enable(ap);
			else
				mv_soc_led_blink_disable(ap);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	}

	writelfl(cfg, port_mmio + EDMA_CFG);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}

	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

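/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */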
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;

	pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;

	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;

	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(ap->lock, flags);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);
	spin_unlock_irqrestore(ap->lock, flags);

	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

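/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */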
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	spin_unlock_irqrestore(ap->lock, flags);
	mv_port_free_dma_mem(ap);
}

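/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */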
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->hw_tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
			mv_sg->reserved = 0;

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	mb();
}

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

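/**
 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
 *	@ap: Port associated with this ATA transaction.
 *
 *	We need this only for ATAPI bmdma transactions,
 *	as otherwise we experience spurious interrupts
 *	after libata-sff handles the bmdma interrupts.
 */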
static void mv_sff_irq_clear(struct ata_port *ap)
{
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}

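/**
 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
 *	@qc: queued command to check for chipset/DMA compatibility.
 *
 *	The bmdma engines cannot handle speculative data sizes
 *	(bytecount under/over flow).  So only allow DMA for
 *	data transfer commands with known data sizes.
 *
 *	LOCKING:
 *	Inherited from caller.
 */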
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (scmd) {
		switch (scmd->cmnd[0]) {
		case READ_6:
		case READ_10:
		case READ_12:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case GPCMD_READ_CD:
		case GPCMD_SEND_DVD_STRUCTURE:
		case GPCMD_SEND_CUE_SHEET:
			return 0;
		}
	}
	return -EOPNOTSUPP;
}

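/**
 *	mv_bmdma_setup - Set up BMDMA transaction
 *	@qc: queued command to prepare DMA for
 *
 *	LOCKING:
 *	Inherited from caller.
 */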
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	mv_fill_sg(qc);

	writel(0, port_mmio + BMDMA_CMD);

	writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
		port_mmio + BMDMA_PRD_HIGH);
	writelfl(pp->sg_tbl_dma[qc->hw_tag],
		port_mmio + BMDMA_PRD_LOW);

	ap->ops->sff_exec_command(ap, &qc->tf);
}

static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;

	writelfl(cmd, port_mmio + BMDMA_CMD);
}

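/**
 *	mv_bmdma_stop_ap - Stop BMDMA transfer
 *	@ap: port to halt DMA on
 *
 *	Clears the ATA_DMA_START flag in the bmdma control register
 *
 *	LOCKING:
 *	Inherited from caller.
 */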
static void mv_bmdma_stop_ap(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 cmd;

	cmd = readl(port_mmio + BMDMA_CMD);
	if (cmd & ATA_DMA_START) {
		cmd &= ~ATA_DMA_START;
		writelfl(cmd, port_mmio + BMDMA_CMD);

		ata_sff_dma_pause(ap);
	}
}

static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
	mv_bmdma_stop_ap(qc->ap);
}

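/**
 *	mv_bmdma_status - Read BMDMA status
 *	@ap: port for which to retrieve DMA status.
 *
 *	Read and return equivalent of the sff BMDMA status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */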
static u8 mv_bmdma_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 reg, status;

	reg = readl(port_mmio + BMDMA_STATUS);
	if (reg & ATA_DMA_ACTIVE)
		status = ATA_DMA_ACTIVE;
	else if (reg & ATA_DMA_ERR)
		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
	else {
		mv_bmdma_stop_ap(ap);
		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
			status = 0;
		else
			status = ATA_DMA_INTR;
	}
	return status;
}

static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;

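	/*
	 * Workaround for 88SX60x1 FEr SATA#24.
	 *
	 * Chip may corrupt WRITEs if multi_count >= 4kB.
	 * Note that READs are unaffected.
	 *
	 * It's not clear if this errata really means "4K bytes",
	 * or if it always happens for multi_count > 7
	 * regardless of device sector_size.
	 *
	 * So, for safety, any write with multi_count > 7
	 * gets converted here into a regular PIO write instead:
	 */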
	if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
		if (qc->dev->multi_count > 7) {
			switch (tf->command) {
			case ATA_CMD_WRITE_MULTI:
				tf->command = ATA_CMD_PIO_WRITE;
				break;
			case ATA_CMD_WRITE_MULTI_FUA_EXT:
				tf->flags &= ~ATA_TFLAG_FUA;
				fallthrough;
			case ATA_CMD_WRITE_MULTI_EXT:
				tf->command = ATA_CMD_PIO_WRITE_EXT;
				break;
			}
		}
	}
}

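/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */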
static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf = &qc->tf;
	u16 flags = 0;
	unsigned in_index;

	switch (tf->protocol) {
	case ATA_PROT_DMA:
		if (tf->command == ATA_CMD_DSM)
			return AC_ERR_OK;
		fallthrough;
	case ATA_PROT_NCQ:
		break;
	case ATA_PROT_PIO:
		mv_rw_multi_errata_sata24(qc);
		return AC_ERR_OK;
	default:
		return AC_ERR_OK;
	}

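	/* Fill in command request block */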
	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];

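	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
	 */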
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
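		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 */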
		ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
				tf->command);
		return AC_ERR_INVALID;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;
	mv_fill_sg(qc);

	return AC_ERR_OK;
}

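/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */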
static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf = &qc->tf;
	unsigned in_index;
	u32 flags = 0;

	if ((tf->protocol != ATA_PROT_DMA) &&
	    (tf->protocol != ATA_PROT_NCQ))
		return AC_ERR_OK;
	if (tf->command == ATA_CMD_DSM)
		return AC_ERR_OK;

	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
	flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;
	mv_fill_sg(qc);

	return AC_ERR_OK;
}

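/**
 *	mv_sff_check_status - fetch device status, if valid
 *	@ap: ATA port to fetch status from
 *
 *	When using command issue via mv_qc_issue_fis(),
 *	the initial ATA_BUSY state does not show up in the
 *	ATA status (shadow) register.  This can confuse libata!
 *
 *	So we have a hook here to fake ATA_BUSY for that situation,
 *	until the first time a BUSY, DRQ, or ERR bit is seen.
 *
 *	The rest of the time, it simply returns the ATA status register.
 */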
static u8 mv_sff_check_status(struct ata_port *ap)
{
	u8 stat = ioread8(ap->ioaddr.status_addr);
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
		else
			stat = ATA_BUSY;
	}
	return stat;
}

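/**
 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *	@ap: ATA port to send the FIS on
 *	@fis: fis to be sent
 *	@nwords: number of 32-bit words in the fis
 */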
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 ifctl, old_ifctl, ifstat;
	int i, timeout = 200, final_word = nwords - 1;

	old_ifctl = readl(port_mmio + SATA_IFCTL);
	ifctl = 0x100 | (old_ifctl & 0xf);
	writelfl(ifctl, port_mmio + SATA_IFCTL);

	for (i = 0; i < final_word; ++i)
		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);

	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);

	do {
		ifstat = readl(port_mmio + SATA_IFSTAT);
	} while (!(ifstat & 0x1000) && --timeout);

	writelfl(old_ifctl, port_mmio + SATA_IFCTL);

	if ((ifstat & 0x3000) != 0x1000) {
		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
			      __func__, ifstat);
		return AC_ERR_OTHER;
	}
	return 0;
}

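/**
 *	mv_qc_issue_fis - Issue a command directly as a FIS
 *	@qc: queued command to start
 *
 *	Note that the ATA shadow registers are not updated
 *	after command issue, so the device will appear "READY"
 *	if polled, even while it is BUSY processing the command.
 *
 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
 *
 *	Note: we don't get updated shadow regs on *completion*
 *	of non-data commands.  So avoid sending them via this function,
 *	as they will appear to have completed immediately.
 */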
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_link *link = qc->dev->link;
	u32 fis[5];
	int err = 0;

	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
	if (err)
		return err;

	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		fallthrough;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_FIRST;
		break;
	case ATA_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			ap->hsm_task_state = HSM_ST_FIRST;
		else
			ap->hsm_task_state = HSM_ST;
		break;
	default:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		ata_sff_queue_pio_task(link, 0);
	return 0;
}

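/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */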
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	static int limit_warnings = 10;
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;
	unsigned int port_irqs;

	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		if (qc->tf.command == ATA_CMD_DSM) {
			if (!ap->ops->bmdma_setup)
				return AC_ERR_OTHER;
			break;
		}
		fallthrough;
	case ATA_PROT_NCQ:
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
					port_mmio + EDMA_REQ_Q_IN_PTR);
		return 0;

	case ATA_PROT_PIO:
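		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 */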
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_warn(qc->dev->link, DRV_NAME
				      ": attempting PIO w/multiple DRQ: "
				      "this may fail due to h/w errata\n");
		}
		fallthrough;
	case ATA_PROT_NODATA:
	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (ap->flags & ATA_FLAG_PIO_POLLING)
			qc->tf.flags |= ATA_TFLAG_POLLING;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		port_irqs = ERR_IRQ;
	else
		port_irqs = ERR_IRQ | DONE_IRQ;

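	/*
	 * We're about to send a non-EDMA capable command to the
	 * port.  Turn off EDMA so there won't be problems accessing
	 * shadow block, etc registers.
	 */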
	mv_stop_edma(ap);
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
	mv_pmp_select(ap, qc->dev->link->pmp);

	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
		struct mv_host_priv *hpriv = ap->host->private_data;

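		/*
		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
		 *
		 * After any NCQ error, the READ_LOG_EXT command
		 * from libata-eh *must* use mv_qc_issue_fis().
		 * Otherwise it might fail, due to chip errata.
		 *
		 * Rather than special-case it, we'll just *always*
		 * use this method here for READ_LOG_EXT, making for
		 * easier testing.
		 */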
		if (IS_GEN_II(hpriv))
			return mv_qc_issue_fis(qc);
	}
	return ata_bmdma_qc_issue(qc);
}

static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
		return qc;
	return NULL;
}

static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
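		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */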
2446 pmp_map = pp->delayed_eh_pmp_map;
2447 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2448 for (pmp = 0; pmp_map != 0; pmp++) {
2449 unsigned int this_pmp = (1 << pmp);
2450 if (pmp_map & this_pmp) {
2451 struct ata_link *link = &ap->pmp_link[pmp];
2452 pmp_map &= ~this_pmp;
2453 ata_eh_analyze_ncq_error(link);
2454 }
2455 }
2456 ata_port_freeze(ap);
2457 }
2458 sata_pmp_error_handler(ap);
2459}
2460
2461static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2462{
2463 void __iomem *port_mmio = mv_ap_base(ap);
2464
2465 return readl(port_mmio + SATA_TESTCTL) >> 16;
2466}
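
/*
 * For reference: mv_get_err_pmp_map() relies on bits 31:16 of SATA_TESTCTL
 * forming a per-PMP device-error bitmap. A readl() value of 0x00050000,
 * for instance, decodes to pmp_map 0x0005: errors on PMP links 0 and 2.
 */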
2467
2468static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2469{
2470 unsigned int pmp;
2471
 /*
 * Initialize EH info for PMPs which saw device errors
 */
2475 for (pmp = 0; pmp_map != 0; pmp++) {
2476 unsigned int this_pmp = (1 << pmp);
2477 if (pmp_map & this_pmp) {
2478 struct ata_link *link = &ap->pmp_link[pmp];
2479 struct ata_eh_info *ehi = &link->eh_info;
2480
2481 pmp_map &= ~this_pmp;
2482 ata_ehi_clear_desc(ehi);
2483 ata_ehi_push_desc(ehi, "dev err");
2484 ehi->err_mask |= AC_ERR_DEV;
2485 ehi->action |= ATA_EH_RESET;
2486 ata_link_abort(link);
2487 }
2488 }
2489}
2490
2491static int mv_req_q_empty(struct ata_port *ap)
2492{
2493 void __iomem *port_mmio = mv_ap_base(ap);
2494 u32 in_ptr, out_ptr;
2495
2496 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2497 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2498 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2499 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2500 return (in_ptr == out_ptr);
2501}
2502
2503static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2504{
2505 struct mv_port_priv *pp = ap->private_data;
2506 int failed_links;
2507 unsigned int old_map, new_map;
2508
 /*
 * Device error during FBS+NCQ operation:
 *
 * Set a port flag to prevent further I/O being enqueued.
 * Leave the EDMA running to drain outstanding commands from this port.
 * Perform the post-mortem/EH only when all responses are complete.
 */
2517 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2518 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2519 pp->delayed_eh_pmp_map = 0;
2520 }
2521 old_map = pp->delayed_eh_pmp_map;
2522 new_map = old_map | mv_get_err_pmp_map(ap);
2523
2524 if (old_map != new_map) {
2525 pp->delayed_eh_pmp_map = new_map;
2526 mv_pmp_eh_prep(ap, new_map & ~old_map);
2527 }
2528 failed_links = hweight16(new_map);
2529
2530 ata_port_info(ap,
2531 "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
2532 __func__, pp->delayed_eh_pmp_map,
2533 ap->qc_active, failed_links,
2534 ap->nr_active_links);
2535
2536 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2537 mv_process_crpb_entries(ap, pp);
2538 mv_stop_edma(ap);
2539 mv_eh_freeze(ap);
2540 ata_port_info(ap, "%s: done\n", __func__);
2541 return 1;
2542 }
2543 ata_port_info(ap, "%s: waiting\n", __func__);
2544 return 1;
2545}
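
/*
 * Worked example for the accounting above: if PMPs 1 and 3 have failed,
 * new_map is 0x000a and hweight16(new_map) gives failed_links = 2; EH is
 * then deferred until only those two links remain active and the EDMA
 * request queue has drained.
 */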
2546
2547static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2548{
 /*
 * Possible future enhancement:
 *
 * FBS+non-NCQ operation is not yet implemented.
 * See related notes in mv_edma_cfg().
 *
 * Device error during FBS+non-NCQ operation:
 *
 * We need to snapshot the shadow registers for each failed command.
 */
2560 return 0;
2561}
2562
2563static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2564{
2565 struct mv_port_priv *pp = ap->private_data;
2566
2567 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2568 return 0;
2569 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2570 return 0;
2571
2572 if (!(edma_err_cause & EDMA_ERR_DEV))
2573 return 0;
2574 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2575 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2576 return 0;
2577
2578 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
 /*
 * EDMA should NOT have self-disabled for this case.
 * If it did, then something is wrong elsewhere,
 * and we cannot handle it here.
 */
2584 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2585 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2586 __func__, edma_err_cause, pp->pp_flags);
2587 return 0;
2588 }
2589 return mv_handle_fbs_ncq_dev_err(ap);
2590 } else {
 /*
 * EDMA should have self-disabled for this case.
 * If it did not, then something is wrong elsewhere,
 * and we cannot handle it here.
 */
2596 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2597 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2598 __func__, edma_err_cause, pp->pp_flags);
2599 return 0;
2600 }
2601 return mv_handle_fbs_non_ncq_dev_err(ap);
2602 }
2603 return 0;
2604}
2605
2606static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2607{
2608 struct ata_eh_info *ehi = &ap->link.eh_info;
2609 char *when = "idle";
2610
2611 ata_ehi_clear_desc(ehi);
2612 if (edma_was_enabled) {
2613 when = "EDMA enabled";
2614 } else {
2615 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2616 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2617 when = "polling";
2618 }
2619 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2620 ehi->err_mask |= AC_ERR_OTHER;
2621 ehi->action |= ATA_EH_RESET;
2622 ata_port_freeze(ap);
2623}
2624
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 *
 * Most cases require a full reset of the chip's state machine,
 * which also performs a COMRESET.
 * Also, if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
2636static void mv_err_intr(struct ata_port *ap)
2637{
2638 void __iomem *port_mmio = mv_ap_base(ap);
2639 u32 edma_err_cause, eh_freeze_mask, serr = 0;
2640 u32 fis_cause = 0;
2641 struct mv_port_priv *pp = ap->private_data;
2642 struct mv_host_priv *hpriv = ap->host->private_data;
2643 unsigned int action = 0, err_mask = 0;
2644 struct ata_eh_info *ehi = &ap->link.eh_info;
2645 struct ata_queued_cmd *qc;
2646 int abort = 0;
2647
 /*
 * Read and clear the SError and err_cause bits.
 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
 */
2653 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2654 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2655
2656 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2657 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2658 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2659 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2660 }
2661 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2662
2663 if (edma_err_cause & EDMA_ERR_DEV) {
 /*
 * Device errors during FIS-based switching operation
 * require special handling.
 */
2668 if (mv_handle_dev_err(ap, edma_err_cause))
2669 return;
2670 }
2671
2672 qc = mv_get_active_qc(ap);
2673 ata_ehi_clear_desc(ehi);
2674 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2675 edma_err_cause, pp->pp_flags);
2676
2677 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2678 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2679 if (fis_cause & FIS_IRQ_CAUSE_AN) {
2680 u32 ec = edma_err_cause &
2681 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2682 sata_async_notification(ap);
2683 if (!ec)
2684 return;
2685 ata_ehi_push_desc(ehi, "SDB notify");
2686 }
2687 }

 /*
 * All generations share these EDMA error cause bits:
 */
2691 if (edma_err_cause & EDMA_ERR_DEV) {
2692 err_mask |= AC_ERR_DEV;
2693 action |= ATA_EH_RESET;
2694 ata_ehi_push_desc(ehi, "dev error");
2695 }
2696 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2697 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2698 EDMA_ERR_INTRL_PAR)) {
2699 err_mask |= AC_ERR_ATA_BUS;
2700 action |= ATA_EH_RESET;
2701 ata_ehi_push_desc(ehi, "parity error");
2702 }
2703 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2704 ata_ehi_hotplugged(ehi);
2705 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2706 "dev disconnect" : "dev connect");
2707 action |= ATA_EH_RESET;
2708 }

 /*
 * Gen-I has a different SELF_DIS bit,
 * different FREEZE bits, and no SERR bit:
 */
2714 if (IS_GEN_I(hpriv)) {
2715 eh_freeze_mask = EDMA_EH_FREEZE_5;
2716 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2717 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2718 ata_ehi_push_desc(ehi, "EDMA self-disable");
2719 }
2720 } else {
2721 eh_freeze_mask = EDMA_EH_FREEZE;
2722 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2723 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2724 ata_ehi_push_desc(ehi, "EDMA self-disable");
2725 }
2726 if (edma_err_cause & EDMA_ERR_SERR) {
2727 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2728 err_mask |= AC_ERR_ATA_BUS;
2729 action |= ATA_EH_RESET;
2730 }
2731 }
2732
2733 if (!err_mask) {
2734 err_mask = AC_ERR_OTHER;
2735 action |= ATA_EH_RESET;
2736 }
2737
2738 ehi->serror |= serr;
2739 ehi->action |= action;
2740
2741 if (qc)
2742 qc->err_mask |= err_mask;
2743 else
2744 ehi->err_mask |= err_mask;
2745
2746 if (err_mask == AC_ERR_DEV) {
 /*
 * Cannot do ata_port_freeze() here,
 * because it would kill PIO access,
 * which is needed for further diagnosis.
 */
2752 mv_eh_freeze(ap);
2753 abort = 1;
2754 } else if (edma_err_cause & eh_freeze_mask) {
 /*
 * Note to self: ata_port_freeze() calls ata_port_abort()
 */
2758 ata_port_freeze(ap);
2759 } else {
2760 abort = 1;
2761 }
2762
2763 if (abort) {
2764 if (qc)
2765 ata_link_abort(qc->dev->link);
2766 else
2767 ata_port_abort(ap);
2768 }
2769}
2770
2771static bool mv_process_crpb_response(struct ata_port *ap,
2772 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2773{
2774 u8 ata_status;
2775 u16 edma_status = le16_to_cpu(response->flags);
2776
 /*
 * edma_status from a response queue entry:
 *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
 *   MSB is saved ATA status from command completion.
 */
2782 if (!ncq_enabled) {
2783 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2784 if (err_cause) {
 /*
 * Error will be seen/handled by
 * mv_err_intr(). So do nothing at all here.
 */
2789 return false;
2790 }
2791 }
2792 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2793 if (!ac_err_mask(ata_status))
2794 return true;
2795
2796 return false;
2797}
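
/*
 * Example decode for the CRPB flag layout handled above: a response->flags
 * of 0x5000 has no error bits in its low byte, and 0x5000 >> 8 yields an
 * ATA status of 0x50 (DRDY set, ERR/BSY/DRQ clear), so ac_err_mask()
 * returns 0 and the response is counted as a successful completion.
 */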
2798
2799static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2800{
2801 void __iomem *port_mmio = mv_ap_base(ap);
2802 struct mv_host_priv *hpriv = ap->host->private_data;
2803 u32 in_index;
2804 bool work_done = false;
2805 u32 done_mask = 0;
2806 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2807
 /* Get the hardware queue position index */
2809 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2810 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2811
 /* Process new responses from since the last time we looked */
2813 while (in_index != pp->resp_idx) {
2814 unsigned int tag;
2815 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2816
2817 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2818
2819 if (IS_GEN_I(hpriv)) {
 /* Gen I supports only one command active at a time: use the active tag */
2821 tag = ap->link.active_tag;
2822 } else {
 /* Gen II/IIE: get command tag from CRPB entry */
2824 tag = le16_to_cpu(response->id) & 0x1f;
2825 }
2826 if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2827 done_mask |= 1 << tag;
2828 work_done = true;
2829 }
2830
2831 if (work_done) {
2832 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);

 /* Update the software queue position index in hardware */
2835 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2836 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2837 port_mmio + EDMA_RSP_Q_OUT_PTR);
2838 }
2839}
2840
2841static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2842{
2843 struct mv_port_priv *pp;
2844 int edma_was_enabled;
2845
 /*
 * Grab a snapshot of the EDMA_EN flag setting,
 * so that we have a consistent view for this port,
 * even if something we call of our routines changes it.
 */
2851 pp = ap->private_data;
2852 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);

 /*
 * Process completed CRPB response(s) before other events.
 */
2856 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2857 mv_process_crpb_entries(ap, pp);
2858 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2859 mv_handle_fbs_ncq_dev_err(ap);
2860 }

 /*
 * Handle chip-reported errors, or continue on to handle PIO.
 */
2864 if (unlikely(port_cause & ERR_IRQ)) {
2865 mv_err_intr(ap);
2866 } else if (!edma_was_enabled) {
2867 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2868 if (qc)
2869 ata_bmdma_port_intr(ap, qc);
2870 else
2871 mv_unexpected_intr(ap, edma_was_enabled);
2872 }
2873}
2874
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @main_irq_cause: Main interrupt cause register for the chip.
 *
 * LOCKING:
 * Inherited from caller.
 */
2883static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2884{
2885 struct mv_host_priv *hpriv = host->private_data;
2886 void __iomem *mmio = hpriv->base, *hc_mmio;
2887 unsigned int handled = 0, port;

 /* If asserted, clear the "all ports" IRQ coalescing bit */
2890 if (main_irq_cause & ALL_PORTS_COAL_DONE)
2891 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2892
2893 for (port = 0; port < hpriv->n_ports; port++) {
2894 struct ata_port *ap = host->ports[port];
2895 unsigned int p, shift, hardport, port_cause;
2896
2897 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

 /*
 * Each hc within the host has its own main_irq_cause register,
 * where the interrupting ports bits get ack'd.
 */
2902 if (hardport == 0) {
2903 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2904 u32 port_mask, ack_irqs;
 /*
 * Skip this entire hc if nothing pending for any ports
 */
2908 if (!hc_cause) {
2909 port += MV_PORTS_PER_HC - 1;
2910 continue;
2911 }
 /*
 * We don't need/want to read the hc_irq_cause register,
 * because doing so hurts performance, and
 * main_irq_cause already gives us everything we need.
 *
 * But we do have to *write* to the hc_irq_cause to ack
 * the ports that we are handling this time through.
 *
 * This requires that we create a bitmap for those
 * ports which interrupted us, and use that bitmap
 * to ack (only) those ports via hc_irq_cause.
 */
2924 ack_irqs = 0;
2925 if (hc_cause & PORTS_0_3_COAL_DONE)
2926 ack_irqs = HC_COAL_IRQ;
2927 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2928 if ((port + p) >= hpriv->n_ports)
2929 break;
2930 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2931 if (hc_cause & port_mask)
2932 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2933 }
2934 hc_mmio = mv_hc_base_from_port(mmio, port);
2935 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2936 handled = 1;
2937 }

 /*
 * Handle interrupts signalled for this port:
 */
2941 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2942 if (port_cause)
2943 mv_port_intr(ap, port_cause);
2944 }
2945 return handled;
2946}
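
/*
 * A hedged sketch of the bit math used in mv_host_intr() above: the main
 * cause register allots each host controller its own field, with two bits
 * (DONE_IRQ | ERR_IRQ) per hardport inside that field. The exact shift for
 * a given port comes from MV_PORT_TO_SHIFT_AND_HARDPORT(), so e.g. hardport
 * 1 of an HC tests its DONE/ERR pair two bits above hardport 0's pair.
 */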
2947
2948static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2949{
2950 struct mv_host_priv *hpriv = host->private_data;
2951 struct ata_port *ap;
2952 struct ata_queued_cmd *qc;
2953 struct ata_eh_info *ehi;
2954 unsigned int i, err_mask, printed = 0;
2955 u32 err_cause;
2956
2957 err_cause = readl(mmio + hpriv->irq_cause_offset);
2958
2959 dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
2960
2961 DPRINTK("All regs @ PCI error\n");
2962 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2963
2964 writelfl(0, mmio + hpriv->irq_cause_offset);
2965
2966 for (i = 0; i < host->n_ports; i++) {
2967 ap = host->ports[i];
2968 if (!ata_link_offline(&ap->link)) {
2969 ehi = &ap->link.eh_info;
2970 ata_ehi_clear_desc(ehi);
2971 if (!printed++)
2972 ata_ehi_push_desc(ehi,
2973 "PCI err cause 0x%08x", err_cause);
2974 err_mask = AC_ERR_HOST_BUS;
2975 ehi->action = ATA_EH_RESET;
2976 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2977 if (qc)
2978 qc->err_mask |= err_mask;
2979 else
2980 ehi->err_mask |= err_mask;
2981
2982 ata_port_freeze(ap);
2983 }
2984 }
2985 return 1;
2986}
2987
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts. If so, call lower level
 * routine to handle. Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
3002static irqreturn_t mv_interrupt(int irq, void *dev_instance)
3003{
3004 struct ata_host *host = dev_instance;
3005 struct mv_host_priv *hpriv = host->private_data;
3006 unsigned int handled = 0;
3007 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
3008 u32 main_irq_cause, pending_irqs;
3009
3010 spin_lock(&host->lock);
3011
3012
 /* for MSI: block new interrupts while in here */
3014 mv_write_main_irq_mask(0, hpriv);
3015
3016 main_irq_cause = readl(hpriv->main_irq_cause_addr);
3017 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
3018
 /*
 * Deal with cases where we either have nothing pending, or have read
 * a bogus register value which can indicate hw removal or PCI fault.
 */
3022 if (pending_irqs && main_irq_cause != 0xffffffffU) {
3023 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
3024 handled = mv_pci_error(host, hpriv->base);
3025 else
3026 handled = mv_host_intr(host, pending_irqs);
3027 }
3028
3029
 /* for MSI: unmask; interrupt cause bits will retrigger now */
3031 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3032
3033 spin_unlock(&host->lock);
3034
3035 return IRQ_RETVAL(handled);
3036}
3037
3038static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3039{
3040 unsigned int ofs;
3041
3042 switch (sc_reg_in) {
3043 case SCR_STATUS:
3044 case SCR_ERROR:
3045 case SCR_CONTROL:
3046 ofs = sc_reg_in * sizeof(u32);
3047 break;
3048 default:
3049 ofs = 0xffffffffU;
3050 break;
3051 }
3052 return ofs;
3053}
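
/*
 * This offset calculation works because libata defines SCR_STATUS,
 * SCR_ERROR and SCR_CONTROL as 0, 1 and 2, and the 50xx PHY block lays
 * those registers out as consecutive 32-bit words: 0x0, 0x4 and 0x8.
 */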
3054
3055static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3056{
3057 struct mv_host_priv *hpriv = link->ap->host->private_data;
3058 void __iomem *mmio = hpriv->base;
3059 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3060 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3061
3062 if (ofs != 0xffffffffU) {
3063 *val = readl(addr + ofs);
3064 return 0;
3065 } else
3066 return -EINVAL;
3067}
3068
3069static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3070{
3071 struct mv_host_priv *hpriv = link->ap->host->private_data;
3072 void __iomem *mmio = hpriv->base;
3073 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3074 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3075
3076 if (ofs != 0xffffffffU) {
3077 writelfl(val, addr + ofs);
3078 return 0;
3079 } else
3080 return -EINVAL;
3081}
3082
3083static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3084{
3085 struct pci_dev *pdev = to_pci_dev(host->dev);
3086 int early_5080;
3087
3088 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3089
3090 if (!early_5080) {
3091 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3092 tmp |= (1 << 0);
3093 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3094 }
3095
3096 mv_reset_pci_bus(host, mmio);
3097}
3098
3099static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3100{
3101 writel(0x0fcfffff, mmio + FLASH_CTL);
3102}
3103
3104static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3105 void __iomem *mmio)
3106{
3107 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3108 u32 tmp;
3109
3110 tmp = readl(phy_mmio + MV5_PHY_MODE);
3111
3112 hpriv->signal[idx].pre = tmp & 0x1800;
3113 hpriv->signal[idx].amps = tmp & 0xe0;
3114}
3115
3116static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3117{
3118 u32 tmp;
3119
3120 writel(0, mmio + GPIO_PORT_CTL);
3121
3122
3123
3124 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3125 tmp |= ~(1 << 0);
3126 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3127}
3128
3129static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3130 unsigned int port)
3131{
3132 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3133 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3134 u32 tmp;
3135 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3136
3137 if (fix_apm_sq) {
3138 tmp = readl(phy_mmio + MV5_LTMODE);
3139 tmp |= (1 << 19);
3140 writel(tmp, phy_mmio + MV5_LTMODE);
3141
3142 tmp = readl(phy_mmio + MV5_PHY_CTL);
3143 tmp &= ~0x3;
3144 tmp |= 0x1;
3145 writel(tmp, phy_mmio + MV5_PHY_CTL);
3146 }
3147
3148 tmp = readl(phy_mmio + MV5_PHY_MODE);
3149 tmp &= ~mask;
3150 tmp |= hpriv->signal[port].pre;
3151 tmp |= hpriv->signal[port].amps;
3152 writel(tmp, phy_mmio + MV5_PHY_MODE);
3153}
3154
3155
3156#undef ZERO
3157#define ZERO(reg) writel(0, port_mmio + (reg))
3158static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3159 unsigned int port)
3160{
3161 void __iomem *port_mmio = mv_port_base(mmio, port);
3162
3163 mv_reset_channel(hpriv, mmio, port);
3164
3165 ZERO(0x028);
3166 writel(0x11f, port_mmio + EDMA_CFG);
3167 ZERO(0x004);
3168 ZERO(0x008);
3169 ZERO(0x00c);
3170 ZERO(0x010);
3171 ZERO(0x014);
3172 ZERO(0x018);
3173 ZERO(0x01c);
3174 ZERO(0x024);
3175 ZERO(0x020);
3176 ZERO(0x02c);
3177 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3178}
3179#undef ZERO
3180
3181#define ZERO(reg) writel(0, hc_mmio + (reg))
3182static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3183 unsigned int hc)
3184{
3185 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3186 u32 tmp;
3187
3188 ZERO(0x00c);
3189 ZERO(0x010);
3190 ZERO(0x014);
3191 ZERO(0x018);
3192
3193 tmp = readl(hc_mmio + 0x20);
3194 tmp &= 0x1c1c1c1c;
3195 tmp |= 0x03030303;
3196 writel(tmp, hc_mmio + 0x20);
3197}
3198#undef ZERO
3199
3200static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3201 unsigned int n_hc)
3202{
3203 unsigned int hc, port;
3204
3205 for (hc = 0; hc < n_hc; hc++) {
3206 for (port = 0; port < MV_PORTS_PER_HC; port++)
3207 mv5_reset_hc_port(hpriv, mmio,
3208 (hc * MV_PORTS_PER_HC) + port);
3209
3210 mv5_reset_one_hc(hpriv, mmio, hc);
3211 }
3212
3213 return 0;
3214}
3215
3216#undef ZERO
3217#define ZERO(reg) writel(0, mmio + (reg))
3218static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3219{
3220 struct mv_host_priv *hpriv = host->private_data;
3221 u32 tmp;
3222
3223 tmp = readl(mmio + MV_PCI_MODE);
3224 tmp &= 0xff00ffff;
3225 writel(tmp, mmio + MV_PCI_MODE);
3226
3227 ZERO(MV_PCI_DISC_TIMER);
3228 ZERO(MV_PCI_MSI_TRIGGER);
3229 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3230 ZERO(MV_PCI_SERR_MASK);
3231 ZERO(hpriv->irq_cause_offset);
3232 ZERO(hpriv->irq_mask_offset);
3233 ZERO(MV_PCI_ERR_LOW_ADDRESS);
3234 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3235 ZERO(MV_PCI_ERR_ATTRIBUTE);
3236 ZERO(MV_PCI_ERR_COMMAND);
3237}
3238#undef ZERO
3239
3240static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3241{
3242 u32 tmp;
3243
3244 mv5_reset_flash(hpriv, mmio);
3245
3246 tmp = readl(mmio + GPIO_PORT_CTL);
3247 tmp &= 0x3;
3248 tmp |= (1 << 5) | (1 << 6);
3249 writel(tmp, mmio + GPIO_PORT_CTL);
3250}
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3262 unsigned int n_hc)
3263{
3264 void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3265 int i, rc = 0;
3266 u32 t;
3267
 /*
 * Following procedure defined in PCI "main command and status
 * register" table.
 */
3271 t = readl(reg);
3272 writel(t | STOP_PCI_MASTER, reg);
3273
3274 for (i = 0; i < 1000; i++) {
3275 udelay(1);
3276 t = readl(reg);
3277 if (PCI_MASTER_EMPTY & t)
3278 break;
3279 }
3280 if (!(PCI_MASTER_EMPTY & t)) {
3281 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3282 rc = 1;
3283 goto done;
3284 }
3285
 /* set reset */
3287 i = 5;
3288 do {
3289 writel(t | GLOB_SFT_RST, reg);
3290 t = readl(reg);
3291 udelay(1);
3292 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3293
3294 if (!(GLOB_SFT_RST & t)) {
3295 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3296 rc = 1;
3297 goto done;
3298 }
3299
 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
3301 i = 5;
3302 do {
3303 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3304 t = readl(reg);
3305 udelay(1);
3306 } while ((GLOB_SFT_RST & t) && (i-- > 0));
3307
3308 if (GLOB_SFT_RST & t) {
3309 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3310 rc = 1;
3311 }
3312done:
3313 return rc;
3314}
3315
3316static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3317 void __iomem *mmio)
3318{
3319 void __iomem *port_mmio;
3320 u32 tmp;
3321
3322 tmp = readl(mmio + RESET_CFG);
3323 if ((tmp & (1 << 0)) == 0) {
3324 hpriv->signal[idx].amps = 0x7 << 8;
3325 hpriv->signal[idx].pre = 0x1 << 5;
3326 return;
3327 }
3328
3329 port_mmio = mv_port_base(mmio, idx);
3330 tmp = readl(port_mmio + PHY_MODE2);
3331
3332 hpriv->signal[idx].amps = tmp & 0x700;
3333 hpriv->signal[idx].pre = tmp & 0xe0;
3334}
3335
3336static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3337{
3338 writel(0x00000060, mmio + GPIO_PORT_CTL);
3339}
3340
3341static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3342 unsigned int port)
3343{
3344 void __iomem *port_mmio = mv_port_base(mmio, port);
3345
3346 u32 hp_flags = hpriv->hp_flags;
3347 int fix_phy_mode2 =
3348 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3349 int fix_phy_mode4 =
3350 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3351 u32 m2, m3;
3352
3353 if (fix_phy_mode2) {
3354 m2 = readl(port_mmio + PHY_MODE2);
3355 m2 &= ~(1 << 16);
3356 m2 |= (1 << 31);
3357 writel(m2, port_mmio + PHY_MODE2);
3358
3359 udelay(200);
3360
3361 m2 = readl(port_mmio + PHY_MODE2);
3362 m2 &= ~((1 << 16) | (1 << 31));
3363 writel(m2, port_mmio + PHY_MODE2);
3364
3365 udelay(200);
3366 }
3367
 /*
 * Gen-II/IIe PHY_MODE3 errata RM#2:
 * Achieves better receiver noise performance than the h/w default:
 */
3372 m3 = readl(port_mmio + PHY_MODE3);
3373 m3 = (m3 & 0x1f) | (0x5555601 << 5);

 /* Guideline 88F5182 (GL# SATA-S11) */
3376 if (IS_SOC(hpriv))
3377 m3 &= ~0x1c;
3378
3379 if (fix_phy_mode4) {
3380 u32 m4 = readl(port_mmio + PHY_MODE4);

 /*
 * Enforce reserved-bit restrictions on GenIIe devices only.
 * For earlier chipsets, force only the internal config field
 * (workaround for errata FEr SATA#10 part 1).
 */
3386 if (IS_GEN_IIE(hpriv))
3387 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3388 else
3389 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3390 writel(m4, port_mmio + PHY_MODE4);
3391 }

 /*
 * Workaround for 60x1-B2 errata SATA#13:
 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
 */
3398 writel(m3, port_mmio + PHY_MODE3);
3399
 /* Revert values of pre-emphasis and signal amps to the saved ones */
3401 m2 = readl(port_mmio + PHY_MODE2);
3402
3403 m2 &= ~MV_M2_PREAMP_MASK;
3404 m2 |= hpriv->signal[port].amps;
3405 m2 |= hpriv->signal[port].pre;
3406 m2 &= ~(1 << 16);

 /* according to mvSata 3.6.1, some IIE values are fixed */
3409 if (IS_GEN_IIE(hpriv)) {
3410 m2 &= ~0xC30FF01F;
3411 m2 |= 0x0000900F;
3412 }
3413
3414 writel(m2, port_mmio + PHY_MODE2);
3415}
3416
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
3419static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3420 void __iomem *mmio)
3421{
3422 return;
3423}
3424
3425static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3426 void __iomem *mmio)
3427{
3428 void __iomem *port_mmio;
3429 u32 tmp;
3430
3431 port_mmio = mv_port_base(mmio, idx);
3432 tmp = readl(port_mmio + PHY_MODE2);
3433
3434 hpriv->signal[idx].amps = tmp & 0x700;
3435 hpriv->signal[idx].pre = tmp & 0xe0;
3436}
3437
3438#undef ZERO
3439#define ZERO(reg) writel(0, port_mmio + (reg))
3440static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3441 void __iomem *mmio, unsigned int port)
3442{
3443 void __iomem *port_mmio = mv_port_base(mmio, port);
3444
3445 mv_reset_channel(hpriv, mmio, port);
3446
3447 ZERO(0x028);
3448 writel(0x101f, port_mmio + EDMA_CFG);
3449 ZERO(0x004);
3450 ZERO(0x008);
3451 ZERO(0x00c);
3452 ZERO(0x010);
3453 ZERO(0x014);
3454 ZERO(0x018);
3455 ZERO(0x01c);
3456 ZERO(0x024);
3457 ZERO(0x020);
3458 ZERO(0x02c);
3459 writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3460}
3461
3462#undef ZERO
3463
3464#define ZERO(reg) writel(0, hc_mmio + (reg))
3465static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3466 void __iomem *mmio)
3467{
3468 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3469
3470 ZERO(0x00c);
3471 ZERO(0x010);
3472 ZERO(0x014);
3473
3474}
3475
3476#undef ZERO
3477
3478static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3479 void __iomem *mmio, unsigned int n_hc)
3480{
3481 unsigned int port;
3482
3483 for (port = 0; port < hpriv->n_ports; port++)
3484 mv_soc_reset_hc_port(hpriv, mmio, port);
3485
3486 mv_soc_reset_one_hc(hpriv, mmio);
3487
3488 return 0;
3489}
3490
3491static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3492 void __iomem *mmio)
3493{
3494 return;
3495}
3496
3497static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3498{
3499 return;
3500}
3501
3502static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3503 void __iomem *mmio, unsigned int port)
3504{
3505 void __iomem *port_mmio = mv_port_base(mmio, port);
3506 u32 reg;
3507
3508 reg = readl(port_mmio + PHY_MODE3);
3509 reg &= ~(0x3 << 27);
3510 reg |= (0x1 << 27);
3511 reg &= ~(0x3 << 29);
3512 reg |= (0x1 << 29);
3513 writel(reg, port_mmio + PHY_MODE3);
3514
3515 reg = readl(port_mmio + PHY_MODE4);
3516 reg &= ~0x1;
3517 reg |= (0x1 << 16);
3518 writel(reg, port_mmio + PHY_MODE4);
3519
3520 reg = readl(port_mmio + PHY_MODE9_GEN2);
3521 reg &= ~0xf;
3522 reg |= 0x8;
3523 reg &= ~(0x1 << 14);
3524 writel(reg, port_mmio + PHY_MODE9_GEN2);
3525
3526 reg = readl(port_mmio + PHY_MODE9_GEN1);
3527 reg &= ~0xf;
3528 reg |= 0x8;
3529 reg &= ~(0x1 << 14);
3530 writel(reg, port_mmio + PHY_MODE9_GEN1);
3531}
3532
/*
 * soc_is_65n - check if the SoC is a 65nm device
 *
 * Detect the type of the SoC by reading the PHYCFG_OFS register:
 * it exists (and reads back non-zero) only on the 65nm devices;
 * on older devices reading it yields 0.
 */
3540static bool soc_is_65n(struct mv_host_priv *hpriv)
3541{
3542 void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3543
3544 if (readl(port0_mmio + PHYCFG_OFS))
3545 return true;
3546 return false;
3547}
3548
3549static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3550{
3551 u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3552
3553 ifcfg = (ifcfg & 0xf7f) | 0x9b1000;
3554 if (want_gen2i)
3555 ifcfg |= (1 << 7);
3556 writelfl(ifcfg, port_mmio + SATA_IFCFG);
3557}
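
/*
 * Worked example, assuming the SATA_IFCFG usage above: a starting ifcfg of
 * 0x00000040 becomes (0x40 & 0xf7f) | 0x9b1000 = 0x009b1040, and setting
 * bit 7 on top of that (want_gen2i) yields 0x009b10c0.
 */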
3558
3559static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3560 unsigned int port_no)
3561{
3562 void __iomem *port_mmio = mv_port_base(mmio, port_no);
3563
 /*
 * The datasheet warns against setting EDMA_RESET when EDMA is active
 * (but doesn't say what the problem might be). So we first try
 * to disable the EDMA engine before doing the EDMA_RESET operation.
 */
3569 mv_stop_edma_engine(port_mmio);
3570 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3571
3572 if (!IS_GEN_I(hpriv)) {
 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3574 mv_setup_ifcfg(port_mmio, 1);
3575 }
3576
 /*
 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
 * link, and physical layers. It resets all SATA interface registers
 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
 */
3581 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3582 udelay(25);
3583 writelfl(0, port_mmio + EDMA_CMD);
3584
3585 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3586
3587 if (IS_GEN_I(hpriv))
3588 usleep_range(500, 1000);
3589}
3590
3591static void mv_pmp_select(struct ata_port *ap, int pmp)
3592{
3593 if (sata_pmp_supported(ap)) {
3594 void __iomem *port_mmio = mv_ap_base(ap);
3595 u32 reg = readl(port_mmio + SATA_IFCTL);
3596 int old = reg & 0xf;
3597
3598 if (old != pmp) {
3599 reg = (reg & ~0xf) | pmp;
3600 writelfl(reg, port_mmio + SATA_IFCTL);
3601 }
3602 }
3603}
3604
3605static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3606 unsigned long deadline)
3607{
3608 mv_pmp_select(link->ap, sata_srst_pmp(link));
3609 return sata_std_hardreset(link, class, deadline);
3610}
3611
3612static int mv_softreset(struct ata_link *link, unsigned int *class,
3613 unsigned long deadline)
3614{
3615 mv_pmp_select(link->ap, sata_srst_pmp(link));
3616 return ata_sff_softreset(link, class, deadline);
3617}
3618
3619static int mv_hardreset(struct ata_link *link, unsigned int *class,
3620 unsigned long deadline)
3621{
3622 struct ata_port *ap = link->ap;
3623 struct mv_host_priv *hpriv = ap->host->private_data;
3624 struct mv_port_priv *pp = ap->private_data;
3625 void __iomem *mmio = hpriv->base;
3626 int rc, attempts = 0, extra = 0;
3627 u32 sstatus;
3628 bool online;
3629
3630 mv_reset_channel(hpriv, mmio, ap->port_no);
3631 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3632 pp->pp_flags &=
3633 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

 /* Workaround for errata FEr SATA#10 (part 2) */
3636 do {
3637 const unsigned long *timing =
3638 sata_ehc_deb_timing(&link->eh_context);
3639
3640 rc = sata_link_hardreset(link, timing, deadline + extra,
3641 &online, NULL);
3642 rc = online ? -EAGAIN : rc;
3643 if (rc)
3644 return rc;
3645 sata_scr_read(link, SCR_STATUS, &sstatus);
3646 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
 /* Force 1.5gb/s link speed and try again */
3648 mv_setup_ifcfg(mv_ap_base(ap), 0);
3649 if (time_after(jiffies + HZ, deadline))
3650 extra = HZ;
3651 }
3652 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
3653 mv_save_cached_regs(ap);
3654 mv_edma_cfg(ap, 0, 0);
3655
3656 return rc;
3657}
3658
3659static void mv_eh_freeze(struct ata_port *ap)
3660{
3661 mv_stop_edma(ap);
3662 mv_enable_port_irqs(ap, 0);
3663}
3664
3665static void mv_eh_thaw(struct ata_port *ap)
3666{
3667 struct mv_host_priv *hpriv = ap->host->private_data;
3668 unsigned int port = ap->port_no;
3669 unsigned int hardport = mv_hardport_from_port(port);
3670 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3671 void __iomem *port_mmio = mv_ap_base(ap);
3672 u32 hc_irq_cause;
3673
 /* clear EDMA errors on this port */
3675 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3676
 /* clear pending irq events */
3678 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3679 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3680
3681 mv_enable_port_irqs(ap, ERR_IRQ);
3682}
3683
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
3696static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3697{
3698 void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3699
 /* PIO related setup
 */
3702 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3703 port->error_addr =
3704 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3705 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3706 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3707 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3708 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3709 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3710 port->status_addr =
3711 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3712
3713 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3714
 /* Clear any currently outstanding port interrupt conditions */
3716 serr = port_mmio + mv_scr_offset(SCR_ERROR);
3717 writelfl(readl(serr), serr);
3718 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3719
 /* unmask all non-transient EDMA error interrupts */
3721 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3722
3723 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3724 readl(port_mmio + EDMA_CFG),
3725 readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3726 readl(port_mmio + EDMA_ERR_IRQ_MASK));
3727}
3728
3729static unsigned int mv_in_pcix_mode(struct ata_host *host)
3730{
3731 struct mv_host_priv *hpriv = host->private_data;
3732 void __iomem *mmio = hpriv->base;
3733 u32 reg;
3734
3735 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3736 return 0;
3737 reg = readl(mmio + MV_PCI_MODE);
3738 if ((reg & MV_PCI_MODE_MASK) == 0)
3739 return 0;
3740 return 1;
3741}
3742
3743static int mv_pci_cut_through_okay(struct ata_host *host)
3744{
3745 struct mv_host_priv *hpriv = host->private_data;
3746 void __iomem *mmio = hpriv->base;
3747 u32 reg;
3748
3749 if (!mv_in_pcix_mode(host)) {
3750 reg = readl(mmio + MV_PCI_COMMAND);
3751 if (reg & MV_PCI_COMMAND_MRDTRIG)
3752 return 0;
3753 }
3754 return 1;
3755}
3756
3757static void mv_60x1b2_errata_pci7(struct ata_host *host)
3758{
3759 struct mv_host_priv *hpriv = host->private_data;
3760 void __iomem *mmio = hpriv->base;
3761
 /* workaround for 60x1-B2 errata PCI#7 */
3763 if (mv_in_pcix_mode(host)) {
3764 u32 reg = readl(mmio + MV_PCI_COMMAND);
3765 writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3766 }
3767}
3768
3769static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3770{
3771 struct pci_dev *pdev = to_pci_dev(host->dev);
3772 struct mv_host_priv *hpriv = host->private_data;
3773 u32 hp_flags = hpriv->hp_flags;
3774
3775 switch (board_idx) {
3776 case chip_5080:
3777 hpriv->ops = &mv5xxx_ops;
3778 hp_flags |= MV_HP_GEN_I;
3779
3780 switch (pdev->revision) {
3781 case 0x1:
3782 hp_flags |= MV_HP_ERRATA_50XXB0;
3783 break;
3784 case 0x3:
3785 hp_flags |= MV_HP_ERRATA_50XXB2;
3786 break;
3787 default:
3788 dev_warn(&pdev->dev,
3789 "Applying 50XXB2 workarounds to unknown rev\n");
3790 hp_flags |= MV_HP_ERRATA_50XXB2;
3791 break;
3792 }
3793 break;
3794
3795 case chip_504x:
3796 case chip_508x:
3797 hpriv->ops = &mv5xxx_ops;
3798 hp_flags |= MV_HP_GEN_I;
3799
3800 switch (pdev->revision) {
3801 case 0x0:
3802 hp_flags |= MV_HP_ERRATA_50XXB0;
3803 break;
3804 case 0x3:
3805 hp_flags |= MV_HP_ERRATA_50XXB2;
3806 break;
3807 default:
3808 dev_warn(&pdev->dev,
3809 "Applying B2 workarounds to unknown rev\n");
3810 hp_flags |= MV_HP_ERRATA_50XXB2;
3811 break;
3812 }
3813 break;
3814
3815 case chip_604x:
3816 case chip_608x:
3817 hpriv->ops = &mv6xxx_ops;
3818 hp_flags |= MV_HP_GEN_II;
3819
3820 switch (pdev->revision) {
3821 case 0x7:
3822 mv_60x1b2_errata_pci7(host);
3823 hp_flags |= MV_HP_ERRATA_60X1B2;
3824 break;
3825 case 0x9:
3826 hp_flags |= MV_HP_ERRATA_60X1C0;
3827 break;
3828 default:
3829 dev_warn(&pdev->dev,
3830 "Applying B2 workarounds to unknown rev\n");
3831 hp_flags |= MV_HP_ERRATA_60X1B2;
3832 break;
3833 }
3834 break;
3835
3836 case chip_7042:
3837 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3838 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3839 (pdev->device == 0x2300 || pdev->device == 0x2310))
3840 {
 /*
 * Highpoint RocketRAID PCIe 23xx series cards:
 *
 * Unconfigured drives are treated as "Legacy"
 * by the BIOS, and it overwrites sector 8 with
 * a "Lgcy" metadata block prior to Linux boot.
 *
 * Configured drives (RAID or JBOD) leave sector 8
 * alone, but instead overwrite a high numbered
 * sector for the RAID metadata. This sector can
 * be determined exactly, by truncating the
 * physical drive capacity to a nice even GB value.
 *
 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
 *
 * Warn the user, lest they think we're just buggy.
 */
3858 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3859 " BIOS CORRUPTS DATA on all attached drives,"
3860 " regardless of if/how they are configured."
3861 " BEWARE!\n");
3862 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3863 " use sectors 8-9 on \"Legacy\" drives,"
3864 " and avoid the final two gigabytes on"
3865 " all RocketRAID BIOS initialized drives.\n");
3866 }
3867 fallthrough;
3868 case chip_6042:
3869 hpriv->ops = &mv6xxx_ops;
3870 hp_flags |= MV_HP_GEN_IIE;
3871 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3872 hp_flags |= MV_HP_CUT_THROUGH;
3873
3874 switch (pdev->revision) {
3875 case 0x2:
3876 hp_flags |= MV_HP_ERRATA_60X1C0;
3877 break;
3878 default:
3879 dev_warn(&pdev->dev,
3880 "Applying 60X1C0 workarounds to unknown rev\n");
3881 hp_flags |= MV_HP_ERRATA_60X1C0;
3882 break;
3883 }
3884 break;
3885 case chip_soc:
3886 if (soc_is_65n(hpriv))
3887 hpriv->ops = &mv_soc_65n_ops;
3888 else
3889 hpriv->ops = &mv_soc_ops;
3890 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3891 MV_HP_ERRATA_60X1C0;
3892 break;
3893
3894 default:
3895 dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
3896 return 1;
3897 }
3898
3899 hpriv->hp_flags = hp_flags;
3900 if (hp_flags & MV_HP_PCIE) {
3901 hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3902 hpriv->irq_mask_offset = PCIE_IRQ_MASK;
3903 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3904 } else {
3905 hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3906 hpriv->irq_mask_offset = PCI_IRQ_MASK;
3907 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
3908 }
3909
3910 return 0;
3911}
3912
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 *
 * If possible, do an early global reset of the host. Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
3923static int mv_init_host(struct ata_host *host)
3924{
3925 int rc = 0, n_hc, port, hc;
3926 struct mv_host_priv *hpriv = host->private_data;
3927 void __iomem *mmio = hpriv->base;
3928
3929 rc = mv_chip_id(host, hpriv->board_idx);
3930 if (rc)
3931 goto done;
3932
3933 if (IS_SOC(hpriv)) {
3934 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3935 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
3936 } else {
3937 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3938 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
3939 }
3940
 /* initialize shadow irq mask with register's value */
3942 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3943
 /* global interrupt mask: 0 == mask everything */
3945 mv_set_main_irq_mask(host, ~0, 0);
3946
3947 n_hc = mv_get_hc_count(host->ports[0]->flags);
3948
3949 for (port = 0; port < host->n_ports; port++)
3950 if (hpriv->ops->read_preamp)
3951 hpriv->ops->read_preamp(hpriv, port, mmio);
3952
3953 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3954 if (rc)
3955 goto done;
3956
3957 hpriv->ops->reset_flash(hpriv, mmio);
3958 hpriv->ops->reset_bus(host, mmio);
3959 hpriv->ops->enable_leds(hpriv, mmio);
3960
3961 for (port = 0; port < host->n_ports; port++) {
3962 struct ata_port *ap = host->ports[port];
3963 void __iomem *port_mmio = mv_port_base(mmio, port);
3964
3965 mv_port_init(&ap->ioaddr, port_mmio);
3966 }
3967
3968 for (hc = 0; hc < n_hc; hc++) {
3969 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3970
3971 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3972 "(before clear)=0x%08x\n", hc,
3973 readl(hc_mmio + HC_CFG),
3974 readl(hc_mmio + HC_IRQ_CAUSE));

 /* Clear any currently outstanding hc interrupt conditions */
3977 writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3978 }
3979
3980 if (!IS_SOC(hpriv)) {
 /* Clear any currently outstanding host interrupt conditions */
3982 writelfl(0, mmio + hpriv->irq_cause_offset);
3983
 /* and unmask interrupt generation for host regs */
3985 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3986 }
3987
 /*
 * enable only global host interrupts for now.
 * The per-port interrupts get done later as ports are set up.
 */
3992 mv_set_main_irq_mask(host, 0, PCI_ERR);
3993 mv_set_irq_coalescing(host, irq_coalescing_io_count,
3994 irq_coalescing_usecs);
3995done:
3996 return rc;
3997}
3998
3999static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
4000{
4001 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
4002 MV_CRQB_Q_SZ, 0);
4003 if (!hpriv->crqb_pool)
4004 return -ENOMEM;
4005
4006 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
4007 MV_CRPB_Q_SZ, 0);
4008 if (!hpriv->crpb_pool)
4009 return -ENOMEM;
4010
4011 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
4012 MV_SG_TBL_SZ, 0);
4013 if (!hpriv->sg_tbl_pool)
4014 return -ENOMEM;
4015
4016 return 0;
4017}
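
/*
 * The pools created above are device-managed (dmam_pool_create), so they
 * are released automatically when the device goes away. Allocation from
 * them follows the usual dma_pool pattern; a minimal sketch (the variable
 * names here are illustrative only):
 *
 *	dma_addr_t dma;
 *	void *crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &dma);
 *	if (!crqb)
 *		return -ENOMEM;
 */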
4018
4019static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
4020 const struct mbus_dram_target_info *dram)
4021{
4022 int i;
4023
4024 for (i = 0; i < 4; i++) {
4025 writel(0, hpriv->base + WINDOW_CTRL(i));
4026 writel(0, hpriv->base + WINDOW_BASE(i));
4027 }
4028
4029 for (i = 0; i < dram->num_cs; i++) {
4030 const struct mbus_dram_window *cs = dram->cs + i;
4031
4032 writel(((cs->size - 1) & 0xffff0000) |
4033 (cs->mbus_attr << 8) |
4034 (dram->mbus_dram_target_id << 4) | 1,
4035 hpriv->base + WINDOW_CTRL(i));
4036 writel(cs->base, hpriv->base + WINDOW_BASE(i));
4037 }
4038}
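
/*
 * Worked example for the window programming above, with illustrative
 * values (not from a real board): a 256 MiB chip-select with mbus_attr
 * 0xe and target id 0 produces a WINDOW_CTRL value of
 * ((0x10000000 - 1) & 0xffff0000) | (0xe << 8) | (0 << 4) | 1 = 0x0fff0e01,
 * while WINDOW_BASE simply holds the chip-select base address.
 */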
4039
/**
 * mv_platform_probe - handle a positive probe of an soc Marvell
 * host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
4048static int mv_platform_probe(struct platform_device *pdev)
4049{
4050 const struct mv_sata_platform_data *mv_platform_data;
4051 const struct mbus_dram_target_info *dram;
4052 const struct ata_port_info *ppi[] =
4053 { &mv_port_info[chip_soc], NULL };
4054 struct ata_host *host;
4055 struct mv_host_priv *hpriv;
4056 struct resource *res;
4057 int n_ports = 0, irq = 0;
4058 int rc;
4059 int port;
4060
4061 ata_print_version_once(&pdev->dev, DRV_VERSION);
4062
 /*
 * Simple resource validation ..
 */
4066 if (unlikely(pdev->num_resources != 2)) {
4067 dev_err(&pdev->dev, "invalid number of resources\n");
4068 return -EINVAL;
4069 }
4070
 /*
 * Get the register base first
 */
4074 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4075 if (res == NULL)
4076 return -EINVAL;
4077
 /* allocate host */
4079 if (pdev->dev.of_node) {
4080 rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
4081 &n_ports);
4082 if (rc) {
4083 dev_err(&pdev->dev,
4084 "error parsing nr-ports property: %d\n", rc);
4085 return rc;
4086 }
4087
4088 if (n_ports <= 0) {
4089 dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
4090 n_ports);
4091 return -EINVAL;
4092 }
4093
4094 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
4095 } else {
4096 mv_platform_data = dev_get_platdata(&pdev->dev);
4097 n_ports = mv_platform_data->n_ports;
4098 irq = platform_get_irq(pdev, 0);
4099 }
4100
4101 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4102 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4103
4104 if (!host || !hpriv)
4105 return -ENOMEM;
4106 hpriv->port_clks = devm_kcalloc(&pdev->dev,
4107 n_ports, sizeof(struct clk *),
4108 GFP_KERNEL);
4109 if (!hpriv->port_clks)
4110 return -ENOMEM;
4111 hpriv->port_phys = devm_kcalloc(&pdev->dev,
4112 n_ports, sizeof(struct phy *),
4113 GFP_KERNEL);
4114 if (!hpriv->port_phys)
4115 return -ENOMEM;
4116 host->private_data = hpriv;
4117 hpriv->board_idx = chip_soc;
4118
4119 host->iomap = NULL;
4120 hpriv->base = devm_ioremap(&pdev->dev, res->start,
4121 resource_size(res));
4122 if (!hpriv->base)
4123 return -ENOMEM;
4124
4125 hpriv->base -= SATAHC0_REG_BASE;
4126
4127 hpriv->clk = clk_get(&pdev->dev, NULL);
4128 if (IS_ERR(hpriv->clk))
4129 dev_notice(&pdev->dev, "cannot get optional clkdev\n");
4130 else
4131 clk_prepare_enable(hpriv->clk);
4132
4133 for (port = 0; port < n_ports; port++) {
4134 char port_number[16];
4135 sprintf(port_number, "%d", port);
4136 hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
4137 if (!IS_ERR(hpriv->port_clks[port]))
4138 clk_prepare_enable(hpriv->port_clks[port]);
4139
4140 sprintf(port_number, "port%d", port);
4141 hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
4142 port_number);
4143 if (IS_ERR(hpriv->port_phys[port])) {
4144 rc = PTR_ERR(hpriv->port_phys[port]);
4145 hpriv->port_phys[port] = NULL;
4146 if (rc != -EPROBE_DEFER)
 dev_warn(&pdev->dev, "error getting phy %d\n", rc);

 /* Cleanup only the initialized ports */
4150 hpriv->n_ports = port;
4151 goto err;
4152 } else
4153 phy_power_on(hpriv->port_phys[port]);
4154 }
4155
 /* All the ports have been initialized */
4157 hpriv->n_ports = n_ports;
4158
 /*
 * (Re-)program MBUS remapping windows if we are asked to.
 */
4162 dram = mv_mbus_dram_info();
4163 if (dram)
4164 mv_conf_mbus_windows(hpriv, dram);
4165
4166 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4167 if (rc)
4168 goto err;
4169
 /*
 * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
 * updated in the LP_PHY_CTL register.
 */
4174 if (pdev->dev.of_node &&
4175 of_device_is_compatible(pdev->dev.of_node,
4176 "marvell,armada-370-sata"))
4177 hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
4178
 /* initialize adapter */
4180 rc = mv_init_host(host);
4181 if (rc)
4182 goto err;
4183
4184 dev_info(&pdev->dev, "slots %u ports %d\n",
4185 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
4186
4187 rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
4188 if (!rc)
4189 return 0;
4190
4191err:
4192 if (!IS_ERR(hpriv->clk)) {
4193 clk_disable_unprepare(hpriv->clk);
4194 clk_put(hpriv->clk);
4195 }
4196 for (port = 0; port < hpriv->n_ports; port++) {
4197 if (!IS_ERR(hpriv->port_clks[port])) {
4198 clk_disable_unprepare(hpriv->port_clks[port]);
4199 clk_put(hpriv->port_clks[port]);
4200 }
4201 phy_power_off(hpriv->port_phys[port]);
4202 }
4203
4204 return rc;
4205}
4206
/*
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged. Perform the needed
 * cleanup. Also called on module unload for any active devices.
 */
4215static int mv_platform_remove(struct platform_device *pdev)
4216{
4217 struct ata_host *host = platform_get_drvdata(pdev);
4218 struct mv_host_priv *hpriv = host->private_data;
4219 int port;
4220 ata_host_detach(host);
4221
4222 if (!IS_ERR(hpriv->clk)) {
4223 clk_disable_unprepare(hpriv->clk);
4224 clk_put(hpriv->clk);
4225 }
4226 for (port = 0; port < host->n_ports; port++) {
4227 if (!IS_ERR(hpriv->port_clks[port])) {
4228 clk_disable_unprepare(hpriv->port_clks[port]);
4229 clk_put(hpriv->port_clks[port]);
4230 }
4231 phy_power_off(hpriv->port_phys[port]);
4232 }
4233 return 0;
4234}
4235
4236#ifdef CONFIG_PM_SLEEP
4237static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4238{
4239 struct ata_host *host = platform_get_drvdata(pdev);
4240 if (host)
4241 return ata_host_suspend(host, state);
4242 else
4243 return 0;
4244}
4245
4246static int mv_platform_resume(struct platform_device *pdev)
4247{
4248 struct ata_host *host = platform_get_drvdata(pdev);
4249 const struct mbus_dram_target_info *dram;
4250 int ret;
4251
4252 if (host) {
4253 struct mv_host_priv *hpriv = host->private_data;
4254
 /*
 * (Re-)program MBUS remapping windows if we are asked to.
 */
4258 dram = mv_mbus_dram_info();
4259 if (dram)
4260 mv_conf_mbus_windows(hpriv, dram);
4261
 /* initialize adapter */
4263 ret = mv_init_host(host);
4264 if (ret) {
4265 printk(KERN_ERR DRV_NAME ": Error during HW init\n");
4266 return ret;
4267 }
4268 ata_host_resume(host);
4269 }
4270
4271 return 0;
4272}
4273#else
4274#define mv_platform_suspend NULL
4275#define mv_platform_resume NULL
4276#endif
4277
4278#ifdef CONFIG_OF
4279static const struct of_device_id mv_sata_dt_ids[] = {
4280 { .compatible = "marvell,armada-370-sata", },
4281 { .compatible = "marvell,orion-sata", },
4282 {},
4283};
4284MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
4285#endif
4286
4287static struct platform_driver mv_platform_driver = {
4288 .probe = mv_platform_probe,
4289 .remove = mv_platform_remove,
4290 .suspend = mv_platform_suspend,
4291 .resume = mv_platform_resume,
4292 .driver = {
4293 .name = DRV_NAME,
4294 .of_match_table = of_match_ptr(mv_sata_dt_ids),
4295 },
4296};
4297
4298
4299#ifdef CONFIG_PCI
4300static int mv_pci_init_one(struct pci_dev *pdev,
4301 const struct pci_device_id *ent);
4302#ifdef CONFIG_PM_SLEEP
4303static int mv_pci_device_resume(struct pci_dev *pdev);
4304#endif
4305
4306
4307static struct pci_driver mv_pci_driver = {
4308 .name = DRV_NAME,
4309 .id_table = mv_pci_tbl,
4310 .probe = mv_pci_init_one,
4311 .remove = ata_pci_remove_one,
4312#ifdef CONFIG_PM_SLEEP
4313 .suspend = ata_pci_device_suspend,
4314 .resume = mv_pci_device_resume,
4315#endif
4316
4317};
4318
/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
4328static void mv_print_info(struct ata_host *host)
4329{
4330 struct pci_dev *pdev = to_pci_dev(host->dev);
4331 struct mv_host_priv *hpriv = host->private_data;
4332 u8 scc;
4333 const char *scc_s, *gen;
4334
 /* Use this to determine the HW mode of the chip */
4338 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4339 if (scc == 0)
4340 scc_s = "SCSI";
4341 else if (scc == 0x01)
4342 scc_s = "RAID";
4343 else
4344 scc_s = "?";
4345
4346 if (IS_GEN_I(hpriv))
4347 gen = "I";
4348 else if (IS_GEN_II(hpriv))
4349 gen = "II";
4350 else if (IS_GEN_IIE(hpriv))
4351 gen = "IIE";
4352 else
4353 gen = "?";
4354
4355 dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4356 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
4357 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4358}
4359
/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
4368static int mv_pci_init_one(struct pci_dev *pdev,
4369 const struct pci_device_id *ent)
4370{
4371 unsigned int board_idx = (unsigned int)ent->driver_data;
4372 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4373 struct ata_host *host;
4374 struct mv_host_priv *hpriv;
4375 int n_ports, port, rc;
4376
4377 ata_print_version_once(&pdev->dev, DRV_VERSION);
4378
 /* allocate host */
4380 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4381
4382 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4383 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4384 if (!host || !hpriv)
4385 return -ENOMEM;
4386 host->private_data = hpriv;
4387 hpriv->n_ports = n_ports;
4388 hpriv->board_idx = board_idx;
4389
 /* acquire resources */
4391 rc = pcim_enable_device(pdev);
4392 if (rc)
4393 return rc;
4394
4395 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4396 if (rc == -EBUSY)
4397 pcim_pin_device(pdev);
4398 if (rc)
4399 return rc;
4400 host->iomap = pcim_iomap_table(pdev);
4401 hpriv->base = host->iomap[MV_PRIMARY_BAR];
4402
4403 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4404 if (rc) {
4405 dev_err(&pdev->dev, "DMA enable failed\n");
4406 return rc;
4407 }
4408
4409 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4410 if (rc)
4411 return rc;
4412
4413 for (port = 0; port < host->n_ports; port++) {
4414 struct ata_port *ap = host->ports[port];
4415 void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4416 unsigned int offset = port_mmio - hpriv->base;
4417
4418 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4419 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4420 }
4421
 /* initialize adapter */
4423 rc = mv_init_host(host);
4424 if (rc)
4425 return rc;
4426
 /* Enable message-signaled interrupts, if requested */
4428 if (msi && pci_enable_msi(pdev) == 0)
4429 hpriv->hp_flags |= MV_HP_FLAG_MSI;
4430
4431 mv_dump_pci_cfg(pdev, 0x68);
4432 mv_print_info(host);
4433
4434 pci_set_master(pdev);
4435 pci_try_set_mwi(pdev);
4436 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
4437 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
4438}
4439
4440#ifdef CONFIG_PM_SLEEP
4441static int mv_pci_device_resume(struct pci_dev *pdev)
4442{
4443 struct ata_host *host = pci_get_drvdata(pdev);
4444 int rc;
4445
4446 rc = ata_pci_device_do_resume(pdev);
4447 if (rc)
4448 return rc;
4449
 /* initialize adapter */
4451 rc = mv_init_host(host);
4452 if (rc)
4453 return rc;
4454
4455 ata_host_resume(host);
4456
4457 return 0;
4458}
4459#endif
4460#endif
4461
4462static int __init mv_init(void)
4463{
4464 int rc = -ENODEV;
4465#ifdef CONFIG_PCI
4466 rc = pci_register_driver(&mv_pci_driver);
4467 if (rc < 0)
4468 return rc;
4469#endif
4470 rc = platform_driver_register(&mv_platform_driver);
4471
4472#ifdef CONFIG_PCI
4473 if (rc < 0)
4474 pci_unregister_driver(&mv_pci_driver);
4475#endif
4476 return rc;
4477}
4478
4479static void __exit mv_exit(void)
4480{
4481#ifdef CONFIG_PCI
4482 pci_unregister_driver(&mv_pci_driver);
4483#endif
4484 platform_driver_unregister(&mv_platform_driver);
4485}
4486
4487MODULE_AUTHOR("Brett Russ");
4488MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4489MODULE_LICENSE("GPL v2");
4490MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4491MODULE_VERSION(DRV_VERSION);
4492MODULE_ALIAS("platform:" DRV_NAME);
4493
4494module_init(mv_init);
4495module_exit(mv_exit);
4496