// SPDX-License-Identifier: GPL-2.0-only
/*
 * Calxeda Highbank AHCI SATA platform driver
 * Copyright 2012 Calxeda, Inc.
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>

#include "ahci.h"

#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
#define SERDES_CR_CTL 0x80a0
#define SERDES_CR_ADDR 0x80a1
#define SERDES_CR_DATA 0x80a2
#define CR_BUSY 0x0001
#define CR_START 0x0001
#define CR_WR_RDN 0x0002
#define CPHY_TX_INPUT_STS 0x2001
#define CPHY_RX_INPUT_STS 0x2002
#define CPHY_SATA_TX_OVERRIDE 0x8000
#define CPHY_SATA_RX_OVERRIDE 0x4000
#define CPHY_TX_OVERRIDE 0x2004
#define CPHY_RX_OVERRIDE 0x2005
#define SPHY_LANE 0x100
#define SPHY_HALF_RATE 0x0001
#define CPHY_SATA_DPLL_MODE 0x0700
#define CPHY_SATA_DPLL_SHIFT 8
#define CPHY_SATA_DPLL_RESET (1 << 11)
#define CPHY_SATA_TX_ATTEN 0x1c00
#define CPHY_SATA_TX_ATTEN_SHIFT 10
#define CPHY_PHY_COUNT 6
#define CPHY_LANE_COUNT 4
#define CPHY_PORT_COUNT (CPHY_PHY_COUNT * CPHY_LANE_COUNT)

static DEFINE_SPINLOCK(cphy_lock);

struct phy_lane_info {
        void __iomem *phy_base;
        u8 lane_mapping;
        u8 phy_devs;
        u8 tx_atten;
};
static struct phy_lane_info port_data[CPHY_PORT_COUNT];

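/*
 * LED state is shifted out over a bit-banged SGPIO bus made of three GPIO
 * lines (clock, load and data); sgpio_lock serializes access to that bus.
 */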
static DEFINE_SPINLOCK(sgpio_lock);
#define SCLOCK 0
#define SLOAD 1
#define SDATA 2
#define SGPIO_PINS 3
#define SGPIO_PORTS 8

struct ecx_plat_data {
        u32 n_ports;
        u32 pre_clocks;
        u32 post_clocks;
        struct gpio_desc *sgpio_gpiod[SGPIO_PINS];
        u32 sgpio_pattern;
        u32 port_to_sgpio[SGPIO_PORTS];
};

#define SGPIO_SIGNALS 3
#define ECX_ACTIVITY_BITS 0x300000
#define ECX_ACTIVITY_SHIFT 0
#define ECX_LOCATE_BITS 0x80000
#define ECX_LOCATE_SHIFT 1
#define ECX_FAULT_BITS 0x400000
#define ECX_FAULT_SHIFT 2
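/*
 * Each port contributes three bits (activity, locate, fault) to the SGPIO
 * pattern; port_to_sgpio[] gives the port's position in the shift chain.
 */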
static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
                                u32 shift)
{
        return 1 << (3 * pdata->port_to_sgpio[port] + shift);
}

static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
{
        if (state & ECX_ACTIVITY_BITS)
                pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
                                                ECX_ACTIVITY_SHIFT);
        else
                pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
                                                ECX_ACTIVITY_SHIFT);
        if (state & ECX_LOCATE_BITS)
                pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
                                                ECX_LOCATE_SHIFT);
        else
                pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
                                                ECX_LOCATE_SHIFT);
        if (state & ECX_FAULT_BITS)
                pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
                                                ECX_FAULT_SHIFT);
        else
                pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
                                                ECX_FAULT_SHIFT);
}

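/* toggle the SGPIO clock line once, with a 50 us half-period */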
static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
{
        gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 1);
        udelay(50);
        gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 0);
        udelay(50);
}

static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
                                        ssize_t size)
{
        struct ahci_host_priv *hpriv = ap->host->private_data;
        struct ecx_plat_data *pdata = hpriv->plat_data;
        struct ahci_port_priv *pp = ap->private_data;
        unsigned long flags;
        int pmp, i;
        struct ahci_em_priv *emp;
        u32 sgpio_out;

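        /* get the slot number from the EM message */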
        pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
        if (pmp < EM_MAX_SLOTS)
                emp = &pp->em_priv[pmp];
        else
                return -EINVAL;

        if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
                return size;

        spin_lock_irqsave(&sgpio_lock, flags);
        ecx_parse_sgpio(pdata, ap->port_no, state);
        sgpio_out = pdata->sgpio_pattern;
        for (i = 0; i < pdata->pre_clocks; i++)
                ecx_led_cycle_clock(pdata);

        gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 1);
        ecx_led_cycle_clock(pdata);
        gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 0);

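        /* shift the pattern out LSB first, one bit per clock cycle */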
        for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
                gpiod_set_value(pdata->sgpio_gpiod[SDATA], sgpio_out & 1);
                sgpio_out >>= 1;
                ecx_led_cycle_clock(pdata);
        }
        for (i = 0; i < pdata->post_clocks; i++)
                ecx_led_cycle_clock(pdata);

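        /* remember the new LED state for this port/slot */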
        emp->led_state = state;

        spin_unlock_irqrestore(&sgpio_lock, flags);
        return size;
}

static void highbank_set_em_messages(struct device *dev,
                                        struct ahci_host_priv *hpriv,
                                        struct ata_port_info *pi)
{
        struct device_node *np = dev->of_node;
        struct ecx_plat_data *pdata = hpriv->plat_data;
        int i;

        for (i = 0; i < SGPIO_PINS; i++) {
                struct gpio_desc *gpiod;

                gpiod = devm_gpiod_get_index(dev, "calxeda,sgpio", i,
                                                GPIOD_OUT_HIGH);
                if (IS_ERR(gpiod)) {
                        dev_err(dev, "failed to get GPIO %d\n", i);
                        continue;
                }
                gpiod_set_consumer_name(gpiod, "CX SGPIO");

                pdata->sgpio_gpiod[i] = gpiod;
        }
        of_property_read_u32_array(np, "calxeda,led-order",
                                pdata->port_to_sgpio,
                                pdata->n_ports);
        if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
                pdata->pre_clocks = 0;
        if (of_property_read_u32(np, "calxeda,post-clocks",
                                &pdata->post_clocks))
                pdata->post_clocks = 0;

        hpriv->em_loc = 0;
        hpriv->em_buf_sz = 4;
        hpriv->em_msg_type = EM_MSG_TYPE_LED;
        pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
}

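/*
 * Combo PHY registers are accessed indirectly: CPHY_MAP() selects the device
 * and the upper address bits via the map register at offset 0x800, then the
 * low nine address bits index into the mapped window via CPHY_ADDR().
 */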
static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
{
        u32 data;
        u8 dev = port_data[sata_port].phy_devs;
        spin_lock(&cphy_lock);
        writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
        data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
        spin_unlock(&cphy_lock);
        return data;
}

static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
{
        u8 dev = port_data[sata_port].phy_devs;
        spin_lock(&cphy_lock);
        writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
        writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
        spin_unlock(&cphy_lock);
}

static void combo_phy_wait_for_ready(u8 sata_port)
{
        while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
                udelay(5);
}

static u32 combo_phy_read(u8 sata_port, u32 addr)
{
        combo_phy_wait_for_ready(sata_port);
        __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
        __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
        combo_phy_wait_for_ready(sata_port);
        return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
}

static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
{
        combo_phy_wait_for_ready(sata_port);
        __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
        __combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
        __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
}

static void highbank_cphy_disable_overrides(u8 sata_port)
{
        u8 lane = port_data[sata_port].lane_mapping;
        u32 tmp;
        if (unlikely(port_data[sata_port].phy_base == NULL))
                return;
        tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
        tmp &= ~CPHY_SATA_RX_OVERRIDE;
        combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
}

static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
{
        u8 lane = port_data[sata_port].lane_mapping;
        u32 tmp;

        if (val & 0x8)
                return;

        tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
        tmp &= ~CPHY_SATA_TX_OVERRIDE;
        combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

        tmp |= CPHY_SATA_TX_OVERRIDE;
        combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

        tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
        combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
}

static void cphy_override_rx_mode(u8 sata_port, u32 val)
{
        u8 lane = port_data[sata_port].lane_mapping;
        u32 tmp;
        tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
        tmp &= ~CPHY_SATA_RX_OVERRIDE;
        combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

        tmp |= CPHY_SATA_RX_OVERRIDE;
        combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

        tmp &= ~CPHY_SATA_DPLL_MODE;
        tmp |= val << CPHY_SATA_DPLL_SHIFT;
        combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

        tmp |= CPHY_SATA_DPLL_RESET;
        combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

        tmp &= ~CPHY_SATA_DPLL_RESET;
        combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

        msleep(15);
}

static void highbank_cphy_override_lane(u8 sata_port)
{
        u8 lane = port_data[sata_port].lane_mapping;
        u32 tmp, k = 0;

        if (unlikely(port_data[sata_port].phy_base == NULL))
                return;
        do {
                tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
                                        lane * SPHY_LANE);
        } while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
        cphy_override_rx_mode(sata_port, 3);
        cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
}

static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
        struct device_node *sata_node = dev->of_node;
        int phy_count = 0, phy, port = 0, i;
        void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
        struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
        u32 tx_atten[CPHY_PORT_COUNT] = {};

        memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);

        do {
                u32 tmp;
                struct of_phandle_args phy_data;
                if (of_parse_phandle_with_args(sata_node,
                                "calxeda,port-phys", "#phy-cells",
                                port, &phy_data))
                        break;
                for (phy = 0; phy < phy_count; phy++) {
                        if (phy_nodes[phy] == phy_data.np)
                                break;
                }
                if (phy_nodes[phy] == NULL) {
                        phy_nodes[phy] = phy_data.np;
                        cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
                        if (cphy_base[phy] == NULL) {
                                return 0;
                        }
                        phy_count += 1;
                }
                port_data[port].lane_mapping = phy_data.args[0];
                of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
                port_data[port].phy_devs = tmp;
                port_data[port].phy_base = cphy_base[phy];
                of_node_put(phy_data.np);
                port += 1;
        } while (port < CPHY_PORT_COUNT);
        of_property_read_u32_array(sata_node, "calxeda,tx-atten",
                                tx_atten, port);
        for (i = 0; i < port; i++)
                port_data[i].tx_atten = (u8) tx_atten[i];
        return 0;
}
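/*
 * The PHY overrides are cleared before each hard reset and re-applied after
 * it. If a device is detected (SStatus DET is non-zero) but the link fails
 * to come up, the reset is retried (up to 100 times) to work around links
 * that intermittently fail to come up.
 */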
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
                                unsigned long deadline)
{
        static const unsigned long timing[] = { 5, 100, 500};
        struct ata_port *ap = link->ap;
        struct ahci_port_priv *pp = ap->private_data;
        struct ahci_host_priv *hpriv = ap->host->private_data;
        u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
        struct ata_taskfile tf;
        bool online;
        u32 sstatus;
        int rc;
        int retry = 100;

        hpriv->stop_engine(ap);

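        /* clear D2H reception area to properly wait for D2H FIS */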
        ata_tf_init(link->device, &tf);
        tf.command = ATA_BUSY;
        ata_tf_to_fis(&tf, 0, 0, d2h_fis);

        do {
                highbank_cphy_disable_overrides(link->ap->port_no);
                rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
                highbank_cphy_override_lane(link->ap->port_no);

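                /*
                 * If a device is present (DET set) but the link did not come
                 * up, retry the reset.
                 */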
                if (sata_scr_read(link, SCR_STATUS, &sstatus))
                        break;
                if (!(sstatus & 0x3))
                        break;
        } while (!online && retry--);

        hpriv->start_engine(ap);

        if (online)
                *class = ahci_dev_classify(ap);

        return rc;
}

static struct ata_port_operations ahci_highbank_ops = {
        .inherits = &ahci_ops,
        .hardreset = ahci_highbank_hardreset,
        .transmit_led_message = ecx_transmit_led_message,
};

static const struct ata_port_info ahci_highbank_port_info = {
        .flags = AHCI_FLAG_COMMON,
        .pio_mask = ATA_PIO4,
        .udma_mask = ATA_UDMA6,
        .port_ops = &ahci_highbank_ops,
};

static struct scsi_host_template ahci_highbank_platform_sht = {
        AHCI_SHT("sata_highbank"),
};

static const struct of_device_id ahci_of_match[] = {
        { .compatible = "calxeda,hb-ahci" },
        {},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);

static int ahci_highbank_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct ahci_host_priv *hpriv;
        struct ecx_plat_data *pdata;
        struct ata_host *host;
        struct resource *mem;
        int irq;
        int i;
        int rc;
        u32 n_ports;
        struct ata_port_info pi = ahci_highbank_port_info;
        const struct ata_port_info *ppi[] = { &pi, NULL };

        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!mem) {
                dev_err(dev, "no mmio space\n");
                return -EINVAL;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                dev_err(dev, "no irq\n");
                return -EINVAL;
        }

        hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv) {
                dev_err(dev, "can't alloc ahci_host_priv\n");
                return -ENOMEM;
        }
        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata) {
                dev_err(dev, "can't alloc ecx_plat_data\n");
                return -ENOMEM;
        }

        hpriv->irq = irq;
        hpriv->flags |= (unsigned long)pi.private_data;

        hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
        if (!hpriv->mmio) {
                dev_err(dev, "can't map %pR\n", mem);
                return -ENOMEM;
        }

        rc = highbank_initialize_phys(dev, hpriv->mmio);
        if (rc)
                return rc;

        ahci_save_initial_config(dev, hpriv);

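        /* pick up optional capabilities the controller advertises */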
        if (hpriv->cap & HOST_CAP_NCQ)
                pi.flags |= ATA_FLAG_NCQ;

        if (hpriv->cap & HOST_CAP_PMP)
                pi.flags |= ATA_FLAG_PMP;

        if (hpriv->cap & HOST_CAP_64)
                dma_set_coherent_mask(dev, DMA_BIT_MASK(64));

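        /*
         * CAP.NP may hold either the index of the last enabled port or that
         * of the last possible port, so derive the port count from both
         * CAP.NP and the implemented-port map.
         */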
        n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

        pdata->n_ports = n_ports;
        hpriv->plat_data = pdata;
        highbank_set_em_messages(dev, hpriv, &pi);

        host = ata_host_alloc_pinfo(dev, ppi, n_ports);
        if (!host) {
                rc = -ENOMEM;
                goto err0;
        }

        host->private_data = hpriv;

        if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
                host->flags |= ATA_HOST_PARALLEL_SCAN;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];

                ata_port_desc(ap, "mmio %pR", mem);
                ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

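                /* set enclosure management message type */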
                if (ap->flags & ATA_FLAG_EM)
                        ap->em_message_type = hpriv->em_msg_type;

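                /* disabled/not-implemented port */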
                if (!(hpriv->port_map & (1 << i)))
                        ap->ops = &ata_dummy_port_ops;
        }

        rc = ahci_reset_controller(host);
        if (rc)
                goto err0;

        ahci_init_controller(host);
        ahci_print_info(host, "platform");

        rc = ahci_host_activate(host, &ahci_highbank_platform_sht);
        if (rc)
                goto err0;

        return 0;
err0:
        return rc;
}

#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
        struct ata_host *host = dev_get_drvdata(dev);
        struct ahci_host_priv *hpriv = host->private_data;
        void __iomem *mmio = hpriv->mmio;
        u32 ctl;

        if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
                dev_err(dev, "firmware update required for suspend/resume\n");
                return -EIO;
        }

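        /*
         * The AHCI spec requires software to disable HBA interrupts before
         * requesting a transition to the D3 state.
         */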
        ctl = readl(mmio + HOST_CTL);
        ctl &= ~HOST_IRQ_EN;
        writel(ctl, mmio + HOST_CTL);
        readl(mmio + HOST_CTL); /* flush the write */

        return ata_host_suspend(host, PMSG_SUSPEND);
}

static int ahci_highbank_resume(struct device *dev)
{
        struct ata_host *host = dev_get_drvdata(dev);
        int rc;

        if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
                rc = ahci_reset_controller(host);
                if (rc)
                        return rc;

                ahci_init_controller(host);
        }

        ata_host_resume(host);

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
                        ahci_highbank_suspend, ahci_highbank_resume);

static struct platform_driver ahci_highbank_driver = {
        .remove = ata_platform_remove_one,
        .driver = {
                .name = "highbank-ahci",
                .of_match_table = ahci_of_match,
                .pm = &ahci_highbank_pm_ops,
        },
        .probe = ahci_highbank_probe,
};

module_platform_driver(ahci_highbank_driver);

MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("sata:highbank");