/*
 * Calxeda Highbank AHCI SATA platform driver
 *
 * Author: Mark Langsdorf <mark.langsdorf@calxeda.com>
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

#include "ahci.h"
#define CPHY_MAP(dev, addr)	((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
#define CPHY_ADDR(addr)		(((addr) & 0x1ff) << 2)
#define SERDES_CR_CTL		0x80a0
#define SERDES_CR_ADDR		0x80a1
#define SERDES_CR_DATA		0x80a2
#define CR_BUSY			0x0001
#define CR_START		0x0001
#define CR_WR_RDN		0x0002
#define CPHY_TX_INPUT_STS	0x2001
#define CPHY_RX_INPUT_STS	0x2002
#define CPHY_SATA_TX_OVERRIDE	0x8000
#define CPHY_SATA_RX_OVERRIDE	0x4000
#define CPHY_TX_OVERRIDE	0x2004
#define CPHY_RX_OVERRIDE	0x2005
#define SPHY_LANE		0x100
#define SPHY_HALF_RATE		0x0001
#define CPHY_SATA_DPLL_MODE	0x0700
#define CPHY_SATA_DPLL_SHIFT	8
#define CPHY_SATA_DPLL_RESET	(1 << 11)
#define CPHY_SATA_TX_ATTEN	0x1c00
#define CPHY_SATA_TX_ATTEN_SHIFT	10
#define CPHY_PHY_COUNT		6
#define CPHY_LANE_COUNT		4
#define CPHY_PORT_COUNT		(CPHY_PHY_COUNT * CPHY_LANE_COUNT)

static DEFINE_SPINLOCK(cphy_lock);

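/*
 * Each combo PHY drives up to CPHY_LANE_COUNT lanes and the controller can
 * expose up to CPHY_PORT_COUNT SATA ports in total. port_data[] records, per
 * SATA port, the PHY register window it uses, the lane within that PHY, and
 * the device-tree supplied PHY device index and TX attenuation value.
 */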
struct phy_lane_info {
	void __iomem *phy_base;
	u8 lane_mapping;
	u8 phy_devs;
	u8 tx_atten;
};
static struct phy_lane_info port_data[CPHY_PORT_COUNT];

static DEFINE_SPINLOCK(sgpio_lock);
#define SCLOCK		0
#define SLOAD		1
#define SDATA		2
#define SGPIO_PINS	3
#define SGPIO_PORTS	8

struct ecx_plat_data {
	u32		n_ports;
	/* extra SGPIO clock cycles driven before and after each pattern */
	u32		pre_clocks;
	u32		post_clocks;
	unsigned	sgpio_gpio[SGPIO_PINS];
	u32		sgpio_pattern;
	u32		port_to_sgpio[SGPIO_PORTS];
};

#define SGPIO_SIGNALS		3
#define ECX_ACTIVITY_BITS	0x300000
#define ECX_ACTIVITY_SHIFT	0
#define ECX_LOCATE_BITS		0x80000
#define ECX_LOCATE_SHIFT	1
#define ECX_FAULT_BITS		0x400000
#define ECX_FAULT_SHIFT		2

static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
				u32 shift)
{
	return 1 << (3 * pdata->port_to_sgpio[port] + shift);
}

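/*
 * Translate the activity/locate/fault bits of an EM LED message into this
 * port's three bits in the cached SGPIO output pattern.
 */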
static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
{
	if (state & ECX_ACTIVITY_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_ACTIVITY_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_ACTIVITY_SHIFT);
	if (state & ECX_LOCATE_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_LOCATE_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_LOCATE_SHIFT);
	if (state & ECX_FAULT_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_FAULT_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_FAULT_SHIFT);
}

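/*
 * Cycle the bit-banged SGPIO clock line: drive it high for 50 us, then low
 * for 50 us.
 */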
static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
{
	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1);
	udelay(50);
	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0);
	udelay(50);
}

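/*
 * Bit-bang the cached SGPIO pattern out over the SCLOCK/SLOAD/SDATA GPIO
 * lines whenever an enclosure management LED message is sent for a port.
 */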
static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	struct ahci_port_priv *pp = ap->private_data;
	unsigned long flags;
	int pmp, i;
	struct ahci_em_priv *emp;
	u32 sgpio_out;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
		return size;

	spin_lock_irqsave(&sgpio_lock, flags);
	ecx_parse_sgpio(pdata, ap->port_no, state);
	sgpio_out = pdata->sgpio_pattern;
	for (i = 0; i < pdata->pre_clocks; i++)
		ecx_led_cycle_clock(pdata);

	gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
	ecx_led_cycle_clock(pdata);
	gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);

	/*
	 * Clock out the new pattern, one bit per port signal, least
	 * significant bit first.
	 */
	for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
		gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1);
		sgpio_out >>= 1;
		ecx_led_cycle_clock(pdata);
	}
	for (i = 0; i < pdata->post_clocks; i++)
		ecx_led_cycle_clock(pdata);

	/* save off the new LED state for this port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(&sgpio_lock, flags);
	return size;
}

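/*
 * Parse the SGPIO GPIO lines, LED ordering and clocking properties from the
 * device tree and enable LED enclosure management messages for the host.
 */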
static void highbank_set_em_messages(struct device *dev,
					struct ahci_host_priv *hpriv,
					struct ata_port_info *pi)
{
	struct device_node *np = dev->of_node;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	int i;
	int err;

	for (i = 0; i < SGPIO_PINS; i++) {
		err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
		if (err < 0)
			return;

		pdata->sgpio_gpio[i] = err;
		err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO");
		if (err) {
			pr_err("sata_highbank gpio_request %d failed: %d\n",
					i, err);
			return;
		}
		gpio_direction_output(pdata->sgpio_gpio[i], 1);
	}
	of_property_read_u32_array(np, "calxeda,led-order",
						pdata->port_to_sgpio,
						pdata->n_ports);
	if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
		pdata->pre_clocks = 0;
	if (of_property_read_u32(np, "calxeda,post-clocks",
				&pdata->post_clocks))
		pdata->post_clocks = 0;

	/* advertise software-driven LED enclosure management */
	hpriv->em_loc = 0;
	hpriv->em_buf_sz = 4;
	hpriv->em_msg_type = EM_MSG_TYPE_LED;
	pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
}

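/*
 * Combo PHY registers are reached through a small indirect window: the
 * device/address pair is first latched at offset 0x800 via CPHY_MAP(), and
 * the register itself is then accessed at CPHY_ADDR().
 */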
static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
{
	u32 data;
	u8 dev = port_data[sata_port].phy_devs;
	spin_lock(&cphy_lock);
	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
	data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
	return data;
}

static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
{
	u8 dev = port_data[sata_port].phy_devs;
	spin_lock(&cphy_lock);
	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
	writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
}

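/*
 * The SerDes control registers are themselves indirect: combo_phy_read() and
 * combo_phy_write() go through the SERDES_CR_ADDR/DATA/CTL registers and
 * poll CR_BUSY between operations.
 */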
static void combo_phy_wait_for_ready(u8 sata_port)
{
	while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
		udelay(5);
}

static u32 combo_phy_read(u8 sata_port, u32 addr)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
	combo_phy_wait_for_ready(sata_port);
	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
}

static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
}

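/* Drop the SATA RX override so the lane returns to its default settings. */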
static void highbank_cphy_disable_overrides(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
}

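/*
 * Override the lane's TX attenuation with the device-tree provided value;
 * values with bit 3 set are treated as "no override" and skipped.
 */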
static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;

	if (val & 0x8)
		return;

	tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
}

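/*
 * Force the lane's RX DPLL mode via the override register, then pulse the
 * DPLL reset bit and give the PHY time to settle.
 */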
static void cphy_override_rx_mode(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_MODE;
	tmp |= val << CPHY_SATA_DPLL_SHIFT;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	msleep(15);
}

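/*
 * Wait for the lane to leave half-rate mode (bounded by a retry count), then
 * apply the RX mode and TX attenuation overrides for this port.
 */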
static void highbank_cphy_override_lane(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp, k = 0;

	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	do {
		tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
					lane * SPHY_LANE);
	} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
	cphy_override_rx_mode(sata_port, 3);
	cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
}

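/*
 * Walk the "calxeda,port-phys" phandles in the device tree, map each combo
 * PHY's registers once, and record the PHY, lane and TX attenuation for
 * every SATA port.
 */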
static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
	struct device_node *sata_node = dev->of_node;
	int phy_count = 0, phy, port = 0, i;
	void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
	struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
	u32 tx_atten[CPHY_PORT_COUNT] = {};

	memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);

	do {
		u32 tmp;
		struct of_phandle_args phy_data;
		if (of_parse_phandle_with_args(sata_node,
				"calxeda,port-phys", "#phy-cells",
				port, &phy_data))
			break;
		for (phy = 0; phy < phy_count; phy++) {
			if (phy_nodes[phy] == phy_data.np)
				break;
		}
		if (phy_nodes[phy] == NULL) {
			phy_nodes[phy] = phy_data.np;
			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
			if (cphy_base[phy] == NULL) {
				return 0;
			}
			phy_count += 1;
		}
		port_data[port].lane_mapping = phy_data.args[0];
		of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
		port_data[port].phy_devs = tmp;
		port_data[port].phy_base = cphy_base[phy];
		of_node_put(phy_data.np);
		port += 1;
	} while (port < CPHY_PORT_COUNT);
	of_property_read_u32_array(sata_node, "calxeda,tx-atten",
				tx_atten, port);
	for (i = 0; i < port; i++)
		port_data[i].tx_atten = (u8) tx_atten[i];
	return 0;
}

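/*
 * The combo PHY can fail to bring the link up on the first attempt, so the
 * hardreset handler retries: each pass drops the PHY overrides, issues a
 * standard SATA hard reset, and then re-applies the lane overrides.
 */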
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	static const unsigned long timing[] = { 5, 100, 500};
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	u32 sstatus;
	int rc;
	int retry = 100;

	hpriv->stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = ATA_BUSY;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	do {
		highbank_cphy_disable_overrides(link->ap->port_no);
		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
		highbank_cphy_override_lane(link->ap->port_no);

		/*
		 * Stop retrying if SStatus can't be read or if no device is
		 * detected on the link; otherwise keep resetting until the
		 * link comes up or the retry budget runs out.
		 */
		if (sata_scr_read(link, SCR_STATUS, &sstatus))
			break;
		if (!(sstatus & 0x3))
			break;
	} while (!online && retry--);

	hpriv->start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	return rc;
}

static struct ata_port_operations ahci_highbank_ops = {
	.inherits		= &ahci_ops,
	.hardreset		= ahci_highbank_hardreset,
	.transmit_led_message	= ecx_transmit_led_message,
};

static const struct ata_port_info ahci_highbank_port_info = {
	.flags		= AHCI_FLAG_COMMON,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ahci_highbank_ops,
};

static struct scsi_host_template ahci_highbank_platform_sht = {
	AHCI_SHT("sata_highbank"),
};

static const struct of_device_id ahci_of_match[] = {
	{ .compatible = "calxeda,hb-ahci" },
	{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);

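/*
 * Probe: map the AHCI MMIO region, initialise the combo PHYs, pull the
 * enclosure management configuration from the device tree and register the
 * libata host.
 */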
static int ahci_highbank_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ecx_plat_data *pdata;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int i;
	int rc;
	u32 n_ports;
	struct ata_port_info pi = ahci_highbank_port_info;
	const struct ata_port_info *ppi[] = { &pi, NULL };

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "no irq\n");
		return -EINVAL;
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		return -ENOMEM;
	}
	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "can't alloc ecx_plat_data\n");
		return -ENOMEM;
	}

	hpriv->irq = irq;
	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		return -ENOMEM;
	}

	rc = highbank_initialize_phys(dev, hpriv->mmio);
	if (rc)
		return rc;

	ahci_save_initial_config(dev, hpriv);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	if (hpriv->cap & HOST_CAP_64)
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));

	/*
	 * CAP.NP sometimes indicates the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	pdata->n_ports = n_ports;
	hpriv->plat_data = pdata;
	highbank_set_em_messages(dev, hpriv, &pi);

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto err0;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto err0;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ahci_host_activate(host, &ahci_highbank_platform_sht);
	if (rc)
		goto err0;

	return 0;
err0:
	return rc;
}

#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 ctl;
	int rc;

	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
		dev_err(dev, "firmware update required for suspend/resume\n");
		return -EIO;
	}

	/*
	 * AHCI spec rev1.1 section 8.3.3:
	 * Software must disable interrupts prior to requesting a
	 * transition of the HBA to D3 state.
	 */
	ctl = readl(mmio + HOST_CTL);
	ctl &= ~HOST_IRQ_EN;
	writel(ctl, mmio + HOST_CTL);
	readl(mmio + HOST_CTL); /* flush */

	rc = ata_host_suspend(host, PMSG_SUSPEND);
	if (rc)
		return rc;

	return 0;
}

static int ahci_highbank_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	int rc;

	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
			 ahci_highbank_suspend, ahci_highbank_resume);

static struct platform_driver ahci_highbank_driver = {
	.remove = ata_platform_remove_one,
	.driver = {
		.name = "highbank-ahci",
		.of_match_table = ahci_of_match,
		.pm = &ahci_highbank_pm_ops,
	},
	.probe = ahci_highbank_probe,
};

module_platform_driver(ahci_highbank_driver);

MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("sata:highbank");