1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/kernel.h>
20#include <linux/gfp.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/types.h>
24#include <linux/err.h>
25#include <linux/io.h>
26#include <linux/spinlock.h>
27#include <linux/device.h>
28#include <linux/of_device.h>
29#include <linux/of_address.h>
30#include <linux/platform_device.h>
31#include <linux/libata.h>
32#include <linux/ahci_platform.h>
33#include <linux/interrupt.h>
34#include <linux/delay.h>
35#include <linux/export.h>
36#include <linux/gpio.h>
37#include <linux/of_gpio.h>
38
39#include "ahci.h"
40
/*
 * Combo PHY indirect register access: CPHY_MAP builds the value for the
 * window/map register (device select plus upper address bits) and
 * CPHY_ADDR the in-window byte offset for the low address bits.
 */
#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
/* SerDes "CR" indirect interface: latch address/data, kick CTL, poll busy */
#define SERDES_CR_CTL 0x80a0
#define SERDES_CR_ADDR 0x80a1
#define SERDES_CR_DATA 0x80a2
#define CR_BUSY 0x0001
#define CR_START 0x0001
#define CR_WR_RDN 0x0002		/* write (set) vs. read (clear) select */
#define CPHY_TX_INPUT_STS 0x2001
#define CPHY_RX_INPUT_STS 0x2002
#define CPHY_SATA_TX_OVERRIDE 0x8000
#define CPHY_SATA_RX_OVERRIDE 0x4000
#define CPHY_TX_OVERRIDE 0x2004
#define CPHY_RX_OVERRIDE 0x2005
#define SPHY_LANE 0x100			/* per-lane register stride */
#define SPHY_HALF_RATE 0x0001
#define CPHY_SATA_DPLL_MODE 0x0700
#define CPHY_SATA_DPLL_SHIFT 8
#define CPHY_SATA_DPLL_RESET (1 << 11)
#define CPHY_SATA_TX_ATTEN 0x1c00
#define CPHY_SATA_TX_ATTEN_SHIFT 10
#define CPHY_PHY_COUNT 6
#define CPHY_LANE_COUNT 4
#define CPHY_PORT_COUNT (CPHY_PHY_COUNT * CPHY_LANE_COUNT)

/* serialises the map-window write + data access pair in __combo_phy_reg_* */
static DEFINE_SPINLOCK(cphy_lock);
67
68
69
/* Per-SATA-port view of its combo PHY connection (filled from the DT). */
struct phy_lane_info {
	void __iomem *phy_base;	/* mapped PHY register base; NULL = no PHY */
	u8 lane_mapping;	/* lane within the PHY serving this port */
	u8 phy_devs;		/* device selector fed to CPHY_MAP */
	u8 tx_atten;		/* per-port value from "calxeda,tx-atten" */
};
static struct phy_lane_info port_data[CPHY_PORT_COUNT];
77
/* protects sgpio_pattern and the bit-banged SGPIO transmit sequence */
static DEFINE_SPINLOCK(sgpio_lock);
#define SCLOCK 0
#define SLOAD 1
#define SDATA 2
#define SGPIO_PINS 3
#define SGPIO_PORTS 8

/* Per-host platform data, carried in ahci_host_priv->plat_data. */
struct ecx_plat_data {
	u32 n_ports;		/* number of SATA ports on this host */
	/* number of extra dummy clocks before/after each SGPIO frame (DT) */
	u32 pre_clocks;
	u32 post_clocks;
	unsigned sgpio_gpio[SGPIO_PINS];	/* gpio numbers: clock/load/data */
	u32 sgpio_pattern;	/* cached LED bit pattern for all ports */
	u32 port_to_sgpio[SGPIO_PORTS];	/* port -> slot ("calxeda,led-order") */
};

#define SGPIO_SIGNALS 3
/* masks and pattern-bit positions of the LED signals in an EM message */
#define ECX_ACTIVITY_BITS 0x300000
#define ECX_ACTIVITY_SHIFT 0
#define ECX_LOCATE_BITS 0x80000
#define ECX_LOCATE_SHIFT 1
#define ECX_FAULT_BITS 0x400000
#define ECX_FAULT_SHIFT 2
102static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
103 u32 shift)
104{
105 return 1 << (3 * pdata->port_to_sgpio[port] + shift);
106}
107
108static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
109{
110 if (state & ECX_ACTIVITY_BITS)
111 pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
112 ECX_ACTIVITY_SHIFT);
113 else
114 pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
115 ECX_ACTIVITY_SHIFT);
116 if (state & ECX_LOCATE_BITS)
117 pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
118 ECX_LOCATE_SHIFT);
119 else
120 pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
121 ECX_LOCATE_SHIFT);
122 if (state & ECX_FAULT_BITS)
123 pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
124 ECX_FAULT_SHIFT);
125 else
126 pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
127 ECX_FAULT_SHIFT);
128}
129
130
131
132
133
134static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
135{
136 gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1);
137 udelay(50);
138 gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0);
139 udelay(50);
140}
141
/*
 * ->transmit_led_message hook: update the cached LED pattern from @state
 * and bit-bang the pattern for all ports out over the three SGPIO lines
 * (clock / load / data).
 *
 * Returns @size on success (or when LED messaging is disabled for this
 * host), -EINVAL for an out-of-range PMP slot.
 */
static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ecx_plat_data *pdata = (struct ecx_plat_data *) hpriv->plat_data;
	struct ahci_port_priv *pp = ap->private_data;
	unsigned long flags;
	int pmp, i;
	struct ahci_em_priv *emp;
	u32 sgpio_out;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
		return size;

	spin_lock_irqsave(&sgpio_lock, flags);
	/* merge this port's activity/locate/fault bits into the pattern */
	ecx_parse_sgpio(pdata, ap->port_no, state);
	sgpio_out = pdata->sgpio_pattern;
	/* board-specific number of leading dummy clocks */
	for (i = 0; i < pdata->pre_clocks; i++)
		ecx_led_cycle_clock(pdata);

	/* pulse SLOAD for one clock cycle to start the frame */
	gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
	ecx_led_cycle_clock(pdata);
	gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);

	/*
	 * Shift the whole pattern out LSB first: present one bit on SDATA,
	 * then clock it in.
	 */
	for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
		gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1);
		sgpio_out >>= 1;
		ecx_led_cycle_clock(pdata);
	}
	/* board-specific number of trailing dummy clocks */
	for (i = 0; i < pdata->post_clocks; i++)
		ecx_led_cycle_clock(pdata);

	/* save off the new LED state for this port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(&sgpio_lock, flags);
	return size;
}
190
191static void highbank_set_em_messages(struct device *dev,
192 struct ahci_host_priv *hpriv,
193 struct ata_port_info *pi)
194{
195 struct device_node *np = dev->of_node;
196 struct ecx_plat_data *pdata = hpriv->plat_data;
197 int i;
198 int err;
199
200 for (i = 0; i < SGPIO_PINS; i++) {
201 err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
202 if (IS_ERR_VALUE(err))
203 return;
204
205 pdata->sgpio_gpio[i] = err;
206 err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO");
207 if (err) {
208 pr_err("sata_highbank gpio_request %d failed: %d\n",
209 i, err);
210 return;
211 }
212 gpio_direction_output(pdata->sgpio_gpio[i], 1);
213 }
214 of_property_read_u32_array(np, "calxeda,led-order",
215 pdata->port_to_sgpio,
216 pdata->n_ports);
217 if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
218 pdata->pre_clocks = 0;
219 if (of_property_read_u32(np, "calxeda,post-clocks",
220 &pdata->post_clocks))
221 pdata->post_clocks = 0;
222
223
224 hpriv->em_loc = 0;
225 hpriv->em_buf_sz = 4;
226 hpriv->em_msg_type = EM_MSG_TYPE_LED;
227 pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
228}
229
230static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
231{
232 u32 data;
233 u8 dev = port_data[sata_port].phy_devs;
234 spin_lock(&cphy_lock);
235 writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
236 data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
237 spin_unlock(&cphy_lock);
238 return data;
239}
240
241static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
242{
243 u8 dev = port_data[sata_port].phy_devs;
244 spin_lock(&cphy_lock);
245 writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
246 writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
247 spin_unlock(&cphy_lock);
248}
249
250static void combo_phy_wait_for_ready(u8 sata_port)
251{
252 while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
253 udelay(5);
254}
255
/*
 * Read a SerDes register through the CR indirect interface: wait until
 * idle, latch the address, start the (read) transaction, wait for
 * completion, then fetch the data register.
 */
static u32 combo_phy_read(u8 sata_port, u32 addr)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
	combo_phy_wait_for_ready(sata_port);
	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
}
264
/*
 * Write a SerDes register through the CR indirect interface: wait until
 * idle, latch address and data, then start a write (CR_WR_RDN | CR_START).
 * Completion is not awaited here; the next combo_phy_* call waits first.
 */
static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
}
272
/*
 * Clear the SATA RX override for this port's lane, returning the PHY to
 * hardware-controlled RX settings (called before a link hard reset).
 */
static void highbank_cphy_disable_overrides(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	/* port has no mapped PHY (see highbank_initialize_phys) */
	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	/* read current settings from the input-status register and write
	 * them back to the override register with the override bit clear;
	 * presumably INPUT_STS mirrors the OVERRIDE layout — TODO confirm */
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
}
283
/*
 * Override the SATA TX attenuation of this port's lane with @val (from
 * the DT "calxeda,tx-atten" property).  A value with bit 3 set is
 * treated as "no override": the function returns without touching the
 * PHY.
 */
static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;

	if (val & 0x8)
		return;

	/* snapshot current TX settings with the override bit cleared... */
	tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	/* ...then enable the override... */
	tmp |= CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	/* ...and program the attenuation field */
	tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
}
302
/*
 * Force the RX DPLL mode of this port's lane to @val and pulse the DPLL
 * reset so the new mode takes effect.
 */
static void cphy_override_rx_mode(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	/* snapshot current RX settings with the override bit cleared */
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	/* enable the RX override */
	tmp |= CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	/* program the requested DPLL mode */
	tmp &= ~CPHY_SATA_DPLL_MODE;
	tmp |= val << CPHY_SATA_DPLL_SHIFT;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	/* pulse the DPLL reset: assert... */
	tmp |= CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	/* ...then release */
	tmp &= ~CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	/* settle time after the DPLL reset — TODO confirm required delay */
	msleep(15);
}
326
327static void highbank_cphy_override_lane(u8 sata_port)
328{
329 u8 lane = port_data[sata_port].lane_mapping;
330 u32 tmp, k = 0;
331
332 if (unlikely(port_data[sata_port].phy_base == NULL))
333 return;
334 do {
335 tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
336 lane * SPHY_LANE);
337 } while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
338 cphy_override_rx_mode(sata_port, 3);
339 cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
340}
341
342static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
343{
344 struct device_node *sata_node = dev->of_node;
345 int phy_count = 0, phy, port = 0, i;
346 void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
347 struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
348 u32 tx_atten[CPHY_PORT_COUNT] = {};
349
350 memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
351
352 do {
353 u32 tmp;
354 struct of_phandle_args phy_data;
355 if (of_parse_phandle_with_args(sata_node,
356 "calxeda,port-phys", "#phy-cells",
357 port, &phy_data))
358 break;
359 for (phy = 0; phy < phy_count; phy++) {
360 if (phy_nodes[phy] == phy_data.np)
361 break;
362 }
363 if (phy_nodes[phy] == NULL) {
364 phy_nodes[phy] = phy_data.np;
365 cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
366 if (cphy_base[phy] == NULL) {
367 return 0;
368 }
369 phy_count += 1;
370 }
371 port_data[port].lane_mapping = phy_data.args[0];
372 of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
373 port_data[port].phy_devs = tmp;
374 port_data[port].phy_base = cphy_base[phy];
375 of_node_put(phy_data.np);
376 port += 1;
377 } while (port < CPHY_PORT_COUNT);
378 of_property_read_u32_array(sata_node, "calxeda,tx-atten",
379 tx_atten, port);
380 for (i = 0; i < port; i++)
381 port_data[i].tx_atten = (u8) tx_atten[i];
382 return 0;
383}
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
/*
 * Link hard-reset with Highbank PHY handling: drop the lane overrides,
 * run the standard SATA hard reset, then re-apply the overrides.  When
 * a device is detected (DET bits set) but the link fails to come up,
 * the reset is retried up to 100 times — presumably a workaround for
 * marginal PHY link training; TODO confirm against platform errata.
 */
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	/* per-phase reset timings (ms) passed to sata_link_hardreset() */
	static const unsigned long timing[] = { 5, 100, 500};
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	u32 sstatus;
	int rc;
	int retry = 100;

	ahci_stop_engine(ap);

	/* clear the D2H reception area to properly wait for the D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = ATA_BUSY;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	do {
		highbank_cphy_disable_overrides(link->ap->port_no);
		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
		highbank_cphy_override_lane(link->ap->port_no);

		/* stop retrying if we cannot read SStatus, or if no device
		 * is detected at all (DET bits clear) — only a present but
		 * offline device warrants another reset attempt */
		if (sata_scr_read(link, SCR_STATUS, &sstatus))
			break;
		if (!(sstatus & 0x3))
			break;
	} while (!online && retry--);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	return rc;
}
442
/* Standard AHCI ops plus PHY-aware hardreset and SGPIO LED messaging. */
static struct ata_port_operations ahci_highbank_ops = {
	.inherits = &ahci_ops,
	.hardreset = ahci_highbank_hardreset,
	.transmit_led_message = ecx_transmit_led_message,
};

static const struct ata_port_info ahci_highbank_port_info = {
	.flags = AHCI_FLAG_COMMON,
	.pio_mask = ATA_PIO4,
	.udma_mask = ATA_UDMA6,
	.port_ops = &ahci_highbank_ops,
};

static struct scsi_host_template ahci_highbank_platform_sht = {
	AHCI_SHT("sata_highbank"),
};

/* OF match table: the Highbank SoC AHCI node */
static const struct of_device_id ahci_of_match[] = {
	{ .compatible = "calxeda,hb-ahci" },
	{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
465
466static int ahci_highbank_probe(struct platform_device *pdev)
467{
468 struct device *dev = &pdev->dev;
469 struct ahci_host_priv *hpriv;
470 struct ecx_plat_data *pdata;
471 struct ata_host *host;
472 struct resource *mem;
473 int irq;
474 int i;
475 int rc;
476 u32 n_ports;
477 struct ata_port_info pi = ahci_highbank_port_info;
478 const struct ata_port_info *ppi[] = { &pi, NULL };
479
480 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
481 if (!mem) {
482 dev_err(dev, "no mmio space\n");
483 return -EINVAL;
484 }
485
486 irq = platform_get_irq(pdev, 0);
487 if (irq <= 0) {
488 dev_err(dev, "no irq\n");
489 return -EINVAL;
490 }
491
492 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
493 if (!hpriv) {
494 dev_err(dev, "can't alloc ahci_host_priv\n");
495 return -ENOMEM;
496 }
497 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
498 if (!pdata) {
499 dev_err(dev, "can't alloc ecx_plat_data\n");
500 return -ENOMEM;
501 }
502
503 hpriv->flags |= (unsigned long)pi.private_data;
504
505 hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
506 if (!hpriv->mmio) {
507 dev_err(dev, "can't map %pR\n", mem);
508 return -ENOMEM;
509 }
510
511 rc = highbank_initialize_phys(dev, hpriv->mmio);
512 if (rc)
513 return rc;
514
515
516 ahci_save_initial_config(dev, hpriv, 0, 0);
517
518
519 if (hpriv->cap & HOST_CAP_NCQ)
520 pi.flags |= ATA_FLAG_NCQ;
521
522 if (hpriv->cap & HOST_CAP_PMP)
523 pi.flags |= ATA_FLAG_PMP;
524
525 if (hpriv->cap & HOST_CAP_64)
526 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
527
528
529
530
531
532
533 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
534
535 pdata->n_ports = n_ports;
536 hpriv->plat_data = pdata;
537 highbank_set_em_messages(dev, hpriv, &pi);
538
539 host = ata_host_alloc_pinfo(dev, ppi, n_ports);
540 if (!host) {
541 rc = -ENOMEM;
542 goto err0;
543 }
544
545 host->private_data = hpriv;
546
547 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
548 host->flags |= ATA_HOST_PARALLEL_SCAN;
549
550 for (i = 0; i < host->n_ports; i++) {
551 struct ata_port *ap = host->ports[i];
552
553 ata_port_desc(ap, "mmio %pR", mem);
554 ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
555
556
557 if (ap->flags & ATA_FLAG_EM)
558 ap->em_message_type = hpriv->em_msg_type;
559
560
561 if (!(hpriv->port_map & (1 << i)))
562 ap->ops = &ata_dummy_port_ops;
563 }
564
565 rc = ahci_reset_controller(host);
566 if (rc)
567 goto err0;
568
569 ahci_init_controller(host);
570 ahci_print_info(host, "platform");
571
572 rc = ata_host_activate(host, irq, ahci_interrupt, 0,
573 &ahci_highbank_platform_sht);
574 if (rc)
575 goto err0;
576
577 return 0;
578err0:
579 return rc;
580}
581
582#ifdef CONFIG_PM_SLEEP
/*
 * PM suspend: mask controller interrupts, then suspend the ata host.
 * Refused with -EIO when the firmware lacks suspend/resume support
 * (AHCI_HFLAG_NO_SUSPEND).
 */
static int ahci_highbank_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 ctl;
	int rc;

	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
		dev_err(dev, "firmware update required for suspend/resume\n");
		return -EIO;
	}

	/*
	 * AHCI spec rev1.1 section 8.3.3:
	 * Software must disable interrupts prior to requesting a
	 * transition of the HBA to D3 state.
	 */
	ctl = readl(mmio + HOST_CTL);
	ctl &= ~HOST_IRQ_EN;
	writel(ctl, mmio + HOST_CTL);
	readl(mmio + HOST_CTL); /* flush the posted write */

	rc = ata_host_suspend(host, PMSG_SUSPEND);
	if (rc)
		return rc;

	return 0;
}
612
613static int ahci_highbank_resume(struct device *dev)
614{
615 struct ata_host *host = dev_get_drvdata(dev);
616 int rc;
617
618 if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
619 rc = ahci_reset_controller(host);
620 if (rc)
621 return rc;
622
623 ahci_init_controller(host);
624 }
625
626 ata_host_resume(host);
627
628 return 0;
629}
630#endif
631
/* device PM hooks (compile out when CONFIG_PM_SLEEP is disabled) */
static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
			ahci_highbank_suspend, ahci_highbank_resume);

static struct platform_driver ahci_highbank_driver = {
	.remove = ata_platform_remove_one,
	.driver = {
		.name = "highbank-ahci",
		/* NOTE(review): .owner is also set by the platform core;
		 * this explicit assignment is redundant */
		.owner = THIS_MODULE,
		.of_match_table = ahci_of_match,
		.pm = &ahci_highbank_pm_ops,
	},
	.probe = ahci_highbank_probe,
};

module_platform_driver(ahci_highbank_driver);

MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("sata:highbank");
652