/*
 * DesignWare application register space functions for Keystone PCI controller
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"
#include "pci-keystone.h"

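/* Application register defines */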
#define LTSSM_EN_VAL			1
#define LTSSM_STATE_MASK		0x1f
#define LTSSM_STATE_L0			0x11
#define DBI_CS2_EN_VAL			0x20
#define OB_XLAT_EN_VAL			2

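/* Application registers */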
#define CMD_STATUS			0x004
#define CFG_SETUP			0x008
#define OB_SIZE				0x030
#define CFG_PCIM_WIN_SZ_IDX		3
#define CFG_PCIM_WIN_CNT		32
#define SPACE0_REMOTE_CFG_OFFSET	0x1000
#define OB_OFFSET_INDEX(n)		(0x200 + (8 * n))
#define OB_OFFSET_HI(n)			(0x204 + (8 * n))

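/* IRQ register defines */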
#define IRQ_EOI				0x050
#define IRQ_STATUS			0x184
#define IRQ_ENABLE_SET			0x188
#define IRQ_ENABLE_CLR			0x18c

#define MSI_IRQ				0x054
#define MSI0_IRQ_STATUS			0x104
#define MSI0_IRQ_ENABLE_SET		0x108
#define MSI0_IRQ_ENABLE_CLR		0x10c
#define MSI_IRQ_OFFSET			4

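/* Error IRQ bits */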
#define ERR_AER				BIT(5)	/* ECRC error */
#define ERR_AXI				BIT(4)	/* AXI tag lookup fatal error */
#define ERR_CORR			BIT(3)	/* Correctable error */
#define ERR_NONFATAL			BIT(2)	/* Non-fatal error */
#define ERR_FATAL			BIT(1)	/* Fatal error */
#define ERR_SYS				BIT(0)	/* System error */
#define ERR_IRQ_ALL			(ERR_AER | ERR_AXI | ERR_CORR | \
					 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
#define ERR_FATAL_IRQ			(ERR_FATAL | ERR_AXI)
#define ERR_IRQ_STATUS_RAW		0x1c0
#define ERR_IRQ_STATUS			0x1c4
#define ERR_IRQ_ENABLE_SET		0x1c8
#define ERR_IRQ_ENABLE_CLR		0x1cc

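/* Config space registers */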
#define DEBUG0				0x728

#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)

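/*
 * Each of the 8 MSI host interrupts covers four MSI vectors: vector N is
 * reported on host IRQ (N % 8) as status bit (N / 8) of the corresponding
 * MSI IRQ status register.
 */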
static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
					     u32 *bit_pos)
{
	*reg_offset = offset % 8;
	*bit_pos = offset >> 3;
}

phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	return ks_pcie->app.start + MSI_IRQ;
}

static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
	return readl(ks_pcie->va_app_base + offset);
}

static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
{
	writel(val, ks_pcie->va_app_base + offset);
}

void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct pcie_port *pp = &ks_pcie->pp;
	struct device *dev = pp->dev;
	u32 pending, vector;
	int src, virq;

	pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));

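	/*
	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
	 * shows 1, 9, 17, 25 and so forth
	 */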
	for (src = 0; src < 4; src++) {
		if (BIT(src) & pending) {
			vector = offset + (src << 3);
			virq = irq_linear_revmap(pp->irq_domain, vector);
			dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
				src, vector, virq);
			generic_handle_irq(virq);
		}
	}
}

static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
{
	u32 offset, reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie;
	struct msi_desc *msi;
	struct pcie_port *pp;

	msi = irq_data_get_msi_desc(d);
	pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
	ks_pcie = to_keystone_pcie(pp);
	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);

	ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
			 BIT(bit_pos));
	ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}

void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
			 BIT(bit_pos));
}

void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
			 BIT(bit_pos));
}

static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
{
	struct keystone_pcie *ks_pcie;
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_data_get_msi_desc(d);
	pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
	ks_pcie = to_keystone_pcie(pp);
	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);

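	/* Mask the end point if PVM implemented */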
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			pci_msi_mask_irq(d);
	}

	ks_dw_pcie_msi_clear_irq(pp, offset);
}

static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
	struct keystone_pcie *ks_pcie;
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_data_get_msi_desc(d);
	pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
	ks_pcie = to_keystone_pcie(pp);
	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);

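	/* Unmask the end point if PVM implemented */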
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			pci_msi_unmask_irq(d);
	}

	ks_dw_pcie_msi_set_irq(pp, offset);
}

static struct irq_chip ks_dw_pcie_msi_irq_chip = {
	.name = "Keystone-PCIe-MSI-IRQ",
	.irq_ack = ks_dw_pcie_msi_irq_ack,
	.irq_mask = ks_dw_pcie_msi_irq_mask,
	.irq_unmask = ks_dw_pcie_msi_irq_unmask,
};

static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
	.map = ks_dw_pcie_msi_map,
};

int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	struct device *dev = pp->dev;
	int i;

	pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
					       MAX_MSI_IRQS,
					       &ks_dw_pcie_msi_domain_ops,
					       chip);
	if (!pp->irq_domain) {
		dev_err(dev, "irq domain init failed\n");
		return -ENXIO;
	}

	for (i = 0; i < MAX_MSI_IRQS; i++)
		irq_create_mapping(pp->irq_domain, i);

	return 0;
}

void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
{
	int i;

	for (i = 0; i < MAX_LEGACY_IRQS; i++)
		ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
}

void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct pcie_port *pp = &ks_pcie->pp;
	struct device *dev = pp->dev;
	u32 pending;
	int virq;

	pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));

	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
		generic_handle_irq(virq);
	}

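	/* EOI the INTx interrupt */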
	ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
}

void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
	ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}

irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
	u32 status;

	status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
	if (!status)
		return IRQ_NONE;

	if (status & ERR_FATAL_IRQ)
		dev_err(ks_pcie->pp.dev, "fatal error (status %#010x)\n",
			status);

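	/* Ack the IRQ by writing back the handled status bits */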
	ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
	return IRQ_HANDLED;
}

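/*
 * Legacy INTx interrupts are handled and acknowledged entirely in
 * ks_dw_pcie_handle_legacy_irq(), so the irqchip callbacks below are
 * intentionally empty.
 */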
static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_dw_pcie_ack_legacy_irq,
	.irq_mask = ks_dw_pcie_mask_legacy_irq,
	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
};

static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
					  unsigned int irq,
					  irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);

	return 0;
}

static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
	.map = ks_dw_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};
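/**
 * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */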
static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);

	do {
		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	} while (!(val & DBI_CS2_EN_VAL));
}
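/**
 * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */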
static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);

	do {
		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	} while (val & DBI_CS2_EN_VAL);
}

void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 start = pp->mem->start, end = pp->mem->end;
	int i, tr_size;
	u32 val;

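	/* Disable BARs for inbound access */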
	ks_dw_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0);
	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0);
	ks_dw_pcie_clear_dbi_mode(ks_pcie);

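	/* Set outbound translation window size */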
	ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);

	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;

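	/* Use a direct 1:1 mapping of RC <-> PCI memory space */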
	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
		ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
		ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
		start += tr_size;
	}

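	/* Enable OB translation */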
	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
}

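/**
 * ks_pcie_cfg_setup() - Set up configuration space address for a device
 *
 * @ks_pcie: pointer to the keystone_pcie structure
 * @bus: bus number the device resides on
 * @devfn: encoded device and function number
 *
 * Returns the base address for the device's configuration space.  Bus 0 is
 * the root complex itself and is accessed through the local DBI space; all
 * other buses are reached through remote config space 0, with CFG_SETUP
 * programmed for the target bus/device/function before each access.
 */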
static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
				       unsigned int devfn)
{
	u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
	struct pcie_port *pp = &ks_pcie->pp;
	u32 regval;

	if (bus == 0)
		return pp->dbi_base;

	regval = (bus << 16) | (device << 8) | function;

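	/*
	 * Devices on bus 1 sit directly behind the root port and take
	 * type 0 config cycles; devices on higher buses need type 1, so
	 * set the type bit in CFG_SETUP for bus != 1.
	 */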
	if (bus != 1)
		regval |= BIT(24);

	ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
	return pp->va_cfg0_base;
}

int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
			     unsigned int devfn, int where, int size, u32 *val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	u8 bus_num = bus->number;
	void __iomem *addr;

	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);

	return dw_pcie_cfg_read(addr + where, size, val);
}

int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
			     unsigned int devfn, int where, int size, u32 val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	u8 bus_num = bus->number;
	void __iomem *addr;

	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);

	return dw_pcie_cfg_write(addr + where, size, val);
}

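/**
 * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 *
 * This sets BAR0 to enable inbound access for the MSI_IRQ register.
 */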
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

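	/* Configure and set up BAR0 */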
	ks_dw_pcie_set_dbi_mode(ks_pcie);

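	/* Enable BAR0 */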
	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 1);
	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, SZ_4K - 1);

	ks_dw_pcie_clear_dbi_mode(ks_pcie);

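	/*
	 * For BAR0, just setting the bus address for inbound writes (MSI)
	 * should be sufficient.  Use the physical address to avoid any
	 * conflicts.
	 */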
	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
}

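/**
 * ks_dw_pcie_link_up() - Check if link is up
 */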
int ks_dw_pcie_link_up(struct pcie_port *pp)
{
	u32 val;

	val = dw_pcie_readl_rc(pp, DEBUG0);
	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}

void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
{
	u32 val;

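	/* Disable Link training */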
	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	val &= ~LTSSM_EN_VAL;
	ks_dw_app_writel(ks_pcie, CMD_STATUS, val);

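	/* Initiate Link Training */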
	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
}

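/**
 * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
 *
 * Ioremap the register resources, initialize the legacy irq domain and
 * call dw_pcie_host_init() to complete the PCI host initialization.
 */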
int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
				struct device_node *msi_intc_np)
{
	struct pcie_port *pp = &ks_pcie->pp;
	struct device *dev = pp->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

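	/* Index 0 is the config reg. space address */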
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);

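	/*
	 * The same remote config space window serves both read and write
	 * accesses in ks_dw_pcie_rd/wr_other_conf(), so cfg0 and cfg1
	 * point at the same mapping.
	 */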
	pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
	pp->va_cfg1_base = pp->va_cfg0_base;

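	/* Index 1 is the application reg. space address */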
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	ks_pcie->app = *res;

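	/* Create legacy IRQ domain */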
	ks_pcie->legacy_irq_domain =
			irq_domain_add_linear(ks_pcie->legacy_intc_np,
					      MAX_LEGACY_IRQS,
					      &ks_dw_pcie_legacy_irq_domain_ops,
					      NULL);
	if (!ks_pcie->legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		return -EINVAL;
	}

	return dw_pcie_host_init(pp);
}