/*
 * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
 *
 * The Armada 370 and Armada XP SoCs have a coherency fabric which is
 * responsible for ensuring hardware coherency between all CPUs and
 * between CPUs and I/O masters. This file initializes the coherency
 * fabric and supplies basic routines for configuring and controlling
 * hardware coherency.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <asm/smp_plat.h>
#include "armada-370-xp.h"

/*
 * Some functions in this file are called very early during SMP
 * initialization. At that time the device tree framework is not yet
 * ready, and it is not possible to get the register address to
 * ioremap it. That's why the pointer below is given with an initial
 * value matching its virtual mapping.
 */
static void __iomem *coherency_base = ARMADA_370_XP_REGS_VIRT_BASE + 0x20200;
static void __iomem *coherency_cpu_base;

/* Coherency fabric registers */
#define COHERENCY_FABRIC_CFG_OFFSET	0x4

#define IO_SYNC_BARRIER_CTL_OFFSET	0x0

static const struct of_device_id of_coherency_table[] = {
	{ .compatible = "marvell,coherency-fabric" },
	{ /* end of list */ },
};

#ifdef CONFIG_SMP
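/*
 * The low 4 bits of the coherency fabric configuration register encode
 * the number of CPUs attached to the fabric, minus one. This can be
 * called before the device tree is parsed, thanks to the static
 * initial mapping of coherency_base above.
 */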
int coherency_get_cpu_count(void)
{
	int reg, cnt;

	reg = readl(coherency_base + COHERENCY_FABRIC_CFG_OFFSET);
	cnt = (reg & 0xF) + 1;

	return cnt;
}
#endif
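
/*
 * Usage sketch (not part of this file; the caller is an assumption):
 * the platform's SMP init code can size the possible-CPU map from the
 * fabric's CPU count, along the lines of:
 *
 *	ncores = coherency_get_cpu_count();
 *	for (i = 0; i < ncores; i++)
 *		set_cpu_possible(i, true);
 */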

/* Function defined in coherency_ll.S */
int ll_set_cpu_coherent(void __iomem *base_addr, unsigned int hw_cpu_id);

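/*
 * Make the given CPU a participant in the coherency fabric. @hw_cpu_id
 * is the hardware (MPIDR-based) CPU id; the register-level work is done
 * by the assembly helper ll_set_cpu_coherent(). Returns 1 if the fabric
 * has not been initialized, otherwise the result of the low-level
 * helper. @smp_group_id is currently unused.
 */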
int set_cpu_coherent(unsigned int hw_cpu_id, int smp_group_id)
{
	if (!coherency_base) {
		pr_warn("Can't make CPU %d cache coherent.\n", hw_cpu_id);
		pr_warn("Coherency fabric is not initialized\n");
		return 1;
	}

	return ll_set_cpu_coherent(coherency_base, hw_cpu_id);
}

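/*
 * Trigger an I/O sync barrier: writing 1 to the control register starts
 * the barrier, and the hardware clears the bit once pending I/O
 * coherency transactions have drained, so we spin until it reads back
 * as zero.
 */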
static inline void mvebu_hwcc_sync_io_barrier(void)
{
	writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
	while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
}

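/*
 * DMA operations for hardware-coherent I/O: the fabric keeps CPU caches
 * and DMA masters coherent, so no cache maintenance is needed. An I/O
 * sync barrier is still required before the CPU consumes data written
 * by a device (i.e. for any direction other than DMA_TO_DEVICE), to
 * ensure in-flight device writes have become visible.
 */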
static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
					  unsigned long offset, size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				      size_t size, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();
}

static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();
}

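/*
 * Mostly the generic ARM DMA ops, with page map/unmap and single-buffer
 * syncs replaced by variants that only issue the I/O sync barrier
 * instead of the usual cache maintenance.
 */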
static struct dma_map_ops mvebu_hwcc_dma_ops = {
	.alloc = arm_dma_alloc,
	.free = arm_dma_free,
	.mmap = arm_dma_mmap,
	.map_page = mvebu_hwcc_dma_map_page,
	.unmap_page = mvebu_hwcc_dma_unmap_page,
	.get_sgtable = arm_dma_get_sgtable,
	.map_sg = arm_dma_map_sg,
	.unmap_sg = arm_dma_unmap_sg,
	.sync_single_for_cpu = mvebu_hwcc_dma_sync,
	.sync_single_for_device = mvebu_hwcc_dma_sync,
	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device = arm_dma_sync_sg_for_device,
	.set_dma_mask = arm_dma_set_mask,
};

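/*
 * Bus notifier: install the hardware-coherent DMA ops on each platform
 * device as it is added, so drivers transparently get barrier-based
 * synchronization instead of cache maintenance.
 */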
static int mvebu_hwcc_platform_notifier(struct notifier_block *nb,
					unsigned long event, void *__dev)
{
	struct device *dev = __dev;

	if (event != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;
	set_dma_ops(dev, &mvebu_hwcc_dma_ops);

	return NOTIFY_OK;
}

static struct notifier_block mvebu_hwcc_platform_nb = {
	.notifier_call = mvebu_hwcc_platform_notifier,
};

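/*
 * Map the coherency fabric registers from the device tree, make the
 * boot CPU coherent, and hook the platform bus so that devices added
 * from now on use the hardware-coherent DMA ops.
 */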
int __init coherency_init(void)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, of_coherency_table);
	if (np) {
		pr_info("Initializing Coherency fabric\n");
		coherency_base = of_iomap(np, 0);
		coherency_cpu_base = of_iomap(np, 1);
		set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
		bus_register_notifier(&platform_bus_type,
				      &mvebu_hwcc_platform_nb);
	}

	return 0;
}