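/*
 * DMA mapping callbacks for OpenRISC.
 *
 * OpenRISC has no hardware cache coherency, so "consistent" DMA
 * memory is mapped with the cache-inhibit (_PAGE_CI) bit set, and
 * streaming mappings are synchronized by flushing (SPR_DCBFR) or
 * invalidating (SPR_DCBIR) data-cache blocks by hand.
 */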
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access.
	 */
	flush_tlb_page(NULL, addr);

	/* Flush the page out of the dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry = page_set_nocache,
};

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access.
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry = page_clear_nocache,
};

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact(), sets the
 * cache-inhibit bit on the corresponding kernel mappings, and makes
 * sure the pages are flushed out of the cache before they are used.
 *
 * If the DMA_ATTR_NON_CONSISTENT attribute is set, the cache-inhibit
 * step is skipped and "normal", cacheable memory is returned instead;
 * the caller is then responsible for syncing it explicitly.
 */
static void *
or1k_dma_alloc(struct device *dev, size_t size,
	       dma_addr_t *dma_handle, gfp_t gfp,
	       unsigned long attrs)
{
	unsigned long va;
	void *page;

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/*
		 * We need to iterate through the pages, clearing the
		 * dcache for them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(&init_mm, va, va + size,
				    &set_nocache_walk_ops, NULL)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}

static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
	      dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/* walk_page_range shouldn't be able to fail here */
		WARN_ON(walk_page_range(&init_mm, va, va + size,
					&clear_nocache_walk_ops, NULL));
	}

	free_pages_exact(vaddr, size);
}

static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
	      unsigned long offset, size_t size,
	      enum dma_data_direction dir,
	      unsigned long attrs)
{
	unsigned long cl;
	dma_addr_t addr = page_to_phys(page) + offset;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return addr;

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}

	return addr;
}

static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	/* Nothing special to do here... */
}

static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
	    int nents, enum dma_data_direction dir,
	    unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		/* Forward attrs so DMA_ATTR_SKIP_CPU_SYNC is honoured */
		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
					       s->length, dir, attrs);
	}

	return nents;
}

static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
	      int nents, enum dma_data_direction dir,
	      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir,
				attrs);
	}
}

static void
or1k_sync_single_for_cpu(struct device *dev,
			 dma_addr_t dma_handle, size_t size,
			 enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	/* Invalidate the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBIR, cl);
}

static void
or1k_sync_single_for_device(struct device *dev,
			    dma_addr_t dma_handle, size_t size,
			    enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	/* Flush the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}

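/*
 * Hypothetical usage sketch (not part of this file): a platform could
 * attach these ops to a device with the generic helper from
 * <linux/dma-mapping.h>:
 *
 *	set_dma_ops(&pdev->dev, &or1k_dma_map_ops);
 *
 * after which dma_alloc_coherent() and dma_map_single() on that device
 * are routed through the callbacks below.
 */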
const struct dma_map_ops or1k_dma_map_ops = {
	.alloc = or1k_dma_alloc,
	.free = or1k_dma_free,
	.map_page = or1k_map_page,
	.unmap_page = or1k_unmap_page,
	.map_sg = or1k_map_sg,
	.unmap_sg = or1k_unmap_sg,
	.sync_single_for_cpu = or1k_sync_single_for_cpu,
	.sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);