1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/pci.h>
26#include <linux/pci_ids.h>
27#include <linux/export.h>
28#include <linux/list.h>
29#include <linux/dma-direct.h>
30#include <asm/iommu.h>
31
32#define STA2X11_SWIOTLB_SIZE (4*1024*1024)
33extern int swiotlb_late_init_with_default_size(size_t default_size);
34
35
36
37
38
/* Each STA2x11 bridge serves up to STA2X11_NR_EP endpoints, each endpoint
 * exposing up to STA2X11_NR_FUNCS AHB functions and one 512MB AMBA window. */
#define STA2X11_NR_EP 4
#define STA2X11_NR_FUNCS 8
#define STA2X11_AMBA_SIZE (512 << 20)

/* Per-function AHB mapping registers, saved at suspend / restored at resume */
struct sta2x11_ahb_regs {
	u32 base, pexlbase, pexhbase, crw;
};

/* State of one endpoint's AMBA mapping */
struct sta2x11_mapping {
	u32 amba_base;		/* AMBA base, read from AHB_BASE(0) in sta2x11_map_ep() */
	int is_suspended;	/* non-zero once suspend_mapping() has saved the regs */
	struct sta2x11_ahb_regs regs[STA2X11_NR_FUNCS];
};

/* One entry per discovered STA2x11 bridge.  bus0 is the first bus number
 * behind the bridge; endpoints answer on buses bus0 .. bus0+NR_EP-1. */
struct sta2x11_instance {
	struct list_head list;
	int bus0;
	struct sta2x11_mapping map[STA2X11_NR_EP];
};

/* All known instances, populated by the 0xcc17 enable fixup below */
static LIST_HEAD(sta2x11_instance_list);
60
61
/*
 * Register a new STA2x11 instance when its bridge (device id 0xcc17) is
 * enabled.  The first instance found also sets up the software I/O TLB,
 * which the sta2x11 DMA ops below rely on.
 */
static void sta2x11_new_instance(struct pci_dev *pdev)
{
	struct sta2x11_instance *instance;

	/* GFP_ATOMIC: PCI fixups may run where sleeping is not allowed */
	instance = kzalloc(sizeof(*instance), GFP_ATOMIC);
	if (!instance)
		return;

	/* Endpoints live on the buses following the bridge's subordinate bus */
	instance->bus0 = pdev->subordinate->number + 1;

	if (list_empty(&sta2x11_instance_list)) {
		int size = STA2X11_SWIOTLB_SIZE;
		/* First instance: set up the swiotlb bounce-buffer area.
		 * A failure is reported but not fatal here: the instance is
		 * still registered. */
		dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
		if (swiotlb_late_init_with_default_size(size))
			dev_emerg(&pdev->dev, "init swiotlb failed\n");
	}
	list_add(&instance->list, &sta2x11_instance_list);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, 0xcc17, sta2x11_new_instance);
82
83
84
85
86static struct sta2x11_instance *sta2x11_pdev_to_instance(struct pci_dev *pdev)
87{
88 struct sta2x11_instance *instance;
89 int ep;
90
91 list_for_each_entry(instance, &sta2x11_instance_list, list) {
92 ep = pdev->bus->number - instance->bus0;
93 if (ep >= 0 && ep < STA2X11_NR_EP)
94 return instance;
95 }
96 return NULL;
97}
98
99static int sta2x11_pdev_to_ep(struct pci_dev *pdev)
100{
101 struct sta2x11_instance *instance;
102
103 instance = sta2x11_pdev_to_instance(pdev);
104 if (!instance)
105 return -1;
106
107 return pdev->bus->number - instance->bus0;
108}
109
110static struct sta2x11_mapping *sta2x11_pdev_to_mapping(struct pci_dev *pdev)
111{
112 struct sta2x11_instance *instance;
113 int ep;
114
115 instance = sta2x11_pdev_to_instance(pdev);
116 if (!instance)
117 return NULL;
118 ep = sta2x11_pdev_to_ep(pdev);
119 return instance->map + ep;
120}
121
122
/**
 * sta2x11_get_instance - return the STA2x11 instance a device belongs to
 * @pdev: PCI device on one of the STA2x11 endpoint buses
 *
 * Exported wrapper around the file-local sta2x11_pdev_to_instance().
 * Returns NULL when @pdev is not behind a known STA2x11 bridge.
 */
struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev)
{
	return sta2x11_pdev_to_instance(pdev);
}
EXPORT_SYMBOL(sta2x11_get_instance);
128
129
130
131
132
133
134
135
136static dma_addr_t p2a(dma_addr_t p, struct pci_dev *pdev)
137{
138 struct sta2x11_mapping *map;
139 dma_addr_t a;
140
141 map = sta2x11_pdev_to_mapping(pdev);
142 a = p + map->amba_base;
143 return a;
144}
145
146
147
148
149
150
151
152static dma_addr_t a2p(dma_addr_t a, struct pci_dev *pdev)
153{
154 struct sta2x11_mapping *map;
155 dma_addr_t p;
156
157 map = sta2x11_pdev_to_mapping(pdev);
158 p = a - map->amba_base;
159 return p;
160}
161
162
163
164
165
166
167
168
169
170static void *sta2x11_swiotlb_alloc_coherent(struct device *dev,
171 size_t size,
172 dma_addr_t *dma_handle,
173 gfp_t flags,
174 unsigned long attrs)
175{
176 void *vaddr;
177
178 vaddr = x86_swiotlb_alloc_coherent(dev, size, dma_handle, flags, attrs);
179 *dma_handle = p2a(*dma_handle, to_pci_dev(dev));
180 return vaddr;
181}
182
183
/* swiotlb-backed DMA operations for STA2x11 endpoints.  Only .alloc is
 * wrapped locally, so the coherent DMA handle is rebased into the AMBA
 * address space; all other ops are the stock x86/swiotlb handlers. */
static const struct dma_map_ops sta2x11_dma_ops = {
	.alloc = sta2x11_swiotlb_alloc_coherent,
	.free = x86_swiotlb_free_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.mapping_error = swiotlb_dma_mapping_error,
	.dma_supported = x86_dma_supported,
};
198
199
200static void sta2x11_setup_pdev(struct pci_dev *pdev)
201{
202 struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);
203
204 if (!instance)
205 return;
206 pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
207 pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
208 pdev->dev.dma_ops = &sta2x11_dma_ops;
209
210
211 pci_set_master(pdev);
212}
213DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_setup_pdev);
214
215
216
217
218
219
220
221
222
223
/*
 * dma_capable - can @dev reach the range [@addr, @addr + @size)?
 *
 * For non-STA2x11 devices this is a plain mask check; for STA2x11
 * endpoints the address must fall inside the endpoint's 512MB AMBA
 * window.  NOTE(review): presumably this non-static definition overrides
 * the generic dma_capable() helper -- confirm against this tree's
 * dma-direct headers.
 */
bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	struct sta2x11_mapping *map;

	if (dev->dma_ops != &sta2x11_dma_ops) {
		/* Ordinary device: check against its DMA mask */
		if (!dev->dma_mask)
			return false;
		return addr + size - 1 <= *dev->dma_mask;
	}

	map = sta2x11_pdev_to_mapping(to_pci_dev(dev));

	if (!map || (addr < map->amba_base))
		return false;
	/* NOTE(review): ">=" rejects a buffer ending exactly at the top of
	 * the window (addr + size == base + SIZE), which would be valid;
	 * ">" is the exact bound.  Kept as-is since the stricter check is
	 * at worst conservative. */
	if (addr + size >= map->amba_base + STA2X11_AMBA_SIZE) {
		return false;
	}

	return true;
}
244
245
246
247
248
249
250dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
251{
252 if (dev->dma_ops != &sta2x11_dma_ops)
253 return paddr;
254 return p2a(paddr, to_pci_dev(dev));
255}
256
257
258
259
260
261
262phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
263{
264 if (dev->dma_ops != &sta2x11_dma_ops)
265 return daddr;
266 return a2p(daddr, to_pci_dev(dev));
267}
268
269
270
271
272
273
274
/* The AHB mapping registers live in PCI config space starting at 0xCA4:
 * one CRW/BASE/PEXLBASE/PEXHBASE quadruple of 32-bit registers per AHB
 * function, 0x10 bytes apart. */
#define AHB_MAPB 0xCA4
#define AHB_CRW(i) (AHB_MAPB + 0 + (i) * 0x10)
/* Low 10 bits of CRW hold control flags; the rest is the window size */
#define AHB_CRW_SZMASK 0xfffffc00UL
#define AHB_CRW_ENABLE (1 << 0)
#define AHB_CRW_WTYPE_MEM (2 << 1)
#define AHB_CRW_ROE (1UL << 3)	/* presumably Relaxed Ordering Enable -- TODO confirm */
#define AHB_CRW_NSE (1UL << 4)	/* presumably No Snoop Enable -- TODO confirm */
#define AHB_BASE(i) (AHB_MAPB + 4 + (i) * 0x10)
#define AHB_PEXLBASE(i) (AHB_MAPB + 8 + (i) * 0x10)
#define AHB_PEXHBASE(i) (AHB_MAPB + 12 + (i) * 0x10)
285
286
/*
 * Program the AHB->PCIe window of one endpoint as soon as it is enabled:
 * function 0 maps the whole 512MB AMBA range at PCIe address 0, all other
 * function windows are disabled.
 */
static void sta2x11_map_ep(struct pci_dev *pdev)
{
	struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
	int i;

	if (!map)
		return;
	/* Remember the AMBA base already present in AHB_BASE(0) */
	pci_read_config_dword(pdev, AHB_BASE(0), &map->amba_base);

	/* Window 0: PCIe base address 0 (low and high halves), full
	 * AMBA-sized memory window, enabled */
	pci_write_config_dword(pdev, AHB_PEXLBASE(0), 0);
	pci_write_config_dword(pdev, AHB_PEXHBASE(0), 0);
	pci_write_config_dword(pdev, AHB_CRW(0), STA2X11_AMBA_SIZE |
			 AHB_CRW_WTYPE_MEM | AHB_CRW_ENABLE);

	/* Disable the windows of all remaining functions */
	for (i = 1; i < STA2X11_NR_FUNCS; i++)
		pci_write_config_dword(pdev, AHB_CRW(i), 0);

	dev_info(&pdev->dev,
		 "sta2x11: Map EP %i: AMBA address %#8x-%#8x\n",
		 sta2x11_pdev_to_ep(pdev), map->amba_base,
		 map->amba_base + STA2X11_AMBA_SIZE - 1);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_map_ep);
312
313#ifdef CONFIG_PM
314
315static void suspend_mapping(struct pci_dev *pdev)
316{
317 struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
318 int i;
319
320 if (!map)
321 return;
322
323 if (map->is_suspended)
324 return;
325 map->is_suspended = 1;
326
327
328 for (i = 0; i < STA2X11_NR_FUNCS; i++) {
329 struct sta2x11_ahb_regs *regs = map->regs + i;
330
331 pci_read_config_dword(pdev, AHB_BASE(i), ®s->base);
332 pci_read_config_dword(pdev, AHB_PEXLBASE(i), ®s->pexlbase);
333 pci_read_config_dword(pdev, AHB_PEXHBASE(i), ®s->pexhbase);
334 pci_read_config_dword(pdev, AHB_CRW(i), ®s->crw);
335 }
336}
337DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, suspend_mapping);
338
339static void resume_mapping(struct pci_dev *pdev)
340{
341 struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
342 int i;
343
344 if (!map)
345 return;
346
347
348 if (!map->is_suspended)
349 goto out;
350 map->is_suspended = 0;
351
352
353 for (i = 0; i < STA2X11_NR_FUNCS; i++) {
354 struct sta2x11_ahb_regs *regs = map->regs + i;
355
356 pci_write_config_dword(pdev, AHB_BASE(i), regs->base);
357 pci_write_config_dword(pdev, AHB_PEXLBASE(i), regs->pexlbase);
358 pci_write_config_dword(pdev, AHB_PEXHBASE(i), regs->pexhbase);
359 pci_write_config_dword(pdev, AHB_CRW(i), regs->crw);
360 }
361out:
362 pci_set_master(pdev);
363}
364DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, resume_mapping);
365
366#endif
367