1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/pci.h>
26#include <linux/pci_ids.h>
27#include <linux/export.h>
28#include <linux/list.h>
29
/* Size of the private swiotlb bounce-buffer area registered per boot (4 MB) */
#define STA2X11_SWIOTLB_SIZE (4*1024*1024)
/* Late swiotlb initializer; declared here because no header exports it */
extern int swiotlb_late_init_with_default_size(size_t default_size);
32
33
34
35
36
#define STA2X11_NR_EP 4		/* endpoint bridges per instance (map[] size) */
#define STA2X11_NR_FUNCS 8	/* AHB mapping windows per endpoint */
#define STA2X11_AMBA_SIZE (512 << 20)	/* 512M AMBA window; also the DMA mask */
40
/* One AHB window's registers, saved at suspend and restored at resume */
struct sta2x11_ahb_regs {
	u32 base, pexlbase, pexhbase, crw;
};
44
/* Per-endpoint mapping state */
struct sta2x11_mapping {
	u32 amba_base;		/* read from AHB_BASE(0) in sta2x11_map_ep() */
	int is_suspended;	/* prevents double save/restore across PM cycles */
	struct sta2x11_ahb_regs regs[STA2X11_NR_FUNCS];
};
50
/*
 * One instance per discovered STA2x11 chip: bus0 is the first bus number
 * behind the root bridge; the STA2X11_NR_EP buses starting there each
 * host one endpoint, with its state in map[].
 */
struct sta2x11_instance {
	struct list_head list;
	int bus0;
	struct sta2x11_mapping map[STA2X11_NR_EP];
};
56
/* All known instances, most recently added first (see sta2x11_new_instance) */
static LIST_HEAD(sta2x11_instance_list);
58
59
60static void sta2x11_new_instance(struct pci_dev *pdev)
61{
62 struct sta2x11_instance *instance;
63
64 instance = kzalloc(sizeof(*instance), GFP_ATOMIC);
65 if (!instance)
66 return;
67
68 instance->bus0 = pdev->subordinate->number + 1;
69
70 if (list_empty(&sta2x11_instance_list)) {
71 int size = STA2X11_SWIOTLB_SIZE;
72
73 dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
74 if (swiotlb_late_init_with_default_size(size))
75 dev_emerg(&pdev->dev, "init swiotlb failed\n");
76 }
77 list_add(&instance->list, &sta2x11_instance_list);
78}
79DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, 0xcc17, sta2x11_new_instance);
80
81
82
83
84static struct sta2x11_instance *sta2x11_pdev_to_instance(struct pci_dev *pdev)
85{
86 struct sta2x11_instance *instance;
87 int ep;
88
89 list_for_each_entry(instance, &sta2x11_instance_list, list) {
90 ep = pdev->bus->number - instance->bus0;
91 if (ep >= 0 && ep < STA2X11_NR_EP)
92 return instance;
93 }
94 return NULL;
95}
96
97static int sta2x11_pdev_to_ep(struct pci_dev *pdev)
98{
99 struct sta2x11_instance *instance;
100
101 instance = sta2x11_pdev_to_instance(pdev);
102 if (!instance)
103 return -1;
104
105 return pdev->bus->number - instance->bus0;
106}
107
108static struct sta2x11_mapping *sta2x11_pdev_to_mapping(struct pci_dev *pdev)
109{
110 struct sta2x11_instance *instance;
111 int ep;
112
113 instance = sta2x11_pdev_to_instance(pdev);
114 if (!instance)
115 return NULL;
116 ep = sta2x11_pdev_to_ep(pdev);
117 return instance->map + ep;
118}
119
120
121struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev)
122{
123 return sta2x11_pdev_to_instance(pdev);
124}
125EXPORT_SYMBOL(sta2x11_get_instance);
126
127
128
129
130
131
132
133
134static dma_addr_t p2a(dma_addr_t p, struct pci_dev *pdev)
135{
136 struct sta2x11_mapping *map;
137 dma_addr_t a;
138
139 map = sta2x11_pdev_to_mapping(pdev);
140 a = p + map->amba_base;
141 return a;
142}
143
144
145
146
147
148
149
150static dma_addr_t a2p(dma_addr_t a, struct pci_dev *pdev)
151{
152 struct sta2x11_mapping *map;
153 dma_addr_t p;
154
155 map = sta2x11_pdev_to_mapping(pdev);
156 p = a - map->amba_base;
157 return p;
158}
159
160
161
162
163
164
165
166
167
168static void *sta2x11_swiotlb_alloc_coherent(struct device *dev,
169 size_t size,
170 dma_addr_t *dma_handle,
171 gfp_t flags,
172 struct dma_attrs *attrs)
173{
174 void *vaddr;
175
176 vaddr = x86_swiotlb_alloc_coherent(dev, size, dma_handle, flags, attrs);
177 *dma_handle = p2a(*dma_handle, to_pci_dev(dev));
178 return vaddr;
179}
180
181
/*
 * DMA ops for STA2x11 devices: plain swiotlb, except that coherent
 * allocation rewrites the handle into AMBA space (see alloc above).
 */
static struct dma_map_ops sta2x11_dma_ops = {
	.alloc = sta2x11_swiotlb_alloc_coherent,	/* AMBA-fixed-up handle */
	.free = x86_swiotlb_free_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.mapping_error = swiotlb_dma_mapping_error,
	.dma_supported = NULL,	/* presumably falls back to a generic check -- TODO confirm */
};
196
197
198static void sta2x11_setup_pdev(struct pci_dev *pdev)
199{
200 struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);
201
202 if (!instance)
203 return;
204 pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
205 pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
206 pdev->dev.archdata.dma_ops = &sta2x11_dma_ops;
207
208
209 pci_set_master(pdev);
210}
211DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_setup_pdev);
212
213
214
215
216
217
218
219
220
221
222bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
223{
224 struct sta2x11_mapping *map;
225
226 if (dev->archdata.dma_ops != &sta2x11_dma_ops) {
227 if (!dev->dma_mask)
228 return false;
229 return addr + size - 1 <= *dev->dma_mask;
230 }
231
232 map = sta2x11_pdev_to_mapping(to_pci_dev(dev));
233
234 if (!map || (addr < map->amba_base))
235 return false;
236 if (addr + size >= map->amba_base + STA2X11_AMBA_SIZE) {
237 return false;
238 }
239
240 return true;
241}
242
243
244
245
246
247
248dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
249{
250 if (dev->archdata.dma_ops != &sta2x11_dma_ops)
251 return paddr;
252 return p2a(paddr, to_pci_dev(dev));
253}
254
255
256
257
258
259
260phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
261{
262 if (dev->archdata.dma_ops != &sta2x11_dma_ops)
263 return daddr;
264 return a2p(daddr, to_pci_dev(dev));
265}
266
267
268
269
270
271
272
/*
 * AHB mapping registers in each endpoint's PCI config space: one
 * 0x10-byte record per function starting at AHB_MAPB, holding CRW
 * (control/size), BASE (AMBA side) and PEXL/PEXH (PCIe side) words.
 */
#define AHB_MAPB 0xCA4
#define AHB_CRW(i) (AHB_MAPB + 0 + (i) * 0x10)
#define AHB_CRW_SZMASK 0xfffffc00UL	/* window size field -- TODO confirm */
#define AHB_CRW_ENABLE (1 << 0)
#define AHB_CRW_WTYPE_MEM (2 << 1)
#define AHB_CRW_ROE (1UL << 3)		/* relaxed ordering? -- TODO confirm */
#define AHB_CRW_NSE (1UL << 4)		/* no-snoop? -- TODO confirm */
#define AHB_BASE(i) (AHB_MAPB + 4 + (i) * 0x10)
#define AHB_PEXLBASE(i) (AHB_MAPB + 8 + (i) * 0x10)
#define AHB_PEXHBASE(i) (AHB_MAPB + 12 + (i) * 0x10)
283
284
/*
 * At enable time, point window 0 of each endpoint at PCIe address 0
 * covering the whole AMBA space, and disable the remaining windows.
 * Register programming order (bases before CRW enable) is preserved.
 */
static void sta2x11_map_ep(struct pci_dev *pdev)
{
	struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
	int i;

	if (!map)
		return;
	/* The preconfigured AMBA base of window 0 is our translation offset */
	pci_read_config_dword(pdev, AHB_BASE(0), &map->amba_base);

	/* Window 0: map from PCIe address 0, size = whole AMBA space */
	pci_write_config_dword(pdev, AHB_PEXLBASE(0), 0);
	pci_write_config_dword(pdev, AHB_PEXHBASE(0), 0);
	pci_write_config_dword(pdev, AHB_CRW(0), STA2X11_AMBA_SIZE |
			 AHB_CRW_WTYPE_MEM | AHB_CRW_ENABLE);

	/* Disable all the other windows */
	for (i = 1; i < STA2X11_NR_FUNCS; i++)
		pci_write_config_dword(pdev, AHB_CRW(i), 0);

	dev_info(&pdev->dev,
		 "sta2x11: Map EP %i: AMBA address %#8x-%#8x\n",
		 sta2x11_pdev_to_ep(pdev), map->amba_base,
		 map->amba_base + STA2X11_AMBA_SIZE - 1);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_map_ep);
310
311#ifdef CONFIG_PM
312
313static void suspend_mapping(struct pci_dev *pdev)
314{
315 struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
316 int i;
317
318 if (!map)
319 return;
320
321 if (map->is_suspended)
322 return;
323 map->is_suspended = 1;
324
325
326 for (i = 0; i < STA2X11_NR_FUNCS; i++) {
327 struct sta2x11_ahb_regs *regs = map->regs + i;
328
329 pci_read_config_dword(pdev, AHB_BASE(i), ®s->base);
330 pci_read_config_dword(pdev, AHB_PEXLBASE(i), ®s->pexlbase);
331 pci_read_config_dword(pdev, AHB_PEXHBASE(i), ®s->pexhbase);
332 pci_read_config_dword(pdev, AHB_CRW(i), ®s->crw);
333 }
334}
335DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, suspend_mapping);
336
/*
 * After resume, write back the AHB window registers saved by
 * suspend_mapping(), then re-enable bus mastering.  pci_set_master()
 * runs even when there was nothing to restore (the goto path).
 */
static void resume_mapping(struct pci_dev *pdev)
{
	struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
	int i;

	if (!map)
		return;

	/* Nothing saved: suspend_mapping() never ran for this endpoint */
	if (!map->is_suspended)
		goto out;
	map->is_suspended = 0;

	/* Restore all window registers from the saved copies */
	for (i = 0; i < STA2X11_NR_FUNCS; i++) {
		struct sta2x11_ahb_regs *regs = map->regs + i;

		pci_write_config_dword(pdev, AHB_BASE(i), regs->base);
		pci_write_config_dword(pdev, AHB_PEXLBASE(i), regs->pexlbase);
		pci_write_config_dword(pdev, AHB_PEXHBASE(i), regs->pexhbase);
		pci_write_config_dword(pdev, AHB_CRW(i), regs->crw);
	}
out:
	pci_set_master(pdev);
}
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, resume_mapping);
363
364#endif
365