#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
#define _ASM_MICROBLAZE_DMA_MAPPING_H

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm-generic/dma-coherent.h>
#include <asm/cacheflush.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)

/* Return the device's usable DMA mask, defaulting to 32 bits if unset. */
static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;

	return 0xfffffffful;
}

extern struct dma_map_ops *dma_ops;

/*
 * Available generic sets of operations
 */
extern struct dma_map_ops dma_direct_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}
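
/*
 * Illustrative sketch only (not part of this header): platform setup
 * code could install the generic direct-mapped operations on a device,
 * assuming a hypothetical struct device *dev:
 *
 *	set_dma_ops(dev, &dma_direct_ops);
 */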

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (unlikely(!ops))
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (unlikely(ops == NULL))
		return -EIO;
	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
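
/*
 * Illustrative sketch only: a driver would typically negotiate its DMA
 * mask at probe time and bail out if the platform cannot satisfy it;
 * pdev is a hypothetical platform device, and DMA_BIT_MASK() comes from
 * linux/dma-mapping.h:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */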

#include <asm-generic/dma-mapping-common.h>

static inline void __dma_sync(unsigned long paddr,
			      size_t size, enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		flush_dcache_range(paddr, paddr + size);
		break;
	case DMA_FROM_DEVICE:
		invalidate_dcache_range(paddr, paddr + size);
		break;
	default:
		BUG();
	}
}
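
/*
 * Rationale: DMA_TO_DEVICE (and the outbound half of DMA_BIDIRECTIONAL)
 * writes dirty cache lines back so the device reads current data, while
 * DMA_FROM_DEVICE discards stale lines so the CPU sees what the device
 * wrote. A streaming map of a hypothetical buffer buf would do:
 *
 *	__dma_sync(virt_to_phys(buf), len, DMA_TO_DEVICE);
 */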

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == DMA_ERROR_CODE);
}
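
/*
 * Illustrative sketch only: every streaming mapping must be checked
 * before use. dma_map_single() is provided by the generic header
 * included above; dev, buf and len are hypothetical:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */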

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	BUG_ON(!ops);

	memory = ops->alloc(dev, size, dma_handle, flag, attrs);

	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
	return memory;
}

#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
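
/*
 * Illustrative sketch only: a driver pairs the coherent allocator and
 * destructor across probe/remove; dev is a hypothetical device:
 *
 *	dma_addr_t bus;
 *	void *vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, vaddr, bus);
 */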

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(virt_to_phys(vaddr), size, direction);
}
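
/*
 * Illustrative sketch only: dma_cache_sync() pairs with memory from
 * dma_alloc_noncoherent() when the CPU and the device take turns on a
 * buffer; dev, vaddr and len are hypothetical:
 *
 *	dma_cache_sync(dev, vaddr, len, DMA_TO_DEVICE);
 */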

#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */