1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50#include <linux/types.h>
51#include <linux/scatterlist.h>
52
53#include "verbs.h"
54
55#define BAD_DMA_ADDRESS ((u64) 0)
56
57
58
59
60
61
62
63
64
65
66static int hfi1_mapping_error(struct ib_device *dev, u64 dma_addr)
67{
68 return dma_addr == BAD_DMA_ADDRESS;
69}
70
71static u64 hfi1_dma_map_single(struct ib_device *dev, void *cpu_addr,
72 size_t size, enum dma_data_direction direction)
73{
74 if (WARN_ON(!valid_dma_direction(direction)))
75 return BAD_DMA_ADDRESS;
76
77 return (u64) cpu_addr;
78}
79
static void hfi1_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
				  enum dma_data_direction direction)
{
	/* Identity mapping: nothing was set up in map_single, so no teardown. */
}
85
86static u64 hfi1_dma_map_page(struct ib_device *dev, struct page *page,
87 unsigned long offset, size_t size,
88 enum dma_data_direction direction)
89{
90 u64 addr;
91
92 if (WARN_ON(!valid_dma_direction(direction)))
93 return BAD_DMA_ADDRESS;
94
95 if (offset + size > PAGE_SIZE)
96 return BAD_DMA_ADDRESS;
97
98 addr = (u64) page_address(page);
99 if (addr)
100 addr += offset;
101
102 return addr;
103}
104
static void hfi1_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
				enum dma_data_direction direction)
{
	/* Identity mapping: nothing was set up in map_page, so no teardown. */
}
110
111static int hfi1_map_sg(struct ib_device *dev, struct scatterlist *sgl,
112 int nents, enum dma_data_direction direction)
113{
114 struct scatterlist *sg;
115 u64 addr;
116 int i;
117 int ret = nents;
118
119 if (WARN_ON(!valid_dma_direction(direction)))
120 return BAD_DMA_ADDRESS;
121
122 for_each_sg(sgl, sg, nents, i) {
123 addr = (u64) page_address(sg_page(sg));
124 if (!addr) {
125 ret = 0;
126 break;
127 }
128 sg->dma_address = addr + sg->offset;
129#ifdef CONFIG_NEED_SG_DMA_LENGTH
130 sg->dma_length = sg->length;
131#endif
132 }
133 return ret;
134}
135
static void hfi1_unmap_sg(struct ib_device *dev,
			  struct scatterlist *sg, int nents,
			  enum dma_data_direction direction)
{
	/* Identity mapping: nothing was set up in map_sg, so no teardown. */
}
142
static void hfi1_sync_single_for_cpu(struct ib_device *dev, u64 addr,
				     size_t size, enum dma_data_direction dir)
{
	/* CPU and "device" share the same coherent view; no sync needed. */
}
147
static void hfi1_sync_single_for_device(struct ib_device *dev, u64 addr,
					size_t size,
					enum dma_data_direction dir)
{
	/* CPU and "device" share the same coherent view; no sync needed. */
}
153
154static void *hfi1_dma_alloc_coherent(struct ib_device *dev, size_t size,
155 u64 *dma_handle, gfp_t flag)
156{
157 struct page *p;
158 void *addr = NULL;
159
160 p = alloc_pages(flag, get_order(size));
161 if (p)
162 addr = page_address(p);
163 if (dma_handle)
164 *dma_handle = (u64) addr;
165 return addr;
166}
167
168static void hfi1_dma_free_coherent(struct ib_device *dev, size_t size,
169 void *cpu_addr, u64 dma_handle)
170{
171 free_pages((unsigned long) cpu_addr, get_order(size));
172}
173
/*
 * Identity DMA-mapping operations plugged into the ib_device for hfi1:
 * every "DMA address" is just the kernel-virtual address, so the unmap
 * and sync hooks are no-ops.
 */
struct ib_dma_mapping_ops hfi1_dma_mapping_ops = {
	.mapping_error = hfi1_mapping_error,
	.map_single = hfi1_dma_map_single,
	.unmap_single = hfi1_dma_unmap_single,
	.map_page = hfi1_dma_map_page,
	.unmap_page = hfi1_dma_unmap_page,
	.map_sg = hfi1_map_sg,
	.unmap_sg = hfi1_unmap_sg,
	.sync_single_for_cpu = hfi1_sync_single_for_cpu,
	.sync_single_for_device = hfi1_sync_single_for_device,
	.alloc_coherent = hfi1_dma_alloc_coherent,
	.free_coherent = hfi1_dma_free_coherent
};
187