#include <linux/scatterlist.h>
#include <linux/gfp.h>
#include <rdma/ib_verbs.h>

#include "ipath_verbs.h"

#define BAD_DMA_ADDRESS ((u64) 0)
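/*
 * The functions below replace the core ib_dma_*() mapping operations
 * for ipath: the driver moves data with the CPU rather than a DMA
 * engine, so a "DMA address" is simply a kernel virtual address cast
 * to u64, and BAD_DMA_ADDRESS (0) marks a failed mapping.
 */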
static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == BAD_DMA_ADDRESS;
}
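/*
 * "Mapping" a buffer just returns its kernel virtual address; there
 * is no translation state to set up, so unmap is a no-op beyond the
 * direction sanity check.
 */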
static u64 ipath_dma_map_single(struct ib_device *dev,
				void *cpu_addr, size_t size,
				enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return (u64) cpu_addr;
}

static void ipath_dma_unmap_single(struct ib_device *dev,
				   u64 addr, size_t size,
				   enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
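/*
 * A page mapping must fit within a single page; a request that
 * crosses a page boundary fails with BAD_DMA_ADDRESS.
 */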
static u64 ipath_dma_map_page(struct ib_device *dev,
			      struct page *page,
			      unsigned long offset,
			      size_t size,
			      enum dma_data_direction direction)
{
	u64 addr;

	BUG_ON(!valid_dma_direction(direction));

	if (offset + size > PAGE_SIZE) {
		addr = BAD_DMA_ADDRESS;
		goto done;
	}

	addr = (u64) page_address(page);
	if (addr)
		addr += offset;
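	/*
	 * Highmem pages have no permanent kernel mapping;
	 * page_address() returns NULL for them, which the caller
	 * then sees as BAD_DMA_ADDRESS.
	 */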
done:
	return addr;
}

static void ipath_dma_unmap_page(struct ib_device *dev,
				 u64 addr, size_t size,
				 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
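/*
 * Scatterlist mapping records each entry's kernel virtual address
 * plus offset.  page_address() returns NULL for highmem pages, in
 * which case the whole map fails: the interface returns the number
 * of entries mapped, so 0 signals the error.
 */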
static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
			int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	u64 addr;
	int i;
	int ret = nents;

	BUG_ON(!valid_dma_direction(direction));

	for_each_sg(sgl, sg, nents, i) {
		addr = (u64) page_address(sg_page(sg));

		if (!addr) {
			ret = 0;
			break;
		}
		sg->dma_address = addr + sg->offset;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
	}
	return ret;
}

static void ipath_unmap_sg(struct ib_device *dev,
			   struct scatterlist *sg, int nents,
			   enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
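/*
 * The CPU does all data movement, so memory is always coherent from
 * its point of view; the sync operations are intentionally empty.
 */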
static void ipath_sync_single_for_cpu(struct ib_device *dev,
				      u64 addr,
				      size_t size,
				      enum dma_data_direction dir)
{
}

static void ipath_sync_single_for_device(struct ib_device *dev,
					 u64 addr,
					 size_t size,
					 enum dma_data_direction dir)
{
}
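/*
 * "Coherent" allocations are ordinary page allocations, and the DMA
 * handle handed back is just the kernel virtual address.
 */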
static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
				      u64 *dma_handle, gfp_t flag)
{
	struct page *p;
	void *addr = NULL;

	p = alloc_pages(flag, get_order(size));
	if (p)
		addr = page_address(p);
	if (dma_handle)
		*dma_handle = (u64) addr;
	return addr;
}

static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
				    void *cpu_addr, u64 dma_handle)
{
	free_pages((unsigned long) cpu_addr, get_order(size));
}
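/*
 * Dispatch table consumed by the core ib_dma_*() helpers once the
 * driver points its ib_device dma_ops at it, e.g.:
 *
 *	dev->dma_ops = &ipath_dma_mapping_ops;
 */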
struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
	.mapping_error = ipath_mapping_error,
	.map_single = ipath_dma_map_single,
	.unmap_single = ipath_dma_unmap_single,
	.map_page = ipath_dma_map_page,
	.unmap_page = ipath_dma_unmap_page,
	.map_sg = ipath_map_sg,
	.unmap_sg = ipath_unmap_sg,
	.sync_single_for_cpu = ipath_sync_single_for_cpu,
	.sync_single_for_device = ipath_sync_single_for_device,
	.alloc_coherent = ipath_dma_alloc_coherent,
	.free_coherent = ipath_dma_free_coherent
};