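/*
 * Asynchronous xor/xor-zero-sum offload api.
 *
 * Copyright(c) Intel Corporation (see MODULE_AUTHOR at the end of this
 * file); released under the GPL (see MODULE_LICENSE).
 */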
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/raid/xor.h>
#include <linux/async_tx.h>
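
/* do_async_xor - dma map the pages and perform the xor with a dma engine */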
static __async_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
	     unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
	     struct async_submit_ctl *submit)
{
	struct dma_device *dma = chan->device;
	struct dma_async_tx_descriptor *tx = NULL;
	int src_off = 0;
	int i;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	enum async_tx_flags flags_orig = submit->flags;
	enum dma_ctrl_flags dma_flags;
	int xor_src_cnt = 0;
	dma_addr_t dma_dest;

	/* map the dest bidirectionally in case it is re-used as a source */
	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
	for (i = 0; i < src_cnt; i++) {
		/* skip NULL "holes" in the source list */
		if (!src_list[i])
			continue;
		/* the dest was already mapped once, above */
		if (unlikely(src_list[i] == dest)) {
			dma_src[xor_src_cnt++] = dma_dest;
			continue;
		}
		dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i],
						      offset, len, DMA_TO_DEVICE);
	}
	src_cnt = xor_src_cnt;

	while (src_cnt) {
		submit->flags = flags_orig;
		dma_flags = 0;
		xor_src_cnt = min(src_cnt, (int)dma->max_xor);
		/* if we are submitting additional xors, leave the chain
		 * open, clear the callback parameters, and leave the
		 * destination buffer mapped
		 */
		if (src_cnt > xor_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
		}
		if (submit->cb_fn)
			dma_flags |= DMA_PREP_INTERRUPT;
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		/* Since we have clobbered the src_list we are committed
		 * to doing this asynchronously.  Drivers force forward
		 * progress in case they can not provide a descriptor.
		 */
		tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
					      xor_src_cnt, len, dma_flags);

		if (unlikely(!tx))
			async_tx_quiesce(&submit->depend_tx);

		/* spin wait for the preceding transactions to complete */
		while (unlikely(!tx)) {
			dma_async_issue_pending(chan);
			tx = dma->device_prep_dma_xor(chan, dma_dest,
						      &dma_src[src_off],
						      xor_src_cnt, len,
						      dma_flags);
		}

		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		if (src_cnt > xor_src_cnt) {
			/* drop completed sources */
			src_cnt -= xor_src_cnt;
			src_off += xor_src_cnt;

			/* use the intermediate result as a source */
			dma_src[--src_off] = dma_dest;
			src_cnt++;
		} else
			break;
	}

	return tx;
}
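
/* do_sync_xor - synchronously calculate the xor with the cpu */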
static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
	    int src_cnt, size_t len, struct async_submit_ctl *submit)
{
	int i;
	int xor_src_cnt = 0;
	int src_off = 0;
	void *dest_buf;
	void **srcs;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) src_list;

	/* convert the page list to an array of virtual buffer pointers */
	for (i = 0; i < src_cnt; i++)
		if (src_list[i])
			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
	src_cnt = xor_src_cnt;

	/* set the destination address */
	dest_buf = page_address(dest) + offset;

	if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
		memset(dest_buf, 0, len);

	while (src_cnt > 0) {
		/* process up to 'MAX_XOR_BLOCKS' sources per iteration */
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);

		/* drop completed sources */
		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}

	async_tx_sync_epilog(submit);
}
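
/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 * @dest: destination page
 * @src_list: array of source pages
 * @offset: common src/dst offset to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
 *
 * The synchronous path (xor_blocks) always uses the dest as a source,
 * so ASYNC_TX_XOR_ZERO_DST must be set when prior dest data must not
 * feed the calculation.  A dma engine is assumed to read the dest only
 * when it appears explicitly in the source list.
 *
 * src_list note: if the dest is also a source it must be at index zero.
 * The contents of this array will be overwritten if a scribble region
 * is not specified.
 *
 * A minimal usage sketch; all identifiers other than the api calls and
 * flags are hypothetical:
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
 *			  my_callback, my_cb_param, my_scribble);
 *	tx = async_xor(dest_page, src_pages, 0, nr_srcs, PAGE_SIZE, &submit);
 */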
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	  int src_cnt, size_t len, struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
						      &dest, 1, src_list,
						      src_cnt, len);
	dma_addr_t *dma_src = NULL;

	BUG_ON(src_cnt <= 1);

	/* use the caller's scribble region for the dma addresses, or
	 * convert src_list in place when the addresses fit
	 */
	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) src_list;

	if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
		/* run the xor asynchronously */
		pr_debug("%s (async): len: %zu\n", __func__, len);

		return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
				    dma_src, submit);
	} else {
		/* run the xor synchronously */
		pr_debug("%s (sync): len: %zu\n", __func__, len);
		WARN_ONCE(chan, "%s: no space for dma address conversion\n",
			  __func__);

		/* in the sync case the dest is an implied source
		 * (assumes the dest is at index zero of src_list)
		 */
		if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
			src_cnt--;
			src_list++;
		}

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		do_sync_xor(dest, src_list, offset, src_cnt, len, submit);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_xor);
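
/* page_is_zero - check that a memory region contains only zeroes */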
static int page_is_zero(struct page *p, unsigned int offset, size_t len)
{
	char *a = page_address(p) + offset;

	/* zero if the first 32-bit word is zero and the region equals
	 * itself shifted by four bytes
	 */
	return ((*(u32 *) a) == 0 &&
		memcmp(a, a + 4, len - 4) == 0);
}

static inline struct dma_chan *
xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
	     struct page **src_list, int src_cnt, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	return NULL;
#endif
	return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
				     src_cnt, len);
}
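
/**
 * async_xor_val - attempt a xor parity check with a dma engine.
 * @dest: destination page used if the xor is performed synchronously
 * @src_list: array of source pages
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK
 *
 * src_list note: if the dest is also a source it must be at index zero.
 * The contents of this array will be overwritten if a scribble region
 * is not specified.
 */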
struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
	      int src_cnt, size_t len, enum sum_check_flags *result,
	      struct async_submit_ctl *submit)
{
	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;
	dma_addr_t *dma_src = NULL;

	BUG_ON(src_cnt <= 1);

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) src_list;

	if (dma_src && device && src_cnt <= device->max_xor &&
	    is_dma_xor_aligned(device, offset, 0, len)) {
		unsigned long dma_prep_flags = 0;
		int i;

		pr_debug("%s: (async) len: %zu\n", __func__, len);

		if (submit->cb_fn)
			dma_prep_flags |= DMA_PREP_INTERRUPT;
		if (submit->flags & ASYNC_TX_FENCE)
			dma_prep_flags |= DMA_PREP_FENCE;
		for (i = 0; i < src_cnt; i++)
			dma_src[i] = dma_map_page(device->dev, src_list[i],
						  offset, len, DMA_TO_DEVICE);

		tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
						     len, result,
						     dma_prep_flags);
		if (unlikely(!tx)) {
			async_tx_quiesce(&submit->depend_tx);

			/* spin wait for a descriptor to become available */
			while (!tx) {
				dma_async_issue_pending(chan);
				tx = device->device_prep_dma_xor_val(chan,
						dma_src, src_cnt, len, result,
						dma_prep_flags);
			}
		}

		async_tx_submit(chan, tx, submit);
	} else {
		enum async_tx_flags flags_orig = submit->flags;

		pr_debug("%s: (sync) len: %zu\n", __func__, len);
		WARN_ONCE(device && src_cnt <= device->max_xor,
			  "%s: no space for dma address conversion\n",
			  __func__);

		/* recompute the xor synchronously (the dest is treated as
		 * an implied source) and then test the result for zero
		 */
		submit->flags |= ASYNC_TX_XOR_DROP_DST;
		submit->flags &= ~ASYNC_TX_ACK;

		tx = async_xor(dest, src_list, offset, src_cnt, len, submit);

		async_tx_quiesce(&tx);

		*result = !page_is_zero(dest, offset, len) << SUM_CHECK_P;

		async_tx_sync_epilog(submit);
		submit->flags = flags_orig;
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_xor_val);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
MODULE_LICENSE("GPL");