/*
 * Asynchronous RAID-6 P+Q syndrome generation and validation.
 *
 * Generation and validation are offloaded to a dmaengine channel when a
 * capable one is available, and fall back to the synchronous raid6
 * library routines otherwise.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/*
 * pq_scribble_page - space to hold a throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/*
 * The struct page *blocks[] parameter passed to async_gen_syndrome() and
 * async_syndrome_val() carries the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1].
 *
 * note: these are macros rather than inline functions because they are
 * used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])

/*
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
                      const unsigned char *scfs, int disks,
                      struct dmaengine_unmap_data *unmap,
                      enum dma_ctrl_flags dma_flags,
                      struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct dma_device *dma = chan->device;
        enum async_tx_flags flags_orig = submit->flags;
        dma_async_tx_callback cb_fn_orig = submit->cb_fn;
        void *cb_param_orig = submit->cb_param;
        int src_cnt = disks - 2;
        unsigned short pq_src_cnt;
        dma_addr_t dma_dest[2];
        int src_off = 0;

        while (src_cnt > 0) {
                submit->flags = flags_orig;
                pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
                /* if we are submitting additional pqs, leave the chain open,
                 * clear the callback parameters, and leave the destination
                 * buffers mapped
                 */
                if (src_cnt > pq_src_cnt) {
                        submit->flags &= ~ASYNC_TX_ACK;
                        submit->flags |= ASYNC_TX_FENCE;
                        submit->cb_fn = NULL;
                        submit->cb_param = NULL;
                } else {
                        submit->cb_fn = cb_fn_orig;
                        submit->cb_param = cb_param_orig;
                        if (cb_fn_orig)
                                dma_flags |= DMA_PREP_INTERRUPT;
                }
                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;

                /* drivers force forward progress in case they can not
                 * provide a descriptor
                 */
                for (;;) {
                        dma_dest[0] = unmap->addr[disks - 2];
                        dma_dest[1] = unmap->addr[disks - 1];
                        tx = dma->device_prep_dma_pq(chan, dma_dest,
                                                     &unmap->addr[src_off],
                                                     pq_src_cnt,
                                                     &scfs[src_off], unmap->len,
                                                     dma_flags);
                        if (likely(tx))
                                break;
                        async_tx_quiesce(&submit->depend_tx);
                        dma_async_issue_pending(chan);
                }

                dma_set_unmap(tx, unmap);
                async_tx_submit(chan, tx, submit);
                submit->depend_tx = tx;

                /* drop completed sources */
                src_cnt -= pq_src_cnt;
                src_off += pq_src_cnt;

                dma_flags |= DMA_PREP_CONTINUE;
        }

        return tx;
}
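
/*
 * Worked example of the chunking above (illustrative numbers, not taken
 * from the source): with 20 sources and a device whose dma_maxpq() is 8,
 * the loop issues a first descriptor covering 8 sources and then
 * continuation descriptors for the remainder. Descriptors after the
 * first carry DMA_PREP_CONTINUE, which tells the device to fold its
 * previous P/Q output back into the calculation (and may lower
 * dma_maxpq() for those chunks); only the final descriptor keeps the
 * caller's callback and ACK flag.
 */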

/*
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
                     size_t len, struct async_submit_ctl *submit)
{
        void **srcs;
        int i;
        int start = -1, stop = disks - 3;

        if (submit->scribble)
                srcs = submit->scribble;
        else
                srcs = (void **) blocks;

        for (i = 0; i < disks; i++) {
                if (blocks[i] == NULL) {
                        BUG_ON(i > disks - 3); /* P or Q can't be missing */
                        srcs[i] = (void *)raid6_empty_zero_page;
                } else {
                        srcs[i] = page_address(blocks[i]) + offset;
                        if (i < disks - 2) {
                                stop = i;
                                if (start == -1)
                                        start = i;
                        }
                }
        }
        if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
                BUG_ON(!raid6_call.xor_syndrome);
                if (start >= 0)
                        raid6_call.xor_syndrome(disks, start, stop, len, srcs);
        } else
                raid6_call.gen_syndrome(disks, len, srcs);
        async_tx_sync_epilog(submit);
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of PAGE_SIZE is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL they will be replaced with raid6_empty_zero_page in the
 * synchronous path and omitted in the hardware-asynchronous path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
                   size_t len, struct async_submit_ctl *submit)
{
        int src_cnt = disks - 2;
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
                                                      &P(blocks, disks), 2,
                                                      blocks, src_cnt, len);
        struct dma_device *device = chan ? chan->device : NULL;
        struct dmaengine_unmap_data *unmap = NULL;

        BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

        if (device)
                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

        /* XORing P/Q is only implemented in software */
        if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
            (src_cnt <= dma_maxpq(device, 0) ||
             dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
            is_dma_pq_aligned(device, offset, 0, len)) {
                struct dma_async_tx_descriptor *tx;
                enum dma_ctrl_flags dma_flags = 0;
                unsigned char coefs[src_cnt];
                int i, j;

                /* run p+q asynchronously */
                pr_debug("%s: (async) disks: %d len: %zu\n",
                         __func__, disks, len);

                /* convert source addresses, being careful to handle
                 * missing (zero) sources
                 */
                unmap->len = len;
                for (i = 0, j = 0; i < src_cnt; i++) {
                        if (blocks[i] == NULL)
                                continue;
                        unmap->addr[j] = dma_map_page(device->dev, blocks[i],
                                                      offset, len,
                                                      DMA_TO_DEVICE);
                        coefs[j] = raid6_gfexp[i];
                        unmap->to_cnt++;
                        j++;
                }

                /* the P/Q destinations are re-read as sources when a
                 * calculation is continued, so map them BIDIRECTIONAL
                 */
                unmap->bidi_cnt++;
                if (P(blocks, disks))
                        unmap->addr[j++] = dma_map_page(device->dev,
                                                        P(blocks, disks),
                                                        offset, len,
                                                        DMA_BIDIRECTIONAL);
                else {
                        unmap->addr[j++] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_P;
                }

                unmap->bidi_cnt++;
                if (Q(blocks, disks))
                        unmap->addr[j++] = dma_map_page(device->dev,
                                                        Q(blocks, disks),
                                                        offset, len,
                                                        DMA_BIDIRECTIONAL);
                else {
                        unmap->addr[j++] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_Q;
                }

                tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
                dmaengine_unmap_put(unmap);
                return tx;
        }

        dmaengine_unmap_put(unmap);

        /* run p+q synchronously */
        pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

        /* wait for any prerequisite operations */
        async_tx_quiesce(&submit->depend_tx);

        if (!P(blocks, disks)) {
                P(blocks, disks) = pq_scribble_page;
                BUG_ON(len + offset > PAGE_SIZE);
        }
        if (!Q(blocks, disks)) {
                Q(blocks, disks) = pq_scribble_page;
                BUG_ON(len + offset > PAGE_SIZE);
        }
        do_sync_gen_syndrome(blocks, offset, disks, len, submit);

        return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
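
/*
 * Usage sketch (hypothetical caller, not part of this file): generate
 * P and Q for a six-block stripe. stripe_pages, stripe_done, stripe_ctx
 * and scribble are illustrative names; init_async_submit(),
 * async_gen_syndrome() and async_tx_issue_pending_all() are the real
 * entry points.
 *
 *      struct async_submit_ctl submit;
 *      struct dma_async_tx_descriptor *tx;
 *      struct page *stripe_pages[6];   // [0..3] data, [4] = P, [5] = Q
 *
 *      init_async_submit(&submit, ASYNC_TX_ACK, NULL,
 *                        stripe_done, stripe_ctx, scribble);
 *      tx = async_gen_syndrome(stripe_pages, 0, 6, PAGE_SIZE, &submit);
 *      async_tx_issue_pending_all();
 */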

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        return NULL;
#endif
        return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
                                     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission/completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path requires a
 * temporary result buffer and submit->scribble to be specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
                   size_t len, enum sum_check_flags *pqres, struct page *spare,
                   struct async_submit_ctl *submit)
{
        struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
        struct dma_device *device = chan ? chan->device : NULL;
        struct dma_async_tx_descriptor *tx;
        unsigned char coefs[disks-2];
        enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
        struct dmaengine_unmap_data *unmap = NULL;

        BUG_ON(disks < 4);

        if (device)
                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

        if (unmap && disks <= dma_maxpq(device, 0) &&
            is_dma_pq_aligned(device, offset, 0, len)) {
                struct device *dev = device->dev;
                dma_addr_t pq[2];
                int i, j = 0, src_cnt = 0;

                pr_debug("%s: (async) disks: %d len: %zu\n",
                         __func__, disks, len);

                unmap->len = len;
                for (i = 0; i < disks-2; i++)
                        if (likely(blocks[i])) {
                                unmap->addr[j] = dma_map_page(dev, blocks[i],
                                                              offset, len,
                                                              DMA_TO_DEVICE);
                                coefs[j] = raid6_gfexp[i];
                                unmap->to_cnt++;
                                src_cnt++;
                                j++;
                        }

                if (!P(blocks, disks)) {
                        pq[0] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_P;
                } else {
                        pq[0] = dma_map_page(dev, P(blocks, disks),
                                             offset, len,
                                             DMA_TO_DEVICE);
                        unmap->addr[j++] = pq[0];
                        unmap->to_cnt++;
                }
                if (!Q(blocks, disks)) {
                        pq[1] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_Q;
                } else {
                        pq[1] = dma_map_page(dev, Q(blocks, disks),
                                             offset, len,
                                             DMA_TO_DEVICE);
                        unmap->addr[j++] = pq[1];
                        unmap->to_cnt++;
                }

                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
                /* drivers force forward progress in case they can not
                 * provide a descriptor
                 */
                for (;;) {
                        tx = device->device_prep_dma_pq_val(chan, pq,
                                                            unmap->addr,
                                                            src_cnt,
                                                            coefs,
                                                            len, pqres,
                                                            dma_flags);
                        if (likely(tx))
                                break;
                        async_tx_quiesce(&submit->depend_tx);
                        dma_async_issue_pending(chan);
                }

                dma_set_unmap(tx, unmap);
                async_tx_submit(chan, tx, submit);
        } else {
                struct page *p_src = P(blocks, disks);
                struct page *q_src = Q(blocks, disks);
                enum async_tx_flags flags_orig = submit->flags;
                dma_async_tx_callback cb_fn_orig = submit->cb_fn;
                void *scribble = submit->scribble;
                void *cb_param_orig = submit->cb_param;
                void *p, *q, *s;

                pr_debug("%s: (sync) disks: %d len: %zu\n",
                         __func__, disks, len);

                /* caller must provide a temporary result buffer and
                 * allow the input parameters to be preserved
                 */
                BUG_ON(!spare || !scribble);

                /* wait for any prerequisite operations */
                async_tx_quiesce(&submit->depend_tx);

                /* recompute p and/or q into the temporary buffer and then
                 * check to see if the result matches the current value
                 */
                tx = NULL;
                *pqres = 0;
                if (p_src) {
                        init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
                                          NULL, NULL, scribble);
                        tx = async_xor(spare, blocks, offset, disks-2, len, submit);
                        async_tx_quiesce(&tx);
                        p = page_address(p_src) + offset;
                        s = page_address(spare) + offset;
                        *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
                }

                if (q_src) {
                        P(blocks, disks) = NULL;
                        Q(blocks, disks) = spare;
                        init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
                        tx = async_gen_syndrome(blocks, offset, disks, len, submit);
                        async_tx_quiesce(&tx);
                        q = page_address(q_src) + offset;
                        s = page_address(spare) + offset;
                        *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
                }

                /* restore P, Q and submit */
                P(blocks, disks) = p_src;
                Q(blocks, disks) = q_src;

                submit->cb_fn = cb_fn_orig;
                submit->cb_param = cb_param_orig;
                submit->flags = flags_orig;
                async_tx_sync_epilog(submit);
                tx = NULL;
        }
        dmaengine_unmap_put(unmap);

        return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
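
/*
 * Usage sketch (hypothetical caller, not part of this file): check an
 * existing P/Q pair against the data blocks. stripe_pages, spare_page
 * and scribble are illustrative names the caller must supply for the
 * synchronous fallback; pqres is filled with SUM_CHECK_P_RESULT and/or
 * SUM_CHECK_Q_RESULT bits on mismatch.
 *
 *      enum sum_check_flags pqres;
 *      struct async_submit_ctl submit;
 *      struct dma_async_tx_descriptor *tx;
 *
 *      init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
 *      tx = async_syndrome_val(stripe_pages, 0, 6, PAGE_SIZE, &pqres,
 *                              spare_page, &submit);
 *      async_tx_quiesce(&tx);
 *      if (pqres & SUM_CHECK_P_RESULT)
 *              ;       // P does not match the data blocks
 */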

static int __init async_pq_init(void)
{
        pq_scribble_page = alloc_page(GFP_KERNEL);

        if (pq_scribble_page)
                return 0;

        pr_err("%s: failed to allocate required spare page\n", __func__);

        return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
        __free_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");