/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
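/* e.g. for a 4+2 array (disks == 6): blocks[0..3] are the data sources,
 * P(blocks, 6) is blocks[4] and Q(blocks, 6) is blocks[5]
 */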

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
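 * @chan: DMA channel to use for the operation
 * @scfs: array of GF(2^8) source coefficients, one per mapped source
 * @disks: number of blocks, including the P and Q destinations
 * @unmap: resource tracking the DMA mappings to release on completion
 * @dma_flags: flags to pass to ->device_prep_dma_pq()
 * @submit: submission/completion modifiers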
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
		      const unsigned char *scfs, int disks,
		      struct dmaengine_unmap_data *unmap,
		      enum dma_ctrl_flags dma_flags,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;

	if (submit->flags & ASYNC_TX_FENCE)
		dma_flags |= DMA_PREP_FENCE;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}

		/* Drivers force forward progress in case they cannot provide
		 * a descriptor
		 */
		for (;;) {
			dma_dest[0] = unmap->addr[disks - 2];
			dma_dest[1] = unmap->addr[disks - 1];
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &unmap->addr[src_off],
						     pq_src_cnt,
						     &scfs[src_off], unmap->len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

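		/* any later segment continues the P/Q computation begun
		 * above: DMA_PREP_CONTINUE tells the driver to factor the
		 * current P/Q contents into the next operation rather than
		 * start from scratch
		 */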
		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
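 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block to start the operation
 * @disks: number of blocks, including the P and Q destinations
 * @len: length of the operation in bytes
 * @submit: submission/completion modifiers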
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;
	int start = -1, stop = disks - 3;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

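	/* track the first and last data block actually present so the
	 * incremental xor_syndrome path below only walks that range
	 */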
	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be NULL */
			srcs[i] = (void *) raid6_empty_zero_page;
		} else {
			srcs[i] = page_address(blocks[i]) + offset;
			if (i < disks - 2) {
				stop = i;
				if (start == -1)
					start = i;
			}
		}
	}
	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
		BUG_ON(!raid6_call.xor_syndrome);
		if (start >= 0)
			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
	} else {
		raid6_call.gen_syndrome(disks, len, srcs);
	}
	async_tx_sync_epilog(submit);
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
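 *
 * Concretely, for n = disks - 2 data blocks D_0..D_(n-1) this computes,
 * byte by byte (g = {02}, '+' is GF(2^8) addition, i.e. XOR):
 *
 *	P = D_0 + D_1 + ... + D_(n-1)
 *	Q = g^0*D_0 + g^1*D_1 + ... + g^(n-1)*D_(n-1)
 *
 * which is why source i carries the coefficient raid6_gfexp[i] below.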
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'offset' + 'len'
 * must be <= PAGE_SIZE as a temporary buffer of this size is used in
 * the synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the
 * raid6_empty_zero_page in the synchronous path and omitted in the
 * hardware-asynchronous path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);

	/* XORing P/Q is only implemented in software */
	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = 0;
		unsigned char coefs[src_cnt];
		int i, j;

		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* convert source addresses being careful to collapse 'empty'
		 * sources and update the coefficients accordingly
		 */
		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (blocks[i] == NULL)
				continue;
			unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
						      len, DMA_TO_DEVICE);
			coefs[j] = raid6_gfexp[i];
			unmap->to_cnt++;
			j++;
		}

		/*
		 * DMAs use destinations as sources,
		 * so use BIDIRECTIONAL mapping
		 */
		unmap->bidi_cnt++;
		if (P(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
							offset, len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		}

		unmap->bidi_cnt++;
		if (Q(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
							offset, len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		}

		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	}

	dmaengine_unmap_put(unmap);

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
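
/*
 * Example (illustrative sketch, not part of this file): generating P/Q for
 * a 4+2 stripe.  'pages', 'cb', 'ctx' and 'scribble' are hypothetical names
 * supplied by the caller; pages[0..3] hold the data, pages[4] and pages[5]
 * receive P and Q:
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, cb, ctx, scribble);
 *	tx = async_gen_syndrome(pages, 0, 6, PAGE_SIZE, &submit);
 */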

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission/completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path requires a
 * temporary result buffer and submit->scribble to be specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[disks-2];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks < 4);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);

	if (unmap && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct device *dev = device->dev;
		dma_addr_t pq[2];
		int i, j = 0, src_cnt = 0;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		unmap->len = len;
		for (i = 0; i < disks-2; i++)
			if (likely(blocks[i])) {
				unmap->addr[j] = dma_map_page(dev, blocks[i],
							      offset, len,
							      DMA_TO_DEVICE);
				coefs[j] = raid6_gfexp[i];
				unmap->to_cnt++;
				src_cnt++;
				j++;
			}

		if (!P(blocks, disks)) {
			pq[0] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		} else {
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[0];
			unmap->to_cnt++;
		}
		if (!Q(blocks, disks)) {
			pq[1] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		} else {
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[1];
			unmap->to_cnt++;
		}

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);

		return tx;
	} else {
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and a
		 * scribble buffer so the input parameters can be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see if the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

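		/* to recompute q alone, temporarily drop p from the
		 * calculation: a NULL p is simply omitted (see
		 * async_gen_syndrome) and the spare stands in for q
		 */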
		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
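
/*
 * Example (illustrative sketch): checking an existing 4+2 stripe.  'pages',
 * 'spare', 'scribble' and the handle_*_mismatch() helpers are hypothetical
 * names supplied by the caller; on mismatch the corresponding bits are set
 * in pqres:
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *	enum sum_check_flags pqres = 0;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
 *	tx = async_syndrome_val(pages, 0, 6, PAGE_SIZE, &pqres, spare, &submit);
 *	async_tx_quiesce(&tx);
 *	if (pqres & SUM_CHECK_P_RESULT)
 *		handle_p_mismatch();
 *	if (pqres & SUM_CHECK_Q_RESULT)
 *		handle_q_mismatch();
 */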

static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	put_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");