linux/crypto/async_tx/async_pq.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
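
/* Illustration (not from the original source): for a hypothetical
 * disks = 6 call (4 data disks + 2 parity), blocks[0..3] hold the data
 * sources, P(blocks, 6) expands to blocks[4] and Q(blocks, 6) to
 * blocks[5].
 */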

#define MAX_DISKS 255

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 * @chan: DMA channel to use
 * @scfs: array of source coefficients used in the GF-multiplication
 * @disks: number of blocks, including the two P/Q destination slots
 * @unmap: DMA unmap/resource tracking for the sources and destinations
 * @dma_flags: dma_ctrl_flags to pass to the prep routine
 * @submit: submission/completion modifiers
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
                      const unsigned char *scfs, int disks,
                      struct dmaengine_unmap_data *unmap,
                      enum dma_ctrl_flags dma_flags,
                      struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct dma_device *dma = chan->device;
        enum async_tx_flags flags_orig = submit->flags;
        dma_async_tx_callback cb_fn_orig = submit->cb_fn;
        void *cb_param_orig = submit->cb_param;
        int src_cnt = disks - 2;
        unsigned short pq_src_cnt;
        dma_addr_t dma_dest[2];
        int src_off = 0;

        while (src_cnt > 0) {
                submit->flags = flags_orig;
                pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
                /* if we are submitting additional pqs, leave the chain open,
                 * clear the callback parameters, and leave the destination
                 * buffers mapped
                 */
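                /* e.g. (hypothetical numbers): 20 sources on a device whose
                 * dma_maxpq() reports 8 are split across roughly three
                 * descriptors (dma_maxpq() shrinks once DMA_PREP_CONTINUE is
                 * set); all but the last carry cleared callbacks, and later
                 * ones fold the partial P/Q back in as implicit sources.
                 */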
                if (src_cnt > pq_src_cnt) {
                        submit->flags &= ~ASYNC_TX_ACK;
                        submit->flags |= ASYNC_TX_FENCE;
                        submit->cb_fn = NULL;
                        submit->cb_param = NULL;
                } else {
                        submit->cb_fn = cb_fn_orig;
                        submit->cb_param = cb_param_orig;
                        if (cb_fn_orig)
                                dma_flags |= DMA_PREP_INTERRUPT;
                }
                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;

                /* Spin until the driver can provide a descriptor; quiescing
                 * dependencies and issuing pending work forces forward
                 * progress.
                 */
                for (;;) {
                        dma_dest[0] = unmap->addr[disks - 2];
                        dma_dest[1] = unmap->addr[disks - 1];
                        tx = dma->device_prep_dma_pq(chan, dma_dest,
                                                     &unmap->addr[src_off],
                                                     pq_src_cnt,
                                                     &scfs[src_off], unmap->len,
                                                     dma_flags);
                        if (likely(tx))
                                break;
                        async_tx_quiesce(&submit->depend_tx);
                        dma_async_issue_pending(chan);
                }

                dma_set_unmap(tx, unmap);
                async_tx_submit(chan, tx, submit);
                submit->depend_tx = tx;

                /* drop completed sources */
                src_cnt -= pq_src_cnt;
                src_off += pq_src_cnt;

                dma_flags |= DMA_PREP_CONTINUE;
        }

        return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block to start the operation
 * @disks: number of blocks, including the P and Q destinations
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
                     size_t len, struct async_submit_ctl *submit)
{
        void **srcs;
        int i;
        int start = -1, stop = disks - 3;

        if (submit->scribble)
                srcs = submit->scribble;
        else
                srcs = (void **) blocks;

        for (i = 0; i < disks; i++) {
                if (blocks[i] == NULL) {
                        BUG_ON(i > disks - 3); /* P or Q can't be zero */
                        srcs[i] = (void *) raid6_empty_zero_page;
                } else {
                        srcs[i] = page_address(blocks[i]) + offset;
                        if (i < disks - 2) {
                                stop = i;
                                if (start == -1)
                                        start = i;
                        }
                }
        }
        if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
                BUG_ON(!raid6_call.xor_syndrome);
                if (start >= 0)
                        raid6_call.xor_syndrome(disks, start, stop, len, srcs);
        } else {
                raid6_call.gen_syndrome(disks, len, srcs);
        }
        async_tx_sync_epilog(submit);
}
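
/* Illustration (not from the original source): with blocks[] =
 * { NULL, D1, D2, NULL, P, Q } the loop above yields start = 1 and
 * stop = 2, so in the ASYNC_TX_PQ_XOR_DST case xor_syndrome() only
 * reads the populated data window and xors its contribution into the
 * existing P/Q pages.
 */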

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the
 * raid6_empty_zero_page in the synchronous path and omitted in the
 * hardware-asynchronous path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
                   size_t len, struct async_submit_ctl *submit)
{
        int src_cnt = disks - 2;
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
                                                      &P(blocks, disks), 2,
                                                      blocks, src_cnt, len);
        struct dma_device *device = chan ? chan->device : NULL;
        struct dmaengine_unmap_data *unmap = NULL;

        BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));

        if (device)
                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

        /* XORing P/Q is only implemented in software */
        if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
            (src_cnt <= dma_maxpq(device, 0) ||
             dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
            is_dma_pq_aligned(device, offset, 0, len)) {
                struct dma_async_tx_descriptor *tx;
                enum dma_ctrl_flags dma_flags = 0;
                unsigned char coefs[MAX_DISKS];
                int i, j;

                /* run the p+q asynchronously */
                pr_debug("%s: (async) disks: %d len: %zu\n",
                         __func__, disks, len);

                /* convert source addresses being careful to collapse 'empty'
                 * sources and update the coefficients accordingly
                 */
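                /* e.g. (hypothetical): blocks[] = { D0, NULL, D2, P, Q }
                 * collapses to unmap->addr[] = { D0, D2 } with
                 * coefs[] = { raid6_gfexp[0], raid6_gfexp[2] }, so the
                 * hardware sees a dense two-source operation.
                 */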
                unmap->len = len;
                for (i = 0, j = 0; i < src_cnt; i++) {
                        if (blocks[i] == NULL)
                                continue;
                        unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
                                                      len, DMA_TO_DEVICE);
                        coefs[j] = raid6_gfexp[i];
                        unmap->to_cnt++;
                        j++;
                }

                /*
                 * DMAs use destinations as sources,
                 * so use BIDIRECTIONAL mapping
                 */
                unmap->bidi_cnt++;
                if (P(blocks, disks)) {
                        unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
                                                        offset, len, DMA_BIDIRECTIONAL);
                } else {
                        unmap->addr[j++] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_P;
                }

                unmap->bidi_cnt++;
                if (Q(blocks, disks)) {
                        unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
                                                        offset, len, DMA_BIDIRECTIONAL);
                } else {
                        unmap->addr[j++] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_Q;
                }

                tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
                dmaengine_unmap_put(unmap);
                return tx;
        }

        dmaengine_unmap_put(unmap);

        /* run the pq synchronously */
        pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

        /* wait for any prerequisite operations */
        async_tx_quiesce(&submit->depend_tx);

        if (!P(blocks, disks)) {
                P(blocks, disks) = pq_scribble_page;
                BUG_ON(len + offset > PAGE_SIZE);
        }
        if (!Q(blocks, disks)) {
                Q(blocks, disks) = pq_scribble_page;
                BUG_ON(len + offset > PAGE_SIZE);
        }
        do_sync_gen_syndrome(blocks, offset, disks, len, submit);

        return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
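
/* A minimal usage sketch (hypothetical, not from this file), assuming a
 * 6-disk stripe with data pages data[0..3], parity pages p and q, and a
 * caller-provided scribble region and completion callback:
 *
 *	struct page *blocks[6];
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		blocks[i] = data[i];
 *	blocks[4] = p;			(P destination, i.e. P(blocks, 6))
 *	blocks[5] = q;			(Q destination, i.e. Q(blocks, 6))
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, complete_cb,
 *			  cb_arg, scribble);
 *	tx = async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
 */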

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        return NULL;
#endif
        return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
                                     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission/completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path requires a
 * temporary result buffer and submit->scribble to be specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
                   size_t len, enum sum_check_flags *pqres, struct page *spare,
                   struct async_submit_ctl *submit)
{
        struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
        struct dma_device *device = chan ? chan->device : NULL;
        struct dma_async_tx_descriptor *tx;
        unsigned char coefs[MAX_DISKS];
        enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
        struct dmaengine_unmap_data *unmap = NULL;

        BUG_ON(disks < 4 || disks > MAX_DISKS);

        if (device)
                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

        if (unmap && disks <= dma_maxpq(device, 0) &&
            is_dma_pq_aligned(device, offset, 0, len)) {
                struct device *dev = device->dev;
                dma_addr_t pq[2];
                int i, j = 0, src_cnt = 0;

                pr_debug("%s: (async) disks: %d len: %zu\n",
                         __func__, disks, len);

                unmap->len = len;
                for (i = 0; i < disks - 2; i++)
                        if (likely(blocks[i])) {
                                unmap->addr[j] = dma_map_page(dev, blocks[i],
                                                              offset, len,
                                                              DMA_TO_DEVICE);
                                coefs[j] = raid6_gfexp[i];
                                unmap->to_cnt++;
                                src_cnt++;
                                j++;
                        }

                if (!P(blocks, disks)) {
                        pq[0] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_P;
                } else {
                        pq[0] = dma_map_page(dev, P(blocks, disks),
                                             offset, len,
                                             DMA_TO_DEVICE);
                        unmap->addr[j++] = pq[0];
                        unmap->to_cnt++;
                }
                if (!Q(blocks, disks)) {
                        pq[1] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_Q;
                } else {
                        pq[1] = dma_map_page(dev, Q(blocks, disks),
                                             offset, len,
                                             DMA_TO_DEVICE);
                        unmap->addr[j++] = pq[1];
                        unmap->to_cnt++;
                }

                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
                for (;;) {
                        tx = device->device_prep_dma_pq_val(chan, pq,
                                                            unmap->addr,
                                                            src_cnt,
                                                            coefs,
                                                            len, pqres,
                                                            dma_flags);
                        if (likely(tx))
                                break;
                        async_tx_quiesce(&submit->depend_tx);
                        dma_async_issue_pending(chan);
                }

                dma_set_unmap(tx, unmap);
                async_tx_submit(chan, tx, submit);
        } else {
                struct page *p_src = P(blocks, disks);
                struct page *q_src = Q(blocks, disks);
                enum async_tx_flags flags_orig = submit->flags;
                dma_async_tx_callback cb_fn_orig = submit->cb_fn;
                void *scribble = submit->scribble;
                void *cb_param_orig = submit->cb_param;
                void *p, *q, *s;

                pr_debug("%s: (sync) disks: %d len: %zu\n",
                         __func__, disks, len);

                /* caller must provide a temporary result buffer and
                 * allow the input parameters to be preserved
                 */
                BUG_ON(!spare || !scribble);

                /* wait for any prerequisite operations */
                async_tx_quiesce(&submit->depend_tx);

                /* recompute p and/or q into the temporary buffer and then
                 * check to see the result matches the current value
                 */
                tx = NULL;
                *pqres = 0;
                if (p_src) {
                        init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
                                          NULL, NULL, scribble);
                        tx = async_xor(spare, blocks, offset, disks - 2, len, submit);
                        async_tx_quiesce(&tx);
                        p = page_address(p_src) + offset;
                        s = page_address(spare) + offset;
                        *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
                }

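                /* Note: dropping P from blocks[] and swapping the spare page
                 * in as the Q destination makes async_gen_syndrome()
                 * regenerate only Q, which is then compared against the
                 * original below.
                 */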
                if (q_src) {
                        P(blocks, disks) = NULL;
                        Q(blocks, disks) = spare;
                        init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
                        tx = async_gen_syndrome(blocks, offset, disks, len, submit);
                        async_tx_quiesce(&tx);
                        q = page_address(q_src) + offset;
                        s = page_address(spare) + offset;
                        *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
                }

                /* restore P, Q and submit */
                P(blocks, disks) = p_src;
                Q(blocks, disks) = q_src;

                submit->cb_fn = cb_fn_orig;
                submit->cb_param = cb_param_orig;
                submit->flags = flags_orig;
                async_tx_sync_epilog(submit);
                tx = NULL;
        }
        dmaengine_unmap_put(unmap);

        return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
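
/* A minimal usage sketch (hypothetical, not from this file): validating
 * the stripe set up in the async_gen_syndrome() example above, with
 * 'spare' a scratch page required by the synchronous fallback:
 *
 *	enum sum_check_flags pqres = 0;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL,
 *			  scribble);
 *	tx = async_syndrome_val(blocks, 0, 6, PAGE_SIZE, &pqres, spare,
 *				&submit);
 *	async_tx_quiesce(&tx);
 *	if (pqres & SUM_CHECK_P_RESULT)
 *		(stored P does not match the computed parity)
 *	if (pqres & SUM_CHECK_Q_RESULT)
 *		(stored Q does not match the computed syndrome)
 */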

static int __init async_pq_init(void)
{
        pq_scribble_page = alloc_page(GFP_KERNEL);

        if (pq_scribble_page)
                return 0;

        pr_err("%s: failed to allocate required spare page\n", __func__);

        return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
        __free_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");