linux/drivers/crypto/omap-crypto.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP Crypto driver common support routines.
 *
 * Copyright (c) 2017 Texas Instruments Incorporated
 *   Tero Kristo <t-kristo@ti.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

#include "omap-crypto.h"

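/*
 * Build a copy of the source scatterlist that covers exactly 'total'
 * bytes.  Unless OMAP_CRYPTO_FORCE_SINGLE_ENTRY is set, a fresh table
 * is allocated and *sg is redirected to it; the duplicate is freed by
 * omap_crypto_cleanup() once OMAP_CRYPTO_SG_COPIED has been recorded.
 */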
static int omap_crypto_copy_sg_lists(int total, int bs,
                                     struct scatterlist **sg,
                                     struct scatterlist *new_sg, u16 flags)
{
        int n = sg_nents(*sg);
        struct scatterlist *tmp;

        if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) {
                new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
                if (!new_sg)
                        return -ENOMEM;

                sg_init_table(new_sg, n);
        }

        tmp = new_sg;

        while (*sg && total) {
                int len = (*sg)->length;

                if (total < len)
                        len = total;

                if (len > 0) {
                        total -= len;
                        sg_set_page(tmp, sg_page(*sg), len, (*sg)->offset);
                        if (total <= 0)
                                sg_mark_end(tmp);
                        tmp = sg_next(tmp);
                }

                *sg = sg_next(*sg);
        }

        *sg = new_sg;

        return 0;
}

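/*
 * Allocate a block-size aligned contiguous bounce buffer and point *sg
 * at a single-entry list wrapping it.  The original data is copied in
 * (and the tail optionally zeroed) only when OMAP_CRYPTO_COPY_DATA is
 * set.  The buffer is released by omap_crypto_cleanup() once
 * OMAP_CRYPTO_DATA_COPIED has been recorded.
 */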
static int omap_crypto_copy_sgs(int total, int bs, struct scatterlist **sg,
                                struct scatterlist *new_sg, u16 flags)
{
        void *buf;
        int pages;
        int new_len;

        new_len = ALIGN(total, bs);
        pages = get_order(new_len);

        buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
        if (!buf) {
                pr_err("%s: Couldn't allocate pages for unaligned cases.\n",
                       __func__);
                return -ENOMEM;
        }

        if (flags & OMAP_CRYPTO_COPY_DATA) {
                scatterwalk_map_and_copy(buf, *sg, 0, total, 0);
                if (flags & OMAP_CRYPTO_ZERO_BUF)
                        memset(buf + total, 0, new_len - total);
        }

        if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
                sg_init_table(new_sg, 1);

        sg_set_buf(new_sg, buf, new_len);

        *sg = new_sg;

        return 0;
}

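/*
 * Check whether a scatterlist can be handed to the hardware as-is: the
 * total length and every entry length must be block-size aligned, entry
 * offsets must be 32-bit aligned, and (with CONFIG_ZONE_DMA) the pages
 * must live in ZONE_DMA.  Returns 0, OMAP_CRYPTO_NOT_ALIGNED or
 * OMAP_CRYPTO_BAD_DATA_LENGTH.
 */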
static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs,
                                u16 flags)
{
        int len = 0;
        int num_sg = 0;

        if (!IS_ALIGNED(total, bs))
                return OMAP_CRYPTO_NOT_ALIGNED;

        while (sg) {
                num_sg++;

                if (!IS_ALIGNED(sg->offset, 4))
                        return OMAP_CRYPTO_NOT_ALIGNED;
                if (!IS_ALIGNED(sg->length, bs))
                        return OMAP_CRYPTO_NOT_ALIGNED;
#ifdef CONFIG_ZONE_DMA
                if (page_zonenum(sg_page(sg)) != ZONE_DMA)
                        return OMAP_CRYPTO_NOT_ALIGNED;
#endif

                len += sg->length;
                sg = sg_next(sg);

                if (len >= total)
                        break;
        }

        if ((flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) && num_sg > 1)
                return OMAP_CRYPTO_NOT_ALIGNED;

        if (len != total)
                return OMAP_CRYPTO_BAD_DATA_LENGTH;

        return 0;
}

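/**
 * omap_crypto_align_sg() - prepare a scatterlist for DMA to the crypto HW
 * @sg:		in/out pointer to the scatterlist to check and, if needed,
 *		replace with an aligned copy
 * @total:	number of bytes to process
 * @bs:		block size the data must be aligned to
 * @new_sg:	caller-provided scatterlist used to describe the copied data
 *		when a bounce buffer or a rebuilt single-entry list is needed
 * @flags:	OMAP_CRYPTO_* control flags (force copy, zero tail, ...)
 * @flags_shift: bit position at which the *_COPIED status bits are stored
 *		in *dd_flags
 * @dd_flags:	driver state flags, updated with OMAP_CRYPTO_DATA_COPIED /
 *		OMAP_CRYPTO_SG_COPIED so the matching cleanup can undo the work
 *
 * Returns 0 on success or a negative error code on allocation failure.
 */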
int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
                         struct scatterlist *new_sg, u16 flags,
                         u8 flags_shift, unsigned long *dd_flags)
{
        int ret;

        *dd_flags &= ~(OMAP_CRYPTO_COPY_MASK << flags_shift);

        if (flags & OMAP_CRYPTO_FORCE_COPY)
                ret = OMAP_CRYPTO_NOT_ALIGNED;
        else
                ret = omap_crypto_check_sg(*sg, total, bs, flags);

        if (ret == OMAP_CRYPTO_NOT_ALIGNED) {
                ret = omap_crypto_copy_sgs(total, bs, sg, new_sg, flags);
                if (ret)
                        return ret;
                *dd_flags |= OMAP_CRYPTO_DATA_COPIED << flags_shift;
        } else if (ret == OMAP_CRYPTO_BAD_DATA_LENGTH) {
                ret = omap_crypto_copy_sg_lists(total, bs, sg, new_sg, flags);
                if (ret)
                        return ret;
                if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
                        *dd_flags |= OMAP_CRYPTO_SG_COPIED << flags_shift;
        } else if (flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) {
                sg_set_buf(new_sg, sg_virt(*sg), (*sg)->length);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(omap_crypto_align_sg);

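/*
 * Copy 'len' bytes from the bounce scatterlist back into the original
 * destination list, starting 'offset' bytes into the destination.
 * Pages are mapped with kmap_atomic() so highmem destinations work too.
 */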
static void omap_crypto_copy_data(struct scatterlist *src,
                                  struct scatterlist *dst,
                                  int offset, int len)
{
        int amt;
        void *srcb, *dstb;
        int srco = 0, dsto = offset;

        while (src && dst && len) {
                if (srco >= src->length) {
                        srco -= src->length;
                        src = sg_next(src);
                        continue;
                }

                if (dsto >= dst->length) {
                        dsto -= dst->length;
                        dst = sg_next(dst);
                        continue;
                }

                amt = min(src->length - srco, dst->length - dsto);
                amt = min(len, amt);

                srcb = kmap_atomic(sg_page(src)) + srco + src->offset;
                dstb = kmap_atomic(sg_page(dst)) + dsto + dst->offset;

                memcpy(dstb, srcb, amt);

                if (!PageSlab(sg_page(dst)))
                        flush_kernel_dcache_page(sg_page(dst));

                kunmap_atomic(srcb);
                kunmap_atomic(dstb);

                srco += amt;
                dsto += amt;
                len -= amt;
        }
}

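/**
 * omap_crypto_cleanup() - undo the work done by omap_crypto_align_sg()
 * @sg:		the (possibly bounce) scatterlist that was handed to the HW
 * @orig:	the original scatterlist to copy result data back into, or
 *		NULL if no copy-back is wanted
 * @offset:	byte offset into @orig at which to start the copy-back
 * @len:	number of bytes to copy back / size of the bounce buffer
 * @flags_shift: the same shift that was passed to omap_crypto_align_sg()
 * @flags:	driver state flags carrying the OMAP_CRYPTO_*_COPIED bits
 *
 * Copies data back to @orig if anything was copied, then frees either the
 * bounce pages or the duplicated scatterlist, depending on which path
 * omap_crypto_align_sg() took.
 */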
void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
                         int offset, int len, u8 flags_shift,
                         unsigned long flags)
{
        void *buf;
        int pages;

        flags >>= flags_shift;
        flags &= OMAP_CRYPTO_COPY_MASK;

        if (!flags)
                return;

        buf = sg_virt(sg);
        pages = get_order(len);

        if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
                omap_crypto_copy_data(sg, orig, offset, len);

        if (flags & OMAP_CRYPTO_DATA_COPIED)
                free_pages((unsigned long)buf, pages);
        else if (flags & OMAP_CRYPTO_SG_COPIED)
                kfree(sg);
}
EXPORT_SYMBOL_GPL(omap_crypto_cleanup);

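/*
 * Typical usage (illustrative sketch only -- 'dd', 'req' and the
 * FLAGS_IN_DATA_ST_SHIFT value are hypothetical driver-side names,
 * loosely modelled on the omap-aes/omap-sham users of this library):
 *
 *	ret = omap_crypto_align_sg(&dd->in_sg, req->cryptlen, AES_BLOCK_SIZE,
 *				   dd->in_sgl, OMAP_CRYPTO_COPY_DATA,
 *				   FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
 *	if (ret)
 *		return ret;
 *
 *	// ...program DMA against dd->in_sg, wait for completion...
 *
 *	omap_crypto_cleanup(dd->in_sg, NULL, 0, req->cryptlen,
 *			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);
 */
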
MODULE_DESCRIPTION("OMAP crypto support library.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>");