linux/drivers/crypto/talitos.c
   1/*
   2 * talitos - Freescale Integrated Security Engine (SEC) device driver
   3 *
   4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
   5 *
   6 * Scatterlist Crypto API glue code copied from files with the following:
   7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
   8 *
   9 * Crypto algorithm registration code copied from hifn driver:
  10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
  11 * All rights reserved.
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License as published by
  15 * the Free Software Foundation; either version 2 of the License, or
  16 * (at your option) any later version.
  17 *
  18 * This program is distributed in the hope that it will be useful,
  19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  21 * GNU General Public License for more details.
  22 *
  23 * You should have received a copy of the GNU General Public License
  24 * along with this program; if not, write to the Free Software
  25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  26 */
  27
  28#include <linux/kernel.h>
  29#include <linux/module.h>
  30#include <linux/mod_devicetable.h>
  31#include <linux/device.h>
  32#include <linux/interrupt.h>
  33#include <linux/crypto.h>
  34#include <linux/hw_random.h>
  35#include <linux/of_address.h>
  36#include <linux/of_irq.h>
  37#include <linux/of_platform.h>
  38#include <linux/dma-mapping.h>
  39#include <linux/io.h>
  40#include <linux/spinlock.h>
  41#include <linux/rtnetlink.h>
  42#include <linux/slab.h>
  43
  44#include <crypto/algapi.h>
  45#include <crypto/aes.h>
  46#include <crypto/des.h>
  47#include <crypto/sha.h>
  48#include <crypto/md5.h>
  49#include <crypto/internal/aead.h>
  50#include <crypto/authenc.h>
  51#include <crypto/skcipher.h>
  52#include <crypto/hash.h>
  53#include <crypto/internal/hash.h>
  54#include <crypto/scatterwalk.h>
  55
  56#include "talitos.h"
  57
  58static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
  59                           bool is_sec1)
  60{
  61        ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
  62        if (!is_sec1)
  63                ptr->eptr = upper_32_bits(dma_addr);
  64}
  65
  66static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
  67                               bool is_sec1)
  68{
  69        if (is_sec1) {
  70                ptr->res = 0;
  71                ptr->len1 = cpu_to_be16(len);
  72        } else {
  73                ptr->len = cpu_to_be16(len);
  74        }
  75}
  76
  77static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
  78                                           bool is_sec1)
  79{
  80        if (is_sec1)
  81                return be16_to_cpu(ptr->len1);
  82        else
  83                return be16_to_cpu(ptr->len);
  84}
  85
  86static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
  87{
  88        if (!is_sec1)
  89                ptr->j_extent = 0;
  90}
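/*
 * Note on the helpers above: struct talitos_ptr is one h/w descriptor
 * pointer entry.  On SEC 2.x+ it carries a 16-bit length, a jump/extent
 * byte and a 36-bit bus address split between eptr (upper bits) and ptr
 * (lower 32 bits); SEC 1.x has no extended address or extent byte, so only
 * ptr and the len1 field are written (see talitos.h for the field layout).
 */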
  91
  92/*
  93 * map virtual single (contiguous) pointer to h/w descriptor pointer
  94 */
  95static void map_single_talitos_ptr(struct device *dev,
  96                                   struct talitos_ptr *ptr,
  97                                   unsigned int len, void *data,
  98                                   enum dma_data_direction dir)
  99{
 100        dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
 101        struct talitos_private *priv = dev_get_drvdata(dev);
 102        bool is_sec1 = has_ftr_sec1(priv);
 103
 104        to_talitos_ptr_len(ptr, len, is_sec1);
 105        to_talitos_ptr(ptr, dma_addr, is_sec1);
 106        to_talitos_ptr_extent_clear(ptr, is_sec1);
 107}
 108
 109/*
 110 * unmap bus single (contiguous) h/w descriptor pointer
 111 */
 112static void unmap_single_talitos_ptr(struct device *dev,
 113                                     struct talitos_ptr *ptr,
 114                                     enum dma_data_direction dir)
 115{
 116        struct talitos_private *priv = dev_get_drvdata(dev);
 117        bool is_sec1 = has_ftr_sec1(priv);
 118
 119        dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
 120                         from_talitos_ptr_len(ptr, is_sec1), dir);
 121}
 122
 123static int reset_channel(struct device *dev, int ch)
 124{
 125        struct talitos_private *priv = dev_get_drvdata(dev);
 126        unsigned int timeout = TALITOS_TIMEOUT;
 127        bool is_sec1 = has_ftr_sec1(priv);
 128
 129        if (is_sec1) {
 130                setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 131                          TALITOS1_CCCR_LO_RESET);
 132
 133                while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
 134                        TALITOS1_CCCR_LO_RESET) && --timeout)
 135                        cpu_relax();
 136        } else {
 137                setbits32(priv->chan[ch].reg + TALITOS_CCCR,
 138                          TALITOS2_CCCR_RESET);
 139
 140                while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
 141                        TALITOS2_CCCR_RESET) && --timeout)
 142                        cpu_relax();
 143        }
 144
 145        if (timeout == 0) {
 146                dev_err(dev, "failed to reset channel %d\n", ch);
 147                return -EIO;
 148        }
 149
 150        /* set 36-bit addressing, done writeback enable and done IRQ enable */
 151        setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
 152                  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
 153
 154        /* and ICCR writeback, if available */
 155        if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
 156                setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 157                          TALITOS_CCCR_LO_IWSE);
 158
 159        return 0;
 160}
 161
 162static int reset_device(struct device *dev)
 163{
 164        struct talitos_private *priv = dev_get_drvdata(dev);
 165        unsigned int timeout = TALITOS_TIMEOUT;
 166        bool is_sec1 = has_ftr_sec1(priv);
 167        u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
 168
 169        setbits32(priv->reg + TALITOS_MCR, mcr);
 170
 171        while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
 172               && --timeout)
 173                cpu_relax();
 174
 175        if (priv->irq[1]) {
 176                mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
 177                setbits32(priv->reg + TALITOS_MCR, mcr);
 178        }
 179
 180        if (timeout == 0) {
 181                dev_err(dev, "failed to reset device\n");
 182                return -EIO;
 183        }
 184
 185        return 0;
 186}
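/*
 * When a second interrupt line is wired up (priv->irq[1]), the MCR
 * channel-remap bits RCA1|RCA3 are set after the software reset so that
 * channels 1 and 3 report on the secondary line; the *_ch0_2/*_ch1_3
 * interrupt handlers further down rely on this split.
 */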
 187
 188/*
 189 * Reset and initialize the device
 190 */
 191static int init_device(struct device *dev)
 192{
 193        struct talitos_private *priv = dev_get_drvdata(dev);
 194        int ch, err;
 195        bool is_sec1 = has_ftr_sec1(priv);
 196
 197        /*
 198         * Master reset
 199         * errata documentation: warning: certain SEC interrupts
 200         * are not fully cleared by writing the MCR:SWR bit,
 201         * set bit twice to completely reset
 202         */
 203        err = reset_device(dev);
 204        if (err)
 205                return err;
 206
 207        err = reset_device(dev);
 208        if (err)
 209                return err;
 210
 211        /* reset channels */
 212        for (ch = 0; ch < priv->num_channels; ch++) {
 213                err = reset_channel(dev, ch);
 214                if (err)
 215                        return err;
 216        }
 217
 218        /* enable channel done and error interrupts */
 219        if (is_sec1) {
 220                clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
 221                clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
  222                /* disable parity error check in DEU (erroneously triggered by test vectors) */
 223                setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
 224        } else {
 225                setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
 226                setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
 227        }
 228
 229        /* disable integrity check error interrupts (use writeback instead) */
 230        if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
 231                setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
 232                          TALITOS_MDEUICR_LO_ICE);
 233
 234        return 0;
 235}
 236
 237/**
 238 * talitos_submit - submits a descriptor to the device for processing
 239 * @dev:        the SEC device to be used
 240 * @ch:         the SEC device channel to be used
 241 * @desc:       the descriptor to be processed by the device
 242 * @callback:   whom to call when processing is complete
 243 * @context:    a handle for use by caller (optional)
 244 *
 245 * desc must contain valid dma-mapped (bus physical) address pointers.
 246 * callback must check err and feedback in descriptor header
 247 * for device processing status.
 248 */
 249int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
 250                   void (*callback)(struct device *dev,
 251                                    struct talitos_desc *desc,
 252                                    void *context, int error),
 253                   void *context)
 254{
 255        struct talitos_private *priv = dev_get_drvdata(dev);
 256        struct talitos_request *request;
 257        unsigned long flags;
 258        int head;
 259        bool is_sec1 = has_ftr_sec1(priv);
 260
 261        spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
 262
 263        if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
 264                /* h/w fifo is full */
 265                spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 266                return -EAGAIN;
 267        }
 268
 269        head = priv->chan[ch].head;
 270        request = &priv->chan[ch].fifo[head];
 271
 272        /* map descriptor and save caller data */
 273        if (is_sec1) {
 274                desc->hdr1 = desc->hdr;
 275                desc->next_desc = 0;
 276                request->dma_desc = dma_map_single(dev, &desc->hdr1,
 277                                                   TALITOS_DESC_SIZE,
 278                                                   DMA_BIDIRECTIONAL);
 279        } else {
 280                request->dma_desc = dma_map_single(dev, desc,
 281                                                   TALITOS_DESC_SIZE,
 282                                                   DMA_BIDIRECTIONAL);
 283        }
 284        request->callback = callback;
 285        request->context = context;
 286
 287        /* increment fifo head */
 288        priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
 289
 290        smp_wmb();
 291        request->desc = desc;
 292
 293        /* GO! */
 294        wmb();
 295        out_be32(priv->chan[ch].reg + TALITOS_FF,
 296                 upper_32_bits(request->dma_desc));
 297        out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
 298                 lower_32_bits(request->dma_desc));
 299
 300        spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 301
 302        return -EINPROGRESS;
 303}
 304EXPORT_SYMBOL(talitos_submit);
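/*
 * Illustrative caller sketch for talitos_submit() (names such as my_done,
 * my_req and handle_busy() are hypothetical, not part of this driver):
 * the caller builds a fully dma-mapped descriptor, submits it, and treats
 * -EINPROGRESS as success; -EAGAIN means the channel fifo is full.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		struct my_req *req = context;
 *
 *		check desc->hdr feedback and error, unmap buffers, complete req
 *	}
 *
 *	err = talitos_submit(dev, ctx->ch, &edesc->desc, my_done, req);
 *	if (err != -EINPROGRESS)
 *		handle_busy(err);	(e.g. unmap and propagate -EAGAIN)
 */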
 305
 306/*
  307 * process what was done, notify callback of error if not done
 308 */
 309static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
 310{
 311        struct talitos_private *priv = dev_get_drvdata(dev);
 312        struct talitos_request *request, saved_req;
 313        unsigned long flags;
 314        int tail, status;
 315        bool is_sec1 = has_ftr_sec1(priv);
 316
 317        spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 318
 319        tail = priv->chan[ch].tail;
 320        while (priv->chan[ch].fifo[tail].desc) {
 321                __be32 hdr;
 322
 323                request = &priv->chan[ch].fifo[tail];
 324
 325                /* descriptors with their done bits set don't get the error */
 326                rmb();
 327                hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;
 328
 329                if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
 330                        status = 0;
 331                else
 332                        if (!error)
 333                                break;
 334                        else
 335                                status = error;
 336
 337                dma_unmap_single(dev, request->dma_desc,
 338                                 TALITOS_DESC_SIZE,
 339                                 DMA_BIDIRECTIONAL);
 340
 341                /* copy entries so we can call callback outside lock */
 342                saved_req.desc = request->desc;
 343                saved_req.callback = request->callback;
 344                saved_req.context = request->context;
 345
 346                /* release request entry in fifo */
 347                smp_wmb();
 348                request->desc = NULL;
 349
 350                /* increment fifo tail */
 351                priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
 352
 353                spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 354
 355                atomic_dec(&priv->chan[ch].submit_count);
 356
 357                saved_req.callback(dev, saved_req.desc, saved_req.context,
 358                                   status);
 359                /* channel may resume processing in single desc error case */
 360                if (error && !reset_ch && status == error)
 361                        return;
 362                spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 363                tail = priv->chan[ch].tail;
 364        }
 365
 366        spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 367}
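/*
 * Each channel is a ring of talitos_request slots: head (producer index,
 * guarded by head_lock in talitos_submit) and tail (consumer index, guarded
 * by tail_lock here).  request->desc doubles as the "slot busy" flag; it is
 * written last on submit (after smp_wmb) and cleared here before the tail
 * advances, while submit_count throttles submissions against the h/w fifo
 * depth.
 */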
 368
 369/*
 370 * process completed requests for channels that have done status
 371 */
 372#define DEF_TALITOS1_DONE(name, ch_done_mask)                           \
 373static void talitos1_done_##name(unsigned long data)                    \
 374{                                                                       \
 375        struct device *dev = (struct device *)data;                     \
 376        struct talitos_private *priv = dev_get_drvdata(dev);            \
 377        unsigned long flags;                                            \
 378                                                                        \
 379        if (ch_done_mask & 0x10000000)                                  \
 380                flush_channel(dev, 0, 0, 0);                    \
 381        if (priv->num_channels == 1)                                    \
 382                goto out;                                               \
 383        if (ch_done_mask & 0x40000000)                                  \
 384                flush_channel(dev, 1, 0, 0);                    \
 385        if (ch_done_mask & 0x00010000)                                  \
 386                flush_channel(dev, 2, 0, 0);                    \
 387        if (ch_done_mask & 0x00040000)                                  \
 388                flush_channel(dev, 3, 0, 0);                    \
 389                                                                        \
 390out:                                                                    \
 391        /* At this point, all completed channels have been processed */ \
 392        /* Unmask done interrupts for channels completed later on. */   \
 393        spin_lock_irqsave(&priv->reg_lock, flags);                      \
 394        clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
 395        clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);    \
 396        spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
 397}
 398
 399DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
 400
 401#define DEF_TALITOS2_DONE(name, ch_done_mask)                           \
 402static void talitos2_done_##name(unsigned long data)                    \
 403{                                                                       \
 404        struct device *dev = (struct device *)data;                     \
 405        struct talitos_private *priv = dev_get_drvdata(dev);            \
 406        unsigned long flags;                                            \
 407                                                                        \
 408        if (ch_done_mask & 1)                                           \
 409                flush_channel(dev, 0, 0, 0);                            \
 410        if (priv->num_channels == 1)                                    \
 411                goto out;                                               \
 412        if (ch_done_mask & (1 << 2))                                    \
 413                flush_channel(dev, 1, 0, 0);                            \
 414        if (ch_done_mask & (1 << 4))                                    \
 415                flush_channel(dev, 2, 0, 0);                            \
 416        if (ch_done_mask & (1 << 6))                                    \
 417                flush_channel(dev, 3, 0, 0);                            \
 418                                                                        \
 419out:                                                                    \
 420        /* At this point, all completed channels have been processed */ \
 421        /* Unmask done interrupts for channels completed later on. */   \
 422        spin_lock_irqsave(&priv->reg_lock, flags);                      \
 423        setbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
 424        setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);    \
 425        spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
 426}
 427
 428DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
 429DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
 430DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
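/*
 * The done tasklets above re-enable further "channel done" interrupts on
 * exit.  Note the opposite IMR polarity of the two generations: SEC 1.x
 * enables an interrupt by clearing its IMR bit and masks it by setting the
 * bit, while SEC 2.x+ enables by setting and masks by clearing; the
 * interrupt handlers below therefore mask with setbits32/clrbits32
 * respectively before scheduling the tasklet.
 */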
 431
 432/*
 433 * locate current (offending) descriptor
 434 */
 435static u32 current_desc_hdr(struct device *dev, int ch)
 436{
 437        struct talitos_private *priv = dev_get_drvdata(dev);
 438        int tail, iter;
 439        dma_addr_t cur_desc;
 440
 441        cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
 442        cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
 443
 444        if (!cur_desc) {
 445                dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
 446                return 0;
 447        }
 448
 449        tail = priv->chan[ch].tail;
 450
 451        iter = tail;
 452        while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
 453                iter = (iter + 1) & (priv->fifo_len - 1);
 454                if (iter == tail) {
 455                        dev_err(dev, "couldn't locate current descriptor\n");
 456                        return 0;
 457                }
 458        }
 459
 460        return priv->chan[ch].fifo[iter].desc->hdr;
 461}
 462
 463/*
 464 * user diagnostics; report root cause of error based on execution unit status
 465 */
 466static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
 467{
 468        struct talitos_private *priv = dev_get_drvdata(dev);
 469        int i;
 470
 471        if (!desc_hdr)
 472                desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
 473
 474        switch (desc_hdr & DESC_HDR_SEL0_MASK) {
 475        case DESC_HDR_SEL0_AFEU:
 476                dev_err(dev, "AFEUISR 0x%08x_%08x\n",
 477                        in_be32(priv->reg_afeu + TALITOS_EUISR),
 478                        in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
 479                break;
 480        case DESC_HDR_SEL0_DEU:
 481                dev_err(dev, "DEUISR 0x%08x_%08x\n",
 482                        in_be32(priv->reg_deu + TALITOS_EUISR),
 483                        in_be32(priv->reg_deu + TALITOS_EUISR_LO));
 484                break;
 485        case DESC_HDR_SEL0_MDEUA:
 486        case DESC_HDR_SEL0_MDEUB:
 487                dev_err(dev, "MDEUISR 0x%08x_%08x\n",
 488                        in_be32(priv->reg_mdeu + TALITOS_EUISR),
 489                        in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
 490                break;
 491        case DESC_HDR_SEL0_RNG:
 492                dev_err(dev, "RNGUISR 0x%08x_%08x\n",
 493                        in_be32(priv->reg_rngu + TALITOS_ISR),
 494                        in_be32(priv->reg_rngu + TALITOS_ISR_LO));
 495                break;
 496        case DESC_HDR_SEL0_PKEU:
 497                dev_err(dev, "PKEUISR 0x%08x_%08x\n",
 498                        in_be32(priv->reg_pkeu + TALITOS_EUISR),
 499                        in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
 500                break;
 501        case DESC_HDR_SEL0_AESU:
 502                dev_err(dev, "AESUISR 0x%08x_%08x\n",
 503                        in_be32(priv->reg_aesu + TALITOS_EUISR),
 504                        in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
 505                break;
 506        case DESC_HDR_SEL0_CRCU:
 507                dev_err(dev, "CRCUISR 0x%08x_%08x\n",
 508                        in_be32(priv->reg_crcu + TALITOS_EUISR),
 509                        in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
 510                break;
 511        case DESC_HDR_SEL0_KEU:
 512                dev_err(dev, "KEUISR 0x%08x_%08x\n",
 513                        in_be32(priv->reg_pkeu + TALITOS_EUISR),
 514                        in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
 515                break;
 516        }
 517
 518        switch (desc_hdr & DESC_HDR_SEL1_MASK) {
 519        case DESC_HDR_SEL1_MDEUA:
 520        case DESC_HDR_SEL1_MDEUB:
 521                dev_err(dev, "MDEUISR 0x%08x_%08x\n",
 522                        in_be32(priv->reg_mdeu + TALITOS_EUISR),
 523                        in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
 524                break;
 525        case DESC_HDR_SEL1_CRCU:
 526                dev_err(dev, "CRCUISR 0x%08x_%08x\n",
 527                        in_be32(priv->reg_crcu + TALITOS_EUISR),
 528                        in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
 529                break;
 530        }
 531
 532        for (i = 0; i < 8; i++)
 533                dev_err(dev, "DESCBUF 0x%08x_%08x\n",
 534                        in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
 535                        in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
 536}
 537
 538/*
 539 * recover from error interrupts
 540 */
 541static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
 542{
 543        struct talitos_private *priv = dev_get_drvdata(dev);
 544        unsigned int timeout = TALITOS_TIMEOUT;
 545        int ch, error, reset_dev = 0;
 546        u32 v_lo;
 547        bool is_sec1 = has_ftr_sec1(priv);
 548        int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
 549
 550        for (ch = 0; ch < priv->num_channels; ch++) {
 551                /* skip channels without errors */
 552                if (is_sec1) {
 553                        /* bits 29, 31, 17, 19 */
 554                        if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
 555                                continue;
 556                } else {
 557                        if (!(isr & (1 << (ch * 2 + 1))))
 558                                continue;
 559                }
 560
 561                error = -EINVAL;
 562
 563                v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
 564
 565                if (v_lo & TALITOS_CCPSR_LO_DOF) {
 566                        dev_err(dev, "double fetch fifo overflow error\n");
 567                        error = -EAGAIN;
 568                        reset_ch = 1;
 569                }
 570                if (v_lo & TALITOS_CCPSR_LO_SOF) {
 571                        /* h/w dropped descriptor */
 572                        dev_err(dev, "single fetch fifo overflow error\n");
 573                        error = -EAGAIN;
 574                }
 575                if (v_lo & TALITOS_CCPSR_LO_MDTE)
 576                        dev_err(dev, "master data transfer error\n");
 577                if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
  578                        dev_err(dev, is_sec1 ? "pointer not complete error\n"
 579                                             : "s/g data length zero error\n");
 580                if (v_lo & TALITOS_CCPSR_LO_FPZ)
 581                        dev_err(dev, is_sec1 ? "parity error\n"
 582                                             : "fetch pointer zero error\n");
 583                if (v_lo & TALITOS_CCPSR_LO_IDH)
 584                        dev_err(dev, "illegal descriptor header error\n");
 585                if (v_lo & TALITOS_CCPSR_LO_IEU)
 586                        dev_err(dev, is_sec1 ? "static assignment error\n"
 587                                             : "invalid exec unit error\n");
 588                if (v_lo & TALITOS_CCPSR_LO_EU)
 589                        report_eu_error(dev, ch, current_desc_hdr(dev, ch));
 590                if (!is_sec1) {
 591                        if (v_lo & TALITOS_CCPSR_LO_GB)
 592                                dev_err(dev, "gather boundary error\n");
 593                        if (v_lo & TALITOS_CCPSR_LO_GRL)
 594                                dev_err(dev, "gather return/length error\n");
 595                        if (v_lo & TALITOS_CCPSR_LO_SB)
 596                                dev_err(dev, "scatter boundary error\n");
 597                        if (v_lo & TALITOS_CCPSR_LO_SRL)
 598                                dev_err(dev, "scatter return/length error\n");
 599                }
 600
 601                flush_channel(dev, ch, error, reset_ch);
 602
 603                if (reset_ch) {
 604                        reset_channel(dev, ch);
 605                } else {
 606                        setbits32(priv->chan[ch].reg + TALITOS_CCCR,
 607                                  TALITOS2_CCCR_CONT);
 608                        setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
 609                        while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
 610                               TALITOS2_CCCR_CONT) && --timeout)
 611                                cpu_relax();
 612                        if (timeout == 0) {
 613                                dev_err(dev, "failed to restart channel %d\n",
 614                                        ch);
 615                                reset_dev = 1;
 616                        }
 617                }
 618        }
 619        if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
 620            (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
 621                if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
 622                        dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
 623                                isr, isr_lo);
 624                else
 625                        dev_err(dev, "done overflow, internal time out, or "
 626                                "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
 627
 628                /* purge request queues */
 629                for (ch = 0; ch < priv->num_channels; ch++)
 630                        flush_channel(dev, ch, -EIO, 1);
 631
 632                /* reset and reinitialize the device */
 633                init_device(dev);
 634        }
 635}
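/*
 * Recovery policy used above: on SEC 2.x+ a channel that hit a per-descriptor
 * error is first asked to continue (TALITOS2_CCCR_CONT) so the remaining
 * descriptors keep flowing; SEC 1.x has no continuation, so its channels are
 * always reset.  Anything beyond per-channel errors (ISR bits outside the
 * 4CHERR mask, a non-zero ISR_LO, or a failed restart) flushes every channel
 * with -EIO and reinitializes the whole device.
 */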
 636
 637#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
 638static irqreturn_t talitos1_interrupt_##name(int irq, void *data)              \
 639{                                                                              \
 640        struct device *dev = data;                                             \
 641        struct talitos_private *priv = dev_get_drvdata(dev);                   \
 642        u32 isr, isr_lo;                                                       \
 643        unsigned long flags;                                                   \
 644                                                                               \
 645        spin_lock_irqsave(&priv->reg_lock, flags);                             \
 646        isr = in_be32(priv->reg + TALITOS_ISR);                                \
 647        isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
 648        /* Acknowledge interrupt */                                            \
 649        out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
 650        out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
 651                                                                               \
 652        if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
 653                spin_unlock_irqrestore(&priv->reg_lock, flags);                \
 654                talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
 655        }                                                                      \
 656        else {                                                                 \
 657                if (likely(isr & ch_done_mask)) {                              \
 658                        /* mask further done interrupts. */                    \
 659                        setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
 660                        /* done_task will unmask done interrupts at exit */    \
 661                        tasklet_schedule(&priv->done_task[tlet]);              \
 662                }                                                              \
 663                spin_unlock_irqrestore(&priv->reg_lock, flags);                \
 664        }                                                                      \
 665                                                                               \
 666        return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
 667                                                                IRQ_NONE;      \
 668}
 669
 670DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
 671
 672#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
 673static irqreturn_t talitos2_interrupt_##name(int irq, void *data)              \
 674{                                                                              \
 675        struct device *dev = data;                                             \
 676        struct talitos_private *priv = dev_get_drvdata(dev);                   \
 677        u32 isr, isr_lo;                                                       \
 678        unsigned long flags;                                                   \
 679                                                                               \
 680        spin_lock_irqsave(&priv->reg_lock, flags);                             \
 681        isr = in_be32(priv->reg + TALITOS_ISR);                                \
 682        isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
 683        /* Acknowledge interrupt */                                            \
 684        out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
 685        out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
 686                                                                               \
 687        if (unlikely(isr & ch_err_mask || isr_lo)) {                           \
 688                spin_unlock_irqrestore(&priv->reg_lock, flags);                \
 689                talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
 690        }                                                                      \
 691        else {                                                                 \
 692                if (likely(isr & ch_done_mask)) {                              \
 693                        /* mask further done interrupts. */                    \
 694                        clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
 695                        /* done_task will unmask done interrupts at exit */    \
 696                        tasklet_schedule(&priv->done_task[tlet]);              \
 697                }                                                              \
 698                spin_unlock_irqrestore(&priv->reg_lock, flags);                \
 699        }                                                                      \
 700                                                                               \
 701        return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
 702                                                                IRQ_NONE;      \
 703}
 704
 705DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
 706DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
 707                       0)
 708DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
 709                       1)
 710
 711/*
 712 * hwrng
 713 */
 714static int talitos_rng_data_present(struct hwrng *rng, int wait)
 715{
 716        struct device *dev = (struct device *)rng->priv;
 717        struct talitos_private *priv = dev_get_drvdata(dev);
 718        u32 ofl;
 719        int i;
 720
 721        for (i = 0; i < 20; i++) {
 722                ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
 723                      TALITOS_RNGUSR_LO_OFL;
 724                if (ofl || !wait)
 725                        break;
 726                udelay(10);
 727        }
 728
 729        return !!ofl;
 730}
 731
 732static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
 733{
 734        struct device *dev = (struct device *)rng->priv;
 735        struct talitos_private *priv = dev_get_drvdata(dev);
 736
 737        /* rng fifo requires 64-bit accesses */
 738        *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
 739        *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
 740
 741        return sizeof(u32);
 742}
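/*
 * The RNGU output fifo is 64 bits wide and is drained with paired 32-bit
 * reads (FIFO then FIFO_LO); only the second word is returned to the hwrng
 * core, so each call yields 32 of the 64 bits read.
 */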
 743
 744static int talitos_rng_init(struct hwrng *rng)
 745{
 746        struct device *dev = (struct device *)rng->priv;
 747        struct talitos_private *priv = dev_get_drvdata(dev);
 748        unsigned int timeout = TALITOS_TIMEOUT;
 749
 750        setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
 751        while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
 752                 & TALITOS_RNGUSR_LO_RD)
 753               && --timeout)
 754                cpu_relax();
 755        if (timeout == 0) {
 756                dev_err(dev, "failed to reset rng hw\n");
 757                return -ENODEV;
 758        }
 759
 760        /* start generating */
 761        setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
 762
 763        return 0;
 764}
 765
 766static int talitos_register_rng(struct device *dev)
 767{
 768        struct talitos_private *priv = dev_get_drvdata(dev);
 769        int err;
 770
  771        priv->rng.name          = dev_driver_string(dev);
  772        priv->rng.init          = talitos_rng_init;
  773        priv->rng.data_present  = talitos_rng_data_present;
  774        priv->rng.data_read     = talitos_rng_data_read;
 775        priv->rng.priv          = (unsigned long)dev;
 776
 777        err = hwrng_register(&priv->rng);
 778        if (!err)
 779                priv->rng_registered = true;
 780
 781        return err;
 782}
 783
 784static void talitos_unregister_rng(struct device *dev)
 785{
 786        struct talitos_private *priv = dev_get_drvdata(dev);
 787
 788        if (!priv->rng_registered)
 789                return;
 790
 791        hwrng_unregister(&priv->rng);
 792        priv->rng_registered = false;
 793}
 794
 795/*
 796 * crypto alg
 797 */
 798#define TALITOS_CRA_PRIORITY            3000
 799#define TALITOS_MAX_KEY_SIZE            96
 800#define TALITOS_MAX_IV_LENGTH           16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 801
 802struct talitos_ctx {
 803        struct device *dev;
 804        int ch;
 805        __be32 desc_hdr_template;
 806        u8 key[TALITOS_MAX_KEY_SIZE];
 807        u8 iv[TALITOS_MAX_IV_LENGTH];
 808        unsigned int keylen;
 809        unsigned int enckeylen;
 810        unsigned int authkeylen;
 811};
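/*
 * For AEAD (authenc) transforms, key[] holds the HMAC key immediately
 * followed by the cipher key, with authkeylen/enckeylen recording the split
 * (see aead_setkey() below).  iv[] serves as the DMA_FROM_DEVICE writeback
 * area for the "iv out" pointer of the ipsec_esp descriptor.
 */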
 812
 813#define HASH_MAX_BLOCK_SIZE             SHA512_BLOCK_SIZE
 814#define TALITOS_MDEU_MAX_CONTEXT_SIZE   TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
 815
 816struct talitos_ahash_req_ctx {
 817        u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
 818        unsigned int hw_context_size;
 819        u8 buf[HASH_MAX_BLOCK_SIZE];
 820        u8 bufnext[HASH_MAX_BLOCK_SIZE];
 821        unsigned int swinit;
 822        unsigned int first;
 823        unsigned int last;
 824        unsigned int to_hash_later;
 825        unsigned int nbuf;
 826        struct scatterlist bufsl[2];
 827        struct scatterlist *psrc;
 828};
 829
 830static int aead_setkey(struct crypto_aead *authenc,
 831                       const u8 *key, unsigned int keylen)
 832{
 833        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 834        struct crypto_authenc_keys keys;
 835
 836        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 837                goto badkey;
 838
 839        if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
 840                goto badkey;
 841
 842        memcpy(ctx->key, keys.authkey, keys.authkeylen);
 843        memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
 844
 845        ctx->keylen = keys.authkeylen + keys.enckeylen;
 846        ctx->enckeylen = keys.enckeylen;
 847        ctx->authkeylen = keys.authkeylen;
 848
 849        return 0;
 850
 851badkey:
 852        crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
 853        return -EINVAL;
 854}
 855
 856/*
 857 * talitos_edesc - s/w-extended descriptor
 858 * @src_nents: number of segments in input scatterlist
 859 * @dst_nents: number of segments in output scatterlist
 860 * @icv_ool: whether ICV is out-of-line
 861 * @iv_dma: dma address of iv for checking continuity and link table
 862 * @dma_len: length of dma mapped link_tbl space
 863 * @dma_link_tbl: bus physical address of link_tbl/buf
 864 * @desc: h/w descriptor
 865 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
  866 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 867 *
 868 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 869 * is greater than 1, an integrity check value is concatenated to the end
 870 * of link_tbl data
 871 */
 872struct talitos_edesc {
 873        int src_nents;
 874        int dst_nents;
 875        bool icv_ool;
 876        dma_addr_t iv_dma;
 877        int dma_len;
 878        dma_addr_t dma_link_tbl;
 879        struct talitos_desc desc;
 880        union {
 881                struct talitos_ptr link_tbl[0];
 882                u8 buf[0];
 883        };
 884};
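/*
 * The edesc is allocated as a single variable-sized block by
 * talitos_edesc_alloc(): the fixed part above is followed either by the
 * SEC 2.x+ link tables plus room for the stashed and generated ICVs, or by
 * a flat bounce buffer on SEC 1.x.  dma_link_tbl/dma_len describe the DMA
 * mapping of that trailing area.
 */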
 885
 886static void talitos_sg_unmap(struct device *dev,
 887                             struct talitos_edesc *edesc,
 888                             struct scatterlist *src,
 889                             struct scatterlist *dst)
 890{
 891        unsigned int src_nents = edesc->src_nents ? : 1;
 892        unsigned int dst_nents = edesc->dst_nents ? : 1;
 893
 894        if (src != dst) {
 895                dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 896
 897                if (dst) {
 898                        dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 899                }
 900        } else
 901                dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 902}
 903
 904static void ipsec_esp_unmap(struct device *dev,
 905                            struct talitos_edesc *edesc,
 906                            struct aead_request *areq)
 907{
 908        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
 909        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
 910        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
 911        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
 912
 913        talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
 914
 915        if (edesc->dma_len)
 916                dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
 917                                 DMA_BIDIRECTIONAL);
 918}
 919
 920/*
 921 * ipsec_esp descriptor callbacks
 922 */
 923static void ipsec_esp_encrypt_done(struct device *dev,
 924                                   struct talitos_desc *desc, void *context,
 925                                   int err)
 926{
 927        struct aead_request *areq = context;
 928        struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 929        unsigned int authsize = crypto_aead_authsize(authenc);
 930        struct talitos_edesc *edesc;
 931        struct scatterlist *sg;
 932        void *icvdata;
 933
 934        edesc = container_of(desc, struct talitos_edesc, desc);
 935
 936        ipsec_esp_unmap(dev, edesc, areq);
 937
 938        /* copy the generated ICV to dst */
 939        if (edesc->icv_ool) {
 940                icvdata = &edesc->link_tbl[edesc->src_nents +
 941                                           edesc->dst_nents + 2];
 942                sg = sg_last(areq->dst, edesc->dst_nents);
 943                memcpy((char *)sg_virt(sg) + sg->length - authsize,
 944                       icvdata, authsize);
 945        }
 946
 947        kfree(edesc);
 948
 949        aead_request_complete(areq, err);
 950}
 951
 952static void ipsec_esp_decrypt_swauth_done(struct device *dev,
 953                                          struct talitos_desc *desc,
 954                                          void *context, int err)
 955{
 956        struct aead_request *req = context;
 957        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
 958        unsigned int authsize = crypto_aead_authsize(authenc);
 959        struct talitos_edesc *edesc;
 960        struct scatterlist *sg;
 961        char *oicv, *icv;
 962
 963        edesc = container_of(desc, struct talitos_edesc, desc);
 964
 965        ipsec_esp_unmap(dev, edesc, req);
 966
 967        if (!err) {
 968                /* auth check */
 969                sg = sg_last(req->dst, edesc->dst_nents ? : 1);
 970                icv = (char *)sg_virt(sg) + sg->length - authsize;
 971
 972                if (edesc->dma_len) {
 973                        oicv = (char *)&edesc->link_tbl[edesc->src_nents +
 974                                                        edesc->dst_nents + 2];
 975                        if (edesc->icv_ool)
 976                                icv = oicv + authsize;
 977                } else
 978                        oicv = (char *)&edesc->link_tbl[0];
 979
 980                err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
 981        }
 982
 983        kfree(edesc);
 984
 985        aead_request_complete(req, err);
 986}
 987
 988static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
 989                                          struct talitos_desc *desc,
 990                                          void *context, int err)
 991{
 992        struct aead_request *req = context;
 993        struct talitos_edesc *edesc;
 994
 995        edesc = container_of(desc, struct talitos_edesc, desc);
 996
 997        ipsec_esp_unmap(dev, edesc, req);
 998
 999        /* check ICV auth status */
1000        if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1001                     DESC_HDR_LO_ICCR1_PASS))
1002                err = -EBADMSG;
1003
1004        kfree(edesc);
1005
1006        aead_request_complete(req, err);
1007}
1008
1009/*
1010 * convert scatterlist to SEC h/w link table format
1011 * stop at cryptlen bytes
1012 */
1013static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1014                                 unsigned int offset, int cryptlen,
1015                                 struct talitos_ptr *link_tbl_ptr)
1016{
1017        int n_sg = sg_count;
1018        int count = 0;
1019
1020        while (cryptlen && sg && n_sg--) {
1021                unsigned int len = sg_dma_len(sg);
1022
1023                if (offset >= len) {
1024                        offset -= len;
1025                        goto next;
1026                }
1027
1028                len -= offset;
1029
1030                if (len > cryptlen)
1031                        len = cryptlen;
1032
1033                to_talitos_ptr(link_tbl_ptr + count,
1034                               sg_dma_address(sg) + offset, 0);
1035                link_tbl_ptr[count].len = cpu_to_be16(len);
1036                link_tbl_ptr[count].j_extent = 0;
1037                count++;
1038                cryptlen -= len;
1039                offset = 0;
1040
1041next:
1042                sg = sg_next(sg);
1043        }
1044
1045        /* tag end of link table */
1046        if (count > 0)
1047                link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
1048
1049        return count;
1050}
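/*
 * The entries built above use the SEC 2.x+ link-table format: each
 * talitos_ptr holds one segment's address and length, and the j_extent byte
 * of the final entry is tagged DESC_PTR_LNKTBL_RETURN to end the table.  A
 * descriptor pointer that refers to such a table carries DESC_PTR_LNKTBL_JUMP
 * in its own extent byte (see ipsec_esp() below).
 */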
1051
1052static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
1053                                 int cryptlen,
1054                                 struct talitos_ptr *link_tbl_ptr)
1055{
1056        return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
1057                                     link_tbl_ptr);
1058}
1059
1060/*
1061 * fill in and submit ipsec_esp descriptor
1062 */
1063static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1064                     void (*callback)(struct device *dev,
1065                                      struct talitos_desc *desc,
1066                                      void *context, int error))
1067{
1068        struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1069        unsigned int authsize = crypto_aead_authsize(aead);
1070        struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1071        struct device *dev = ctx->dev;
1072        struct talitos_desc *desc = &edesc->desc;
1073        unsigned int cryptlen = areq->cryptlen;
1074        unsigned int ivsize = crypto_aead_ivsize(aead);
1075        int tbl_off = 0;
1076        int sg_count, ret;
1077        int sg_link_tbl_len;
1078
1079        /* hmac key */
1080        map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
1081                               DMA_TO_DEVICE);
1082
1083        sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
1084                              (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1085                                                           : DMA_TO_DEVICE);
1086
1087        /* hmac data */
1088        desc->ptr[1].len = cpu_to_be16(areq->assoclen);
1089        if (sg_count > 1 &&
1090            (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
1091                                         areq->assoclen,
1092                                         &edesc->link_tbl[tbl_off])) > 1) {
1093                tbl_off += ret;
1094
1095                to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
1096                               sizeof(struct talitos_ptr), 0);
1097                desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
1098
1099                dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1100                                           edesc->dma_len, DMA_BIDIRECTIONAL);
1101        } else {
1102                to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
1103                desc->ptr[1].j_extent = 0;
1104        }
1105
1106        /* cipher iv */
1107        to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
1108        desc->ptr[2].len = cpu_to_be16(ivsize);
1109        desc->ptr[2].j_extent = 0;
1110
1111        /* cipher key */
1112        map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1113                               (char *)&ctx->key + ctx->authkeylen,
1114                               DMA_TO_DEVICE);
1115
1116        /*
1117         * cipher in
1118         * map and adjust cipher len to aead request cryptlen.
1119         * extent is bytes of HMAC postpended to ciphertext,
1120         * typically 12 for ipsec
1121         */
1122        desc->ptr[4].len = cpu_to_be16(cryptlen);
1123        desc->ptr[4].j_extent = authsize;
1124
1125        sg_link_tbl_len = cryptlen;
1126        if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1127                sg_link_tbl_len += authsize;
1128
1129        if (sg_count > 1 &&
1130            (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
1131                                         sg_link_tbl_len,
1132                                         &edesc->link_tbl[tbl_off])) > 1) {
1133                tbl_off += ret;
1134                desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1135                to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1136                                              tbl_off *
1137                                              sizeof(struct talitos_ptr), 0);
1138                dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1139                                           edesc->dma_len,
1140                                           DMA_BIDIRECTIONAL);
1141        } else
1142                to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
1143
1144        /* cipher out */
1145        desc->ptr[5].len = cpu_to_be16(cryptlen);
1146        desc->ptr[5].j_extent = authsize;
1147
1148        if (areq->src != areq->dst)
1149                sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
1150                                      DMA_FROM_DEVICE);
1151
1152        edesc->icv_ool = false;
1153
1154        if (sg_count > 1 &&
1155            (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
1156                                              areq->assoclen, cryptlen,
1157                                              &edesc->link_tbl[tbl_off])) >
1158            1) {
1159                struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1160
1161                to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1162                               tbl_off * sizeof(struct talitos_ptr), 0);
1163
1164                /* Add an entry to the link table for ICV data */
1165                tbl_ptr += sg_count - 1;
1166                tbl_ptr->j_extent = 0;
1167                tbl_ptr++;
1168                tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1169                tbl_ptr->len = cpu_to_be16(authsize);
1170
1171                /* icv data follows link tables */
1172                to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
1173                                        (edesc->src_nents + edesc->dst_nents +
1174                                         2) * sizeof(struct talitos_ptr) +
1175                                        authsize, 0);
1176                desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1177                dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1178                                           edesc->dma_len, DMA_BIDIRECTIONAL);
1179
1180                edesc->icv_ool = true;
1181        } else
1182                to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
1183
1184        /* iv out */
1185        map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1186                               DMA_FROM_DEVICE);
1187
1188        ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1189        if (ret != -EINPROGRESS) {
1190                ipsec_esp_unmap(dev, edesc, areq);
1191                kfree(edesc);
1192        }
1193        return ret;
1194}
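/*
 * ipsec_esp() uses the seven descriptor pointers as: [0] HMAC key,
 * [1] associated data, [2] cipher IV in, [3] cipher key, [4] cipher input
 * (with the ICV length in the extent field), [5] cipher output (+ ICV),
 * [6] IV writeback.  Pointers [1], [4] and [5] switch to link tables when
 * the data spans more than one scatterlist segment.
 */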
1195
1196/*
1197 * allocate and map the extended descriptor
1198 */
1199static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1200                                                 struct scatterlist *src,
1201                                                 struct scatterlist *dst,
1202                                                 u8 *iv,
1203                                                 unsigned int assoclen,
1204                                                 unsigned int cryptlen,
1205                                                 unsigned int authsize,
1206                                                 unsigned int ivsize,
1207                                                 int icv_stashing,
1208                                                 u32 cryptoflags,
1209                                                 bool encrypt)
1210{
1211        struct talitos_edesc *edesc;
1212        int src_nents, dst_nents, alloc_len, dma_len;
1213        dma_addr_t iv_dma = 0;
1214        gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1215                      GFP_ATOMIC;
1216        struct talitos_private *priv = dev_get_drvdata(dev);
1217        bool is_sec1 = has_ftr_sec1(priv);
1218        int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1219        void *err;
1220
1221        if (cryptlen + authsize > max_len) {
1222                dev_err(dev, "length exceeds h/w max limit\n");
1223                return ERR_PTR(-EINVAL);
1224        }
1225
1226        if (ivsize)
1227                iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1228
1229        if (!dst || dst == src) {
1230                src_nents = sg_nents_for_len(src,
1231                                             assoclen + cryptlen + authsize);
1232                if (src_nents < 0) {
1233                        dev_err(dev, "Invalid number of src SG.\n");
1234                        err = ERR_PTR(-EINVAL);
1235                        goto error_sg;
1236                }
1237                src_nents = (src_nents == 1) ? 0 : src_nents;
1238                dst_nents = dst ? src_nents : 0;
1239        } else { /* dst && dst != src*/
1240                src_nents = sg_nents_for_len(src, assoclen + cryptlen +
1241                                                 (encrypt ? 0 : authsize));
1242                if (src_nents < 0) {
1243                        dev_err(dev, "Invalid number of src SG.\n");
1244                        err = ERR_PTR(-EINVAL);
1245                        goto error_sg;
1246                }
1247                src_nents = (src_nents == 1) ? 0 : src_nents;
1248                dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
1249                                                 (encrypt ? authsize : 0));
1250                if (dst_nents < 0) {
1251                        dev_err(dev, "Invalid number of dst SG.\n");
1252                        err = ERR_PTR(-EINVAL);
1253                        goto error_sg;
1254                }
1255                dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1256        }
1257
1258        /*
1259         * allocate space for base edesc plus the link tables,
1260         * allowing for two separate entries for AD and generated ICV (+ 2),
1261         * and space for two sets of ICVs (stashed and generated)
1262         */
1263        alloc_len = sizeof(struct talitos_edesc);
1264        if (src_nents || dst_nents) {
1265                if (is_sec1)
1266                        dma_len = (src_nents ? cryptlen : 0) +
1267                                  (dst_nents ? cryptlen : 0);
1268                else
1269                        dma_len = (src_nents + dst_nents + 2) *
1270                                  sizeof(struct talitos_ptr) + authsize * 2;
1271                alloc_len += dma_len;
1272        } else {
1273                dma_len = 0;
1274                alloc_len += icv_stashing ? authsize : 0;
1275        }
1276
1277        edesc = kmalloc(alloc_len, GFP_DMA | flags);
1278        if (!edesc) {
1279                dev_err(dev, "could not allocate edescriptor\n");
1280                err = ERR_PTR(-ENOMEM);
1281                goto error_sg;
1282        }
1283
1284        edesc->src_nents = src_nents;
1285        edesc->dst_nents = dst_nents;
1286        edesc->iv_dma = iv_dma;
1287        edesc->dma_len = dma_len;
1288        if (dma_len)
1289                edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1290                                                     edesc->dma_len,
1291                                                     DMA_BIDIRECTIONAL);
1292
1293        return edesc;
1294error_sg:
1295        if (iv_dma)
1296                dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1297        return err;
1298}
1299
1300static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1301                                              int icv_stashing, bool encrypt)
1302{
1303        struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1304        unsigned int authsize = crypto_aead_authsize(authenc);
1305        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1306        unsigned int ivsize = crypto_aead_ivsize(authenc);
1307
1308        return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1309                                   iv, areq->assoclen, areq->cryptlen,
1310                                   authsize, ivsize, icv_stashing,
1311                                   areq->base.flags, encrypt);
1312}
1313
1314static int aead_encrypt(struct aead_request *req)
1315{
1316        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1317        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1318        struct talitos_edesc *edesc;
1319
1320        /* allocate extended descriptor */
1321        edesc = aead_edesc_alloc(req, req->iv, 0, true);
1322        if (IS_ERR(edesc))
1323                return PTR_ERR(edesc);
1324
1325        /* set encrypt */
1326        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1327
1328        return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1329}
1330
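/*
 * AEAD decrypt: strip the authsize from cryptlen, then either let the
 * SEC check the ICV itself (TALITOS_FTR_HW_AUTH_CHECK plus a layout the
 * hardware can handle, via DESC_HDR_MODE1_MDEU_CICV) or stash the
 * incoming ICV so the completion callback can compare it in software.
 */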
1331static int aead_decrypt(struct aead_request *req)
1332{
1333        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1334        unsigned int authsize = crypto_aead_authsize(authenc);
1335        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1336        struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1337        struct talitos_edesc *edesc;
1338        struct scatterlist *sg;
1339        void *icvdata;
1340
1341        req->cryptlen -= authsize;
1342
1343        /* allocate extended descriptor */
1344        edesc = aead_edesc_alloc(req, req->iv, 1, false);
1345        if (IS_ERR(edesc))
1346                return PTR_ERR(edesc);
1347
1348        if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1349            ((!edesc->src_nents && !edesc->dst_nents) ||
1350             priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1351
1352                /* decrypt and check the ICV */
1353                edesc->desc.hdr = ctx->desc_hdr_template |
1354                                  DESC_HDR_DIR_INBOUND |
1355                                  DESC_HDR_MODE1_MDEU_CICV;
1356
1357                /* reset integrity check result bits */
1358                edesc->desc.hdr_lo = 0;
1359
1360                return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1361        }
1362
1363        /* Have to check the ICV with software */
1364        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1365
1366        /* stash incoming ICV for later cmp with ICV generated by the h/w */
1367        if (edesc->dma_len)
1368                icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1369                                                   edesc->dst_nents + 2];
1370        else
1371                icvdata = &edesc->link_tbl[0];
1372
1373        sg = sg_last(req->src, edesc->src_nents ? : 1);
1374
1375        memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
1376
1377        return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1378}
1379
1380static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1381                             const u8 *key, unsigned int keylen)
1382{
1383        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1384
1385        memcpy(&ctx->key, key, keylen);
1386        ctx->keylen = keylen;
1387
1388        return 0;
1389}
1390
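/*
 * Undo the src/dst mappings set up by map_sg_in/out_talitos_ptr().  On
 * SEC1, data that went through the bounce buffer in the edesc is copied
 * back to the destination scatterlist here.
 */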
1391static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1392                                 struct scatterlist *dst, unsigned int len,
1393                                 struct talitos_edesc *edesc)
1394{
1395        struct talitos_private *priv = dev_get_drvdata(dev);
1396        bool is_sec1 = has_ftr_sec1(priv);
1397
1398        if (is_sec1) {
1399                if (!edesc->src_nents) {
1400                        dma_unmap_sg(dev, src, 1,
1401                                     dst != src ? DMA_TO_DEVICE
1402                                                : DMA_BIDIRECTIONAL);
1403                }
1404                if (dst && edesc->dst_nents) {
1405                        dma_sync_single_for_device(dev,
1406                                                   edesc->dma_link_tbl + len,
1407                                                   len, DMA_FROM_DEVICE);
1408                        sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
1409                                            edesc->buf + len, len);
1410                } else if (dst && dst != src) {
1411                        dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
1412                }
1413        } else {
1414                talitos_sg_unmap(dev, edesc, src, dst);
1415        }
1416}
1417
1418static void common_nonsnoop_unmap(struct device *dev,
1419                                  struct talitos_edesc *edesc,
1420                                  struct ablkcipher_request *areq)
1421{
1422        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1423
1424        unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
1425        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1426        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1427
1428        if (edesc->dma_len)
1429                dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1430                                 DMA_BIDIRECTIONAL);
1431}
1432
1433static void ablkcipher_done(struct device *dev,
1434                            struct talitos_desc *desc, void *context,
1435                            int err)
1436{
1437        struct ablkcipher_request *areq = context;
1438        struct talitos_edesc *edesc;
1439
1440        edesc = container_of(desc, struct talitos_edesc, desc);
1441
1442        common_nonsnoop_unmap(dev, edesc, areq);
1443
1444        kfree(edesc);
1445
1446        areq->base.complete(&areq->base, err);
1447}
1448
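/*
 * Map the source scatterlist into descriptor pointer *ptr.  SEC1 is
 * handled without link tables: a multi-entry scatterlist is linearized
 * into the edesc bounce buffer.  On SEC2+ the scatterlist is DMA-mapped
 * and, if more than one segment remains, described by a link table with
 * the JUMP bit set in the pointer's extent.
 */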
1449int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1450                          unsigned int len, struct talitos_edesc *edesc,
1451                          enum dma_data_direction dir, struct talitos_ptr *ptr)
1452{
1453        int sg_count;
1454        struct talitos_private *priv = dev_get_drvdata(dev);
1455        bool is_sec1 = has_ftr_sec1(priv);
1456
1457        to_talitos_ptr_len(ptr, len, is_sec1);
1458
1459        if (is_sec1) {
1460                sg_count = edesc->src_nents ? : 1;
1461
1462                if (sg_count == 1) {
1463                        dma_map_sg(dev, src, 1, dir);
1464                        to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1465                } else {
1466                        sg_copy_to_buffer(src, sg_count, edesc->buf, len);
1467                        to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
1468                        dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1469                                                   len, DMA_TO_DEVICE);
1470                }
1471        } else {
1472                to_talitos_ptr_extent_clear(ptr, is_sec1);
1473
1474                sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);
1475
1476                if (sg_count == 1) {
1477                        to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1478                } else {
1479                        sg_count = sg_to_link_tbl(src, sg_count, len,
1480                                                  &edesc->link_tbl[0]);
1481                        if (sg_count > 1) {
1482                                to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
1483                                ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1484                                dma_sync_single_for_device(dev,
1485                                                           edesc->dma_link_tbl,
1486                                                           edesc->dma_len,
1487                                                           DMA_BIDIRECTIONAL);
1488                        } else {
1489                                /* Only one segment now, so no link tbl needed */
1490                                to_talitos_ptr(ptr, sg_dma_address(src),
1491                                               is_sec1);
1492                        }
1493                }
1494        }
1495        return sg_count;
1496}
1497
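/*
 * Map the destination scatterlist into descriptor pointer *ptr.  DMA_NONE
 * means an in-place operation, so the mapping already done for the source
 * is reused.  SEC1 again points at the bounce buffer (offset by len);
 * SEC2+ builds its link table after the source entries in edesc->link_tbl.
 */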
1498void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1499                            unsigned int len, struct talitos_edesc *edesc,
1500                            enum dma_data_direction dir,
1501                            struct talitos_ptr *ptr, int sg_count)
1502{
1503        struct talitos_private *priv = dev_get_drvdata(dev);
1504        bool is_sec1 = has_ftr_sec1(priv);
1505
1506        if (dir != DMA_NONE)
1507                sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);
1508
1509        to_talitos_ptr_len(ptr, len, is_sec1);
1510
1511        if (is_sec1) {
1512                if (sg_count == 1) {
1513                        if (dir != DMA_NONE)
1514                                dma_map_sg(dev, dst, 1, dir);
1515                        to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1516                } else {
1517                        to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1518                        dma_sync_single_for_device(dev,
1519                                                   edesc->dma_link_tbl + len,
1520                                                   len, DMA_FROM_DEVICE);
1521                }
1522        } else {
1523                to_talitos_ptr_extent_clear(ptr, is_sec1);
1524
1525                if (sg_count == 1) {
1526                        to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1527                } else {
1528                        struct talitos_ptr *link_tbl_ptr =
1529                                &edesc->link_tbl[edesc->src_nents + 1];
1530
1531                        to_talitos_ptr(ptr, edesc->dma_link_tbl +
1532                                            (edesc->src_nents + 1) *
1533                                             sizeof(struct talitos_ptr), 0);
1534                        ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1535                        sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
1536                        dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1537                                                   edesc->dma_len,
1538                                                   DMA_BIDIRECTIONAL);
1539                }
1540        }
1541}
1542
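/*
 * Fill and submit the common nonsnoop (ablkcipher) descriptor.  The seven
 * pointer dwords are used as: [0] empty, [1] IV in, [2] cipher key,
 * [3] data in, [4] data out, [5] IV out, [6] empty.  If submission fails,
 * everything is unmapped and the edesc freed here.
 */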
1543static int common_nonsnoop(struct talitos_edesc *edesc,
1544                           struct ablkcipher_request *areq,
1545                           void (*callback) (struct device *dev,
1546                                             struct talitos_desc *desc,
1547                                             void *context, int error))
1548{
1549        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1550        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1551        struct device *dev = ctx->dev;
1552        struct talitos_desc *desc = &edesc->desc;
1553        unsigned int cryptlen = areq->nbytes;
1554        unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1555        int sg_count, ret;
1556        struct talitos_private *priv = dev_get_drvdata(dev);
1557        bool is_sec1 = has_ftr_sec1(priv);
1558
1559        /* first DWORD empty */
1560        desc->ptr[0] = zero_entry;
1561
1562        /* cipher iv */
1563        to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1564        to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1565        to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
1566
1567        /* cipher key */
1568        map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1569                               (char *)&ctx->key, DMA_TO_DEVICE);
1570
1571        /*
1572         * cipher in
1573         */
1574        sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1575                                         (areq->src == areq->dst) ?
1576                                          DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1577                                          &desc->ptr[3]);
1578
1579        /* cipher out */
1580        map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1581                               (areq->src == areq->dst) ? DMA_NONE
1582                                                        : DMA_FROM_DEVICE,
1583                               &desc->ptr[4], sg_count);
1584
1585        /* iv out */
1586        map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1587                               DMA_FROM_DEVICE);
1588
1589        /* last DWORD empty */
1590        desc->ptr[6] = zero_entry;
1591
1592        ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1593        if (ret != -EINPROGRESS) {
1594                common_nonsnoop_unmap(dev, edesc, areq);
1595                kfree(edesc);
1596        }
1597        return ret;
1598}
1599
1600static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1601                                                    areq, bool encrypt)
1602{
1603        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1604        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1605        unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1606
1607        return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1608                                   areq->info, 0, areq->nbytes, 0, ivsize, 0,
1609                                   areq->base.flags, encrypt);
1610}
1611
1612static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1613{
1614        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1615        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1616        struct talitos_edesc *edesc;
1617
1618        /* allocate extended descriptor */
1619        edesc = ablkcipher_edesc_alloc(areq, true);
1620        if (IS_ERR(edesc))
1621                return PTR_ERR(edesc);
1622
1623        /* set encrypt */
1624        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1625
1626        return common_nonsnoop(edesc, areq, ablkcipher_done);
1627}
1628
1629static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1630{
1631        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1632        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1633        struct talitos_edesc *edesc;
1634
1635        /* allocate extended descriptor */
1636        edesc = ablkcipher_edesc_alloc(areq, false);
1637        if (IS_ERR(edesc))
1638                return PTR_ERR(edesc);
1639
1640        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1641
1642        return common_nonsnoop(edesc, areq, ablkcipher_done);
1643}
1644
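/* Release the DMA mappings set up by common_nonsnoop_hash(). */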
1645static void common_nonsnoop_hash_unmap(struct device *dev,
1646                                       struct talitos_edesc *edesc,
1647                                       struct ahash_request *areq)
1648{
1649        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1650        struct talitos_private *priv = dev_get_drvdata(dev);
1651        bool is_sec1 = has_ftr_sec1(priv);
1652
1653        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1654
1655        unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1656
1657        /* When using hashctx-in, must unmap it. */
1658        if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1659                unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1660                                         DMA_TO_DEVICE);
1661
1662        if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
1663                unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1664                                         DMA_TO_DEVICE);
1665
1666        if (edesc->dma_len)
1667                dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1668                                 DMA_BIDIRECTIONAL);
1669
1670}
1671
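/*
 * Hash completion callback: any bytes that were held back (to_hash_later)
 * are moved to the front of the buffer so the next update/final/finup
 * starts from them, then the request is unmapped and completed.
 */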
1672static void ahash_done(struct device *dev,
1673                       struct talitos_desc *desc, void *context,
1674                       int err)
1675{
1676        struct ahash_request *areq = context;
1677        struct talitos_edesc *edesc =
1678                 container_of(desc, struct talitos_edesc, desc);
1679        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1680
1681        if (!req_ctx->last && req_ctx->to_hash_later) {
1682                /* Position any partial block for next update/final/finup */
1683                memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1684                req_ctx->nbuf = req_ctx->to_hash_later;
1685        }
1686        common_nonsnoop_hash_unmap(dev, edesc, areq);
1687
1688        kfree(edesc);
1689
1690        areq->base.complete(&areq->base, err);
1691}
1692
1693/*
1694 * SEC1 doesn't like hashing a zero-sized message, so we do the padding
1695 * ourselves and submit a padded block
1696 */
1697void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1698                               struct talitos_edesc *edesc,
1699                               struct talitos_ptr *ptr)
1700{
1701        static u8 padded_hash[64] = {
1702                0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1703                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1704                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1705                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1706        };
1707
1708        pr_err_once("Bug in SEC1, padding ourselves\n");
1709        edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1710        map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1711                               (char *)padded_hash, DMA_TO_DEVICE);
1712}
1713
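/*
 * Fill and submit a hash/HMAC descriptor.  Pointer dwords: [0] empty,
 * [1] hash context in (zero on the very first op when the h/w inits it),
 * [2] HMAC key if any, [3] data in, [4] empty, [5] digest out on the last
 * op or saved hash context otherwise, [6] empty.  On SEC1 a zero-length
 * data-in pointer is replaced by talitos_handle_buggy_hash().
 */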
1714static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1715                                struct ahash_request *areq, unsigned int length,
1716                                void (*callback) (struct device *dev,
1717                                                  struct talitos_desc *desc,
1718                                                  void *context, int error))
1719{
1720        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1721        struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1722        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1723        struct device *dev = ctx->dev;
1724        struct talitos_desc *desc = &edesc->desc;
1725        int ret;
1726        struct talitos_private *priv = dev_get_drvdata(dev);
1727        bool is_sec1 = has_ftr_sec1(priv);
1728
1729        /* first DWORD empty */
1730        desc->ptr[0] = zero_entry;
1731
1732        /* hash context in */
1733        if (!req_ctx->first || req_ctx->swinit) {
1734                map_single_talitos_ptr(dev, &desc->ptr[1],
1735                                       req_ctx->hw_context_size,
1736                                       (char *)req_ctx->hw_context,
1737                                       DMA_TO_DEVICE);
1738                req_ctx->swinit = 0;
1739        } else {
1740                desc->ptr[1] = zero_entry;
1741                /* Indicate next op is not the first. */
1742                req_ctx->first = 0;
1743        }
1744
1745        /* HMAC key */
1746        if (ctx->keylen)
1747                map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1748                                       (char *)&ctx->key, DMA_TO_DEVICE);
1749        else
1750                desc->ptr[2] = zero_entry;
1751
1752        /*
1753         * data in
1754         */
1755        map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1756                              DMA_TO_DEVICE, &desc->ptr[3]);
1757
1758        /* fifth DWORD empty */
1759        desc->ptr[4] = zero_entry;
1760
1761        /* hash/HMAC out -or- hash context out */
1762        if (req_ctx->last)
1763                map_single_talitos_ptr(dev, &desc->ptr[5],
1764                                       crypto_ahash_digestsize(tfm),
1765                                       areq->result, DMA_FROM_DEVICE);
1766        else
1767                map_single_talitos_ptr(dev, &desc->ptr[5],
1768                                       req_ctx->hw_context_size,
1769                                       req_ctx->hw_context, DMA_FROM_DEVICE);
1770
1771        /* last DWORD empty */
1772        desc->ptr[6] = zero_entry;
1773
1774        if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1775                talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1776
1777        ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1778        if (ret != -EINPROGRESS) {
1779                common_nonsnoop_hash_unmap(dev, edesc, areq);
1780                kfree(edesc);
1781        }
1782        return ret;
1783}
1784
1785static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1786                                               unsigned int nbytes)
1787{
1788        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1789        struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1790        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1791
1792        return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1793                                   nbytes, 0, 0, 0, areq->base.flags, false);
1794}
1795
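/*
 * Start a new hash: nothing is buffered yet and the hardware will
 * initialize its own context.  Digests up to SHA256 size share one
 * context size; SHA384/SHA512 use the larger one.
 */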
1796static int ahash_init(struct ahash_request *areq)
1797{
1798        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1799        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1800
1801        /* Initialize the context */
1802        req_ctx->nbuf = 0;
1803        req_ctx->first = 1; /* first indicates h/w must init its context */
1804        req_ctx->swinit = 0; /* assume h/w init of context */
1805        req_ctx->hw_context_size =
1806                (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1807                        ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1808                        : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1809
1810        return 0;
1811}
1812
1813/*
1814 * on h/w without explicit sha224 support, we initialize h/w context
1815 * manually with sha224 constants, and tell it to run sha256.
1816 */
1817static int ahash_init_sha224_swinit(struct ahash_request *areq)
1818{
1819        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1820
1821        ahash_init(areq);
1822        req_ctx->swinit = 1; /* prevent h/w init of context with sha256 values */
1823
1824        req_ctx->hw_context[0] = SHA224_H0;
1825        req_ctx->hw_context[1] = SHA224_H1;
1826        req_ctx->hw_context[2] = SHA224_H2;
1827        req_ctx->hw_context[3] = SHA224_H3;
1828        req_ctx->hw_context[4] = SHA224_H4;
1829        req_ctx->hw_context[5] = SHA224_H5;
1830        req_ctx->hw_context[6] = SHA224_H6;
1831        req_ctx->hw_context[7] = SHA224_H7;
1832
1833        /* init 64-bit count */
1834        req_ctx->hw_context[8] = 0;
1835        req_ctx->hw_context[9] = 0;
1836
1837        return 0;
1838}
1839
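/*
 * Core update/final path.  Less than one block of data is only buffered.
 * Otherwise whole blocks (plus anything previously buffered, chained in
 * through bufsl) are hashed now; the trailing partial block, or one full
 * block when the length is block-aligned and this is not the last op,
 * is saved in bufnext to be hashed later.
 */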
1840static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1841{
1842        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1843        struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1844        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1845        struct talitos_edesc *edesc;
1846        unsigned int blocksize =
1847                        crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1848        unsigned int nbytes_to_hash;
1849        unsigned int to_hash_later;
1850        unsigned int nsg;
1851        int nents;
1852
1853        if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1854                /* Buffer up to one whole block */
1855                nents = sg_nents_for_len(areq->src, nbytes);
1856                if (nents < 0) {
1857                        dev_err(ctx->dev, "Invalid number of src SG.\n");
1858                        return nents;
1859                }
1860                sg_copy_to_buffer(areq->src, nents,
1861                                  req_ctx->buf + req_ctx->nbuf, nbytes);
1862                req_ctx->nbuf += nbytes;
1863                return 0;
1864        }
1865
1866        /* Last request, or at least (blocksize + 1) bytes to hash */
1867        nbytes_to_hash = nbytes + req_ctx->nbuf;
1868        to_hash_later = nbytes_to_hash & (blocksize - 1);
1869
1870        if (req_ctx->last)
1871                to_hash_later = 0;
1872        else if (to_hash_later)
1873                /* There is a partial block. Hash the full block(s) now */
1874                nbytes_to_hash -= to_hash_later;
1875        else {
1876                /* Keep one block buffered */
1877                nbytes_to_hash -= blocksize;
1878                to_hash_later = blocksize;
1879        }
1880
1881        /* Chain in any previously buffered data */
1882        if (req_ctx->nbuf) {
1883                nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1884                sg_init_table(req_ctx->bufsl, nsg);
1885                sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1886                if (nsg > 1)
1887                        sg_chain(req_ctx->bufsl, 2, areq->src);
1888                req_ctx->psrc = req_ctx->bufsl;
1889        } else
1890                req_ctx->psrc = areq->src;
1891
1892        if (to_hash_later) {
1893                nents = sg_nents_for_len(areq->src, nbytes);
1894                if (nents < 0) {
1895                        dev_err(ctx->dev, "Invalid number of src SG.\n");
1896                        return nents;
1897                }
1898                sg_pcopy_to_buffer(areq->src, nents,
1899                                      req_ctx->bufnext,
1900                                      to_hash_later,
1901                                      nbytes - to_hash_later);
1902        }
1903        req_ctx->to_hash_later = to_hash_later;
1904
1905        /* Allocate extended descriptor */
1906        edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1907        if (IS_ERR(edesc))
1908                return PTR_ERR(edesc);
1909
1910        edesc->desc.hdr = ctx->desc_hdr_template;
1911
1912        /* On last one, request SEC to pad; otherwise continue */
1913        if (req_ctx->last)
1914                edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1915        else
1916                edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1917
1918        /* request SEC to INIT hash. */
1919        if (req_ctx->first && !req_ctx->swinit)
1920                edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1921
1922        /* When the tfm context has a keylen, it's an HMAC.
1923         * A first or last (i.e. not middle) descriptor must request HMAC.
1924         */
1925        if (ctx->keylen && (req_ctx->first || req_ctx->last))
1926                edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1927
1928        return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1929                                    ahash_done);
1930}
1931
1932static int ahash_update(struct ahash_request *areq)
1933{
1934        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1935
1936        req_ctx->last = 0;
1937
1938        return ahash_process_req(areq, areq->nbytes);
1939}
1940
1941static int ahash_final(struct ahash_request *areq)
1942{
1943        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1944
1945        req_ctx->last = 1;
1946
1947        return ahash_process_req(areq, 0);
1948}
1949
1950static int ahash_finup(struct ahash_request *areq)
1951{
1952        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1953
1954        req_ctx->last = 1;
1955
1956        return ahash_process_req(areq, areq->nbytes);
1957}
1958
1959static int ahash_digest(struct ahash_request *areq)
1960{
1961        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1962        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1963
1964        ahash->init(areq);
1965        req_ctx->last = 1;
1966
1967        return ahash_process_req(areq, areq->nbytes);
1968}
1969
1970struct keyhash_result {
1971        struct completion completion;
1972        int err;
1973};
1974
1975static void keyhash_complete(struct crypto_async_request *req, int err)
1976{
1977        struct keyhash_result *res = req->data;
1978
1979        if (err == -EINPROGRESS)
1980                return;
1981
1982        res->err = err;
1983        complete(&res->completion);
1984}
1985
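/*
 * Hash a long HMAC key down to a digest using this driver's own ahash,
 * with ctx->keylen temporarily zeroed so a plain hash is computed, and
 * wait for the asynchronous request to complete.
 */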
1986static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1987                   u8 *hash)
1988{
1989        struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1990
1991        struct scatterlist sg[1];
1992        struct ahash_request *req;
1993        struct keyhash_result hresult;
1994        int ret;
1995
1996        init_completion(&hresult.completion);
1997
1998        req = ahash_request_alloc(tfm, GFP_KERNEL);
1999        if (!req)
2000                return -ENOMEM;
2001
2002        /* Keep tfm keylen == 0 during hash of the long key */
2003        ctx->keylen = 0;
2004        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2005                                   keyhash_complete, &hresult);
2006
2007        sg_init_one(&sg[0], key, keylen);
2008
2009        ahash_request_set_crypt(req, sg, hash, keylen);
2010        ret = crypto_ahash_digest(req);
2011        switch (ret) {
2012        case 0:
2013                break;
2014        case -EINPROGRESS:
2015        case -EBUSY:
2016                ret = wait_for_completion_interruptible(
2017                        &hresult.completion);
2018                if (!ret)
2019                        ret = hresult.err;
2020                break;
2021        default:
2022                break;
2023        }
2024        ahash_request_free(req);
2025
2026        return ret;
2027}
2028
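/*
 * HMAC setkey: keys up to one block long are used as-is; longer keys are
 * first hashed down to digestsize, as HMAC requires.
 */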
2029static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2030                        unsigned int keylen)
2031{
2032        struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2033        unsigned int blocksize =
2034                        crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2035        unsigned int digestsize = crypto_ahash_digestsize(tfm);
2036        unsigned int keysize = keylen;
2037        u8 hash[SHA512_DIGEST_SIZE];
2038        int ret;
2039
2040        if (keylen <= blocksize)
2041                memcpy(ctx->key, key, keysize);
2042        else {
2043                /* Must get the hash of the long key */
2044                ret = keyhash(tfm, key, keylen, hash);
2045
2046                if (ret) {
2047                        crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2048                        return -EINVAL;
2049                }
2050
2051                keysize = digestsize;
2052                memcpy(ctx->key, hash, digestsize);
2053        }
2054
2055        ctx->keylen = keysize;
2056
2057        return 0;
2058}
2059
2060
2061struct talitos_alg_template {
2062        u32 type;
2063        union {
2064                struct crypto_alg crypto;
2065                struct ahash_alg hash;
2066                struct aead_alg aead;
2067        } alg;
2068        __be32 desc_hdr_template;
2069};
2070
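/*
 * Table of algorithms registered with the crypto API.  As an illustrative
 * (not driver-specific) example, a kernel user reaches the "sha256" entry
 * below through the generic API, e.g.:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 * and requests then complete asynchronously via the callbacks above.
 */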
2071static struct talitos_alg_template driver_algs[] = {
2072        /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2073        {       .type = CRYPTO_ALG_TYPE_AEAD,
2074                .alg.aead = {
2075                        .base = {
2076                                .cra_name = "authenc(hmac(sha1),cbc(aes))",
2077                                .cra_driver_name = "authenc-hmac-sha1-"
2078                                                   "cbc-aes-talitos",
2079                                .cra_blocksize = AES_BLOCK_SIZE,
2080                                .cra_flags = CRYPTO_ALG_ASYNC,
2081                        },
2082                        .ivsize = AES_BLOCK_SIZE,
2083                        .maxauthsize = SHA1_DIGEST_SIZE,
2084                },
2085                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2086                                     DESC_HDR_SEL0_AESU |
2087                                     DESC_HDR_MODE0_AESU_CBC |
2088                                     DESC_HDR_SEL1_MDEUA |
2089                                     DESC_HDR_MODE1_MDEU_INIT |
2090                                     DESC_HDR_MODE1_MDEU_PAD |
2091                                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2092        },
2093        {       .type = CRYPTO_ALG_TYPE_AEAD,
2094                .alg.aead = {
2095                        .base = {
2096                                .cra_name = "authenc(hmac(sha1),"
2097                                            "cbc(des3_ede))",
2098                                .cra_driver_name = "authenc-hmac-sha1-"
2099                                                   "cbc-3des-talitos",
2100                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2101                                .cra_flags = CRYPTO_ALG_ASYNC,
2102                        },
2103                        .ivsize = DES3_EDE_BLOCK_SIZE,
2104                        .maxauthsize = SHA1_DIGEST_SIZE,
2105                },
2106                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2107                                     DESC_HDR_SEL0_DEU |
2108                                     DESC_HDR_MODE0_DEU_CBC |
2109                                     DESC_HDR_MODE0_DEU_3DES |
2110                                     DESC_HDR_SEL1_MDEUA |
2111                                     DESC_HDR_MODE1_MDEU_INIT |
2112                                     DESC_HDR_MODE1_MDEU_PAD |
2113                                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2114        },
2115        {       .type = CRYPTO_ALG_TYPE_AEAD,
2116                .alg.aead = {
2117                        .base = {
2118                                .cra_name = "authenc(hmac(sha224),cbc(aes))",
2119                                .cra_driver_name = "authenc-hmac-sha224-"
2120                                                   "cbc-aes-talitos",
2121                                .cra_blocksize = AES_BLOCK_SIZE,
2122                                .cra_flags = CRYPTO_ALG_ASYNC,
2123                        },
2124                        .ivsize = AES_BLOCK_SIZE,
2125                        .maxauthsize = SHA224_DIGEST_SIZE,
2126                },
2127                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2128                                     DESC_HDR_SEL0_AESU |
2129                                     DESC_HDR_MODE0_AESU_CBC |
2130                                     DESC_HDR_SEL1_MDEUA |
2131                                     DESC_HDR_MODE1_MDEU_INIT |
2132                                     DESC_HDR_MODE1_MDEU_PAD |
2133                                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2134        },
2135        {       .type = CRYPTO_ALG_TYPE_AEAD,
2136                .alg.aead = {
2137                        .base = {
2138                                .cra_name = "authenc(hmac(sha224),"
2139                                            "cbc(des3_ede))",
2140                                .cra_driver_name = "authenc-hmac-sha224-"
2141                                                   "cbc-3des-talitos",
2142                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2143                                .cra_flags = CRYPTO_ALG_ASYNC,
2144                        },
2145                        .ivsize = DES3_EDE_BLOCK_SIZE,
2146                        .maxauthsize = SHA224_DIGEST_SIZE,
2147                },
2148                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2149                                     DESC_HDR_SEL0_DEU |
2150                                     DESC_HDR_MODE0_DEU_CBC |
2151                                     DESC_HDR_MODE0_DEU_3DES |
2152                                     DESC_HDR_SEL1_MDEUA |
2153                                     DESC_HDR_MODE1_MDEU_INIT |
2154                                     DESC_HDR_MODE1_MDEU_PAD |
2155                                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2156        },
2157        {       .type = CRYPTO_ALG_TYPE_AEAD,
2158                .alg.aead = {
2159                        .base = {
2160                                .cra_name = "authenc(hmac(sha256),cbc(aes))",
2161                                .cra_driver_name = "authenc-hmac-sha256-"
2162                                                   "cbc-aes-talitos",
2163                                .cra_blocksize = AES_BLOCK_SIZE,
2164                                .cra_flags = CRYPTO_ALG_ASYNC,
2165                        },
2166                        .ivsize = AES_BLOCK_SIZE,
2167                        .maxauthsize = SHA256_DIGEST_SIZE,
2168                },
2169                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2170                                     DESC_HDR_SEL0_AESU |
2171                                     DESC_HDR_MODE0_AESU_CBC |
2172                                     DESC_HDR_SEL1_MDEUA |
2173                                     DESC_HDR_MODE1_MDEU_INIT |
2174                                     DESC_HDR_MODE1_MDEU_PAD |
2175                                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2176        },
2177        {       .type = CRYPTO_ALG_TYPE_AEAD,
2178                .alg.aead = {
2179                        .base = {
2180                                .cra_name = "authenc(hmac(sha256),"
2181                                            "cbc(des3_ede))",
2182                                .cra_driver_name = "authenc-hmac-sha256-"
2183                                                   "cbc-3des-talitos",
2184                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2185                                .cra_flags = CRYPTO_ALG_ASYNC,
2186                        },
2187                        .ivsize = DES3_EDE_BLOCK_SIZE,
2188                        .maxauthsize = SHA256_DIGEST_SIZE,
2189                },
2190                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2191                                     DESC_HDR_SEL0_DEU |
2192                                     DESC_HDR_MODE0_DEU_CBC |
2193                                     DESC_HDR_MODE0_DEU_3DES |
2194                                     DESC_HDR_SEL1_MDEUA |
2195                                     DESC_HDR_MODE1_MDEU_INIT |
2196                                     DESC_HDR_MODE1_MDEU_PAD |
2197                                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2198        },
2199        {       .type = CRYPTO_ALG_TYPE_AEAD,
2200                .alg.aead = {
2201                        .base = {
2202                                .cra_name = "authenc(hmac(sha384),cbc(aes))",
2203                                .cra_driver_name = "authenc-hmac-sha384-"
2204                                                   "cbc-aes-talitos",
2205                                .cra_blocksize = AES_BLOCK_SIZE,
2206                                .cra_flags = CRYPTO_ALG_ASYNC,
2207                        },
2208                        .ivsize = AES_BLOCK_SIZE,
2209                        .maxauthsize = SHA384_DIGEST_SIZE,
2210                },
2211                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2212                                     DESC_HDR_SEL0_AESU |
2213                                     DESC_HDR_MODE0_AESU_CBC |
2214                                     DESC_HDR_SEL1_MDEUB |
2215                                     DESC_HDR_MODE1_MDEU_INIT |
2216                                     DESC_HDR_MODE1_MDEU_PAD |
2217                                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2218        },
2219        {       .type = CRYPTO_ALG_TYPE_AEAD,
2220                .alg.aead = {
2221                        .base = {
2222                                .cra_name = "authenc(hmac(sha384),"
2223                                            "cbc(des3_ede))",
2224                                .cra_driver_name = "authenc-hmac-sha384-"
2225                                                   "cbc-3des-talitos",
2226                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2227                                .cra_flags = CRYPTO_ALG_ASYNC,
2228                        },
2229                        .ivsize = DES3_EDE_BLOCK_SIZE,
2230                        .maxauthsize = SHA384_DIGEST_SIZE,
2231                },
2232                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2233                                     DESC_HDR_SEL0_DEU |
2234                                     DESC_HDR_MODE0_DEU_CBC |
2235                                     DESC_HDR_MODE0_DEU_3DES |
2236                                     DESC_HDR_SEL1_MDEUB |
2237                                     DESC_HDR_MODE1_MDEU_INIT |
2238                                     DESC_HDR_MODE1_MDEU_PAD |
2239                                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2240        },
2241        {       .type = CRYPTO_ALG_TYPE_AEAD,
2242                .alg.aead = {
2243                        .base = {
2244                                .cra_name = "authenc(hmac(sha512),cbc(aes))",
2245                                .cra_driver_name = "authenc-hmac-sha512-"
2246                                                   "cbc-aes-talitos",
2247                                .cra_blocksize = AES_BLOCK_SIZE,
2248                                .cra_flags = CRYPTO_ALG_ASYNC,
2249                        },
2250                        .ivsize = AES_BLOCK_SIZE,
2251                        .maxauthsize = SHA512_DIGEST_SIZE,
2252                },
2253                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2254                                     DESC_HDR_SEL0_AESU |
2255                                     DESC_HDR_MODE0_AESU_CBC |
2256                                     DESC_HDR_SEL1_MDEUB |
2257                                     DESC_HDR_MODE1_MDEU_INIT |
2258                                     DESC_HDR_MODE1_MDEU_PAD |
2259                                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2260        },
2261        {       .type = CRYPTO_ALG_TYPE_AEAD,
2262                .alg.aead = {
2263                        .base = {
2264                                .cra_name = "authenc(hmac(sha512),"
2265                                            "cbc(des3_ede))",
2266                                .cra_driver_name = "authenc-hmac-sha512-"
2267                                                   "cbc-3des-talitos",
2268                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2269                                .cra_flags = CRYPTO_ALG_ASYNC,
2270                        },
2271                        .ivsize = DES3_EDE_BLOCK_SIZE,
2272                        .maxauthsize = SHA512_DIGEST_SIZE,
2273                },
2274                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2275                                     DESC_HDR_SEL0_DEU |
2276                                     DESC_HDR_MODE0_DEU_CBC |
2277                                     DESC_HDR_MODE0_DEU_3DES |
2278                                     DESC_HDR_SEL1_MDEUB |
2279                                     DESC_HDR_MODE1_MDEU_INIT |
2280                                     DESC_HDR_MODE1_MDEU_PAD |
2281                                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2282        },
2283        {       .type = CRYPTO_ALG_TYPE_AEAD,
2284                .alg.aead = {
2285                        .base = {
2286                                .cra_name = "authenc(hmac(md5),cbc(aes))",
2287                                .cra_driver_name = "authenc-hmac-md5-"
2288                                                   "cbc-aes-talitos",
2289                                .cra_blocksize = AES_BLOCK_SIZE,
2290                                .cra_flags = CRYPTO_ALG_ASYNC,
2291                        },
2292                        .ivsize = AES_BLOCK_SIZE,
2293                        .maxauthsize = MD5_DIGEST_SIZE,
2294                },
2295                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2296                                     DESC_HDR_SEL0_AESU |
2297                                     DESC_HDR_MODE0_AESU_CBC |
2298                                     DESC_HDR_SEL1_MDEUA |
2299                                     DESC_HDR_MODE1_MDEU_INIT |
2300                                     DESC_HDR_MODE1_MDEU_PAD |
2301                                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2302        },
2303        {       .type = CRYPTO_ALG_TYPE_AEAD,
2304                .alg.aead = {
2305                        .base = {
2306                                .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2307                                .cra_driver_name = "authenc-hmac-md5-"
2308                                                   "cbc-3des-talitos",
2309                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2310                                .cra_flags = CRYPTO_ALG_ASYNC,
2311                        },
2312                        .ivsize = DES3_EDE_BLOCK_SIZE,
2313                        .maxauthsize = MD5_DIGEST_SIZE,
2314                },
2315                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2316                                     DESC_HDR_SEL0_DEU |
2317                                     DESC_HDR_MODE0_DEU_CBC |
2318                                     DESC_HDR_MODE0_DEU_3DES |
2319                                     DESC_HDR_SEL1_MDEUA |
2320                                     DESC_HDR_MODE1_MDEU_INIT |
2321                                     DESC_HDR_MODE1_MDEU_PAD |
2322                                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2323        },
2324        /* ABLKCIPHER algorithms. */
2325        {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2326                .alg.crypto = {
2327                        .cra_name = "ecb(aes)",
2328                        .cra_driver_name = "ecb-aes-talitos",
2329                        .cra_blocksize = AES_BLOCK_SIZE,
2330                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2331                                     CRYPTO_ALG_ASYNC,
2332                        .cra_ablkcipher = {
2333                                .min_keysize = AES_MIN_KEY_SIZE,
2334                                .max_keysize = AES_MAX_KEY_SIZE,
2335                                .ivsize = AES_BLOCK_SIZE,
2336                        }
2337                },
2338                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2339                                     DESC_HDR_SEL0_AESU,
2340        },
2341        {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2342                .alg.crypto = {
2343                        .cra_name = "cbc(aes)",
2344                        .cra_driver_name = "cbc-aes-talitos",
2345                        .cra_blocksize = AES_BLOCK_SIZE,
2346                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2347                                     CRYPTO_ALG_ASYNC,
2348                        .cra_ablkcipher = {
2349                                .min_keysize = AES_MIN_KEY_SIZE,
2350                                .max_keysize = AES_MAX_KEY_SIZE,
2351                                .ivsize = AES_BLOCK_SIZE,
2352                        }
2353                },
2354                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2355                                     DESC_HDR_SEL0_AESU |
2356                                     DESC_HDR_MODE0_AESU_CBC,
2357        },
2358        {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2359                .alg.crypto = {
2360                        .cra_name = "ctr(aes)",
2361                        .cra_driver_name = "ctr-aes-talitos",
2362                        .cra_blocksize = AES_BLOCK_SIZE,
2363                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2364                                     CRYPTO_ALG_ASYNC,
2365                        .cra_ablkcipher = {
2366                                .min_keysize = AES_MIN_KEY_SIZE,
2367                                .max_keysize = AES_MAX_KEY_SIZE,
2368                                .ivsize = AES_BLOCK_SIZE,
2369                        }
2370                },
2371                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2372                                     DESC_HDR_SEL0_AESU |
2373                                     DESC_HDR_MODE0_AESU_CTR,
2374        },
2375        {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2376                .alg.crypto = {
2377                        .cra_name = "ecb(des)",
2378                        .cra_driver_name = "ecb-des-talitos",
2379                        .cra_blocksize = DES_BLOCK_SIZE,
2380                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2381                                     CRYPTO_ALG_ASYNC,
2382                        .cra_ablkcipher = {
2383                                .min_keysize = DES_KEY_SIZE,
2384                                .max_keysize = DES_KEY_SIZE,
2385                                .ivsize = DES_BLOCK_SIZE,
2386                        }
2387                },
2388                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2389                                     DESC_HDR_SEL0_DEU,
2390        },
2391        {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2392                .alg.crypto = {
2393                        .cra_name = "cbc(des)",
2394                        .cra_driver_name = "cbc-des-talitos",
2395                        .cra_blocksize = DES_BLOCK_SIZE,
2396                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2397                                     CRYPTO_ALG_ASYNC,
2398                        .cra_ablkcipher = {
2399                                .min_keysize = DES_KEY_SIZE,
2400                                .max_keysize = DES_KEY_SIZE,
2401                                .ivsize = DES_BLOCK_SIZE,
2402                        }
2403                },
2404                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2405                                     DESC_HDR_SEL0_DEU |
2406                                     DESC_HDR_MODE0_DEU_CBC,
2407        },
2408        {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2409                .alg.crypto = {
2410                        .cra_name = "ecb(des3_ede)",
2411                        .cra_driver_name = "ecb-3des-talitos",
2412                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2413                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2414                                     CRYPTO_ALG_ASYNC,
2415                        .cra_ablkcipher = {
2416                                .min_keysize = DES3_EDE_KEY_SIZE,
2417                                .max_keysize = DES3_EDE_KEY_SIZE,
2418                                .ivsize = DES3_EDE_BLOCK_SIZE,
2419                        }
2420                },
2421                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2422                                     DESC_HDR_SEL0_DEU |
2423                                     DESC_HDR_MODE0_DEU_3DES,
2424        },
2425        {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2426                .alg.crypto = {
2427                        .cra_name = "cbc(des3_ede)",
2428                        .cra_driver_name = "cbc-3des-talitos",
2429                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2430                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2431                                     CRYPTO_ALG_ASYNC,
2432                        .cra_ablkcipher = {
2433                                .min_keysize = DES3_EDE_KEY_SIZE,
2434                                .max_keysize = DES3_EDE_KEY_SIZE,
2435                                .ivsize = DES3_EDE_BLOCK_SIZE,
2436                        }
2437                },
2438                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2439                                     DESC_HDR_SEL0_DEU |
2440                                     DESC_HDR_MODE0_DEU_CBC |
2441                                     DESC_HDR_MODE0_DEU_3DES,
2442        },
2443        /* AHASH algorithms. */
2444        {       .type = CRYPTO_ALG_TYPE_AHASH,
2445                .alg.hash = {
2446                        .halg.digestsize = MD5_DIGEST_SIZE,
2447                        .halg.base = {
2448                                .cra_name = "md5",
2449                                .cra_driver_name = "md5-talitos",
2450                                .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2451                                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2452                                             CRYPTO_ALG_ASYNC,
2453                        }
2454                },
2455                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2456                                     DESC_HDR_SEL0_MDEUA |
2457                                     DESC_HDR_MODE0_MDEU_MD5,
2458        },
2459        {       .type = CRYPTO_ALG_TYPE_AHASH,
2460                .alg.hash = {
2461                        .halg.digestsize = SHA1_DIGEST_SIZE,
2462                        .halg.base = {
2463                                .cra_name = "sha1",
2464                                .cra_driver_name = "sha1-talitos",
2465                                .cra_blocksize = SHA1_BLOCK_SIZE,
2466                                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2467                                             CRYPTO_ALG_ASYNC,
2468                        }
2469                },
2470                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2471                                     DESC_HDR_SEL0_MDEUA |
2472                                     DESC_HDR_MODE0_MDEU_SHA1,
2473        },
2474        {       .type = CRYPTO_ALG_TYPE_AHASH,
2475                .alg.hash = {
2476                        .halg.digestsize = SHA224_DIGEST_SIZE,
2477                        .halg.base = {
2478                                .cra_name = "sha224",
2479                                .cra_driver_name = "sha224-talitos",
2480                                .cra_blocksize = SHA224_BLOCK_SIZE,
2481                                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2482                                             CRYPTO_ALG_ASYNC,
2483                        }
2484                },
2485                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2486                                     DESC_HDR_SEL0_MDEUA |
2487                                     DESC_HDR_MODE0_MDEU_SHA224,
2488        },
2489        {       .type = CRYPTO_ALG_TYPE_AHASH,
2490                .alg.hash = {
2491                        .halg.digestsize = SHA256_DIGEST_SIZE,
2492                        .halg.base = {
2493                                .cra_name = "sha256",
2494                                .cra_driver_name = "sha256-talitos",
2495                                .cra_blocksize = SHA256_BLOCK_SIZE,
2496                                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2497                                             CRYPTO_ALG_ASYNC,
2498                        }
2499                },
2500                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2501                                     DESC_HDR_SEL0_MDEUA |
2502                                     DESC_HDR_MODE0_MDEU_SHA256,
2503        },
2504        {       .type = CRYPTO_ALG_TYPE_AHASH,
2505                .alg.hash = {
2506                        .halg.digestsize = SHA384_DIGEST_SIZE,
2507                        .halg.base = {
2508                                .cra_name = "sha384",
2509                                .cra_driver_name = "sha384-talitos",
2510                                .cra_blocksize = SHA384_BLOCK_SIZE,
2511                                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2512                                             CRYPTO_ALG_ASYNC,
2513                        }
2514                },
2515                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2516                                     DESC_HDR_SEL0_MDEUB |
2517                                     DESC_HDR_MODE0_MDEUB_SHA384,
2518        },
2519        {       .type = CRYPTO_ALG_TYPE_AHASH,
2520                .alg.hash = {
2521                        .halg.digestsize = SHA512_DIGEST_SIZE,
2522                        .halg.base = {
2523                                .cra_name = "sha512",
2524                                .cra_driver_name = "sha512-talitos",
2525                                .cra_blocksize = SHA512_BLOCK_SIZE,
2526                                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2527                                             CRYPTO_ALG_ASYNC,
2528                        }
2529                },
2530                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2531                                     DESC_HDR_SEL0_MDEUB |
2532                                     DESC_HDR_MODE0_MDEUB_SHA512,
2533        },
2534        {       .type = CRYPTO_ALG_TYPE_AHASH,
2535                .alg.hash = {
2536                        .halg.digestsize = MD5_DIGEST_SIZE,
2537                        .halg.base = {
2538                                .cra_name = "hmac(md5)",
2539                                .cra_driver_name = "hmac-md5-talitos",
2540                                .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2541                                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2542                                             CRYPTO_ALG_ASYNC,
2543                        }
2544                },
2545                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2546                                     DESC_HDR_SEL0_MDEUA |
2547                                     DESC_HDR_MODE0_MDEU_MD5,
2548        },
2549        {       .type = CRYPTO_ALG_TYPE_AHASH,
2550                .alg.hash = {
2551                        .halg.digestsize = SHA1_DIGEST_SIZE,
2552                        .halg.base = {
2553                                .cra_name = "hmac(sha1)",
2554                                .cra_driver_name = "hmac-sha1-talitos",
2555                                .cra_blocksize = SHA1_BLOCK_SIZE,
2556                                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2557                                             CRYPTO_ALG_ASYNC,
2558                        }
2559                },
2560                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2561                                     DESC_HDR_SEL0_MDEUA |
2562                                     DESC_HDR_MODE0_MDEU_SHA1,
2563        },
2564        {       .type = CRYPTO_ALG_TYPE_AHASH,
2565                .alg.hash = {
2566                        .halg.digestsize = SHA224_DIGEST_SIZE,
2567                        .halg.base = {
2568                                .cra_name = "hmac(sha224)",
2569                                .cra_driver_name = "hmac-sha224-talitos",
2570                                .cra_blocksize = SHA224_BLOCK_SIZE,
2571                                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2572                                             CRYPTO_ALG_ASYNC,
2573                        }
2574                },
2575                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2576                                     DESC_HDR_SEL0_MDEUA |
2577                                     DESC_HDR_MODE0_MDEU_SHA224,
2578        },
2579        {       .type = CRYPTO_ALG_TYPE_AHASH,
2580                .alg.hash = {
2581                        .halg.digestsize = SHA256_DIGEST_SIZE,
2582                        .halg.base = {
2583                                .cra_name = "hmac(sha256)",
2584                                .cra_driver_name = "hmac-sha256-talitos",
2585                                .cra_blocksize = SHA256_BLOCK_SIZE,
2586                                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2587                                             CRYPTO_ALG_ASYNC,
2588                        }
2589                },
2590                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2591                                     DESC_HDR_SEL0_MDEUA |
2592                                     DESC_HDR_MODE0_MDEU_SHA256,
2593        },
2594        {       .type = CRYPTO_ALG_TYPE_AHASH,
2595                .alg.hash = {
2596                        .halg.digestsize = SHA384_DIGEST_SIZE,
2597                        .halg.base = {
2598                                .cra_name = "hmac(sha384)",
2599                                .cra_driver_name = "hmac-sha384-talitos",
2600                                .cra_blocksize = SHA384_BLOCK_SIZE,
2601                                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2602                                             CRYPTO_ALG_ASYNC,
2603                        }
2604                },
2605                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2606                                     DESC_HDR_SEL0_MDEUB |
2607                                     DESC_HDR_MODE0_MDEUB_SHA384,
2608        },
2609        {       .type = CRYPTO_ALG_TYPE_AHASH,
2610                .alg.hash = {
2611                        .halg.digestsize = SHA512_DIGEST_SIZE,
2612                        .halg.base = {
2613                                .cra_name = "hmac(sha512)",
2614                                .cra_driver_name = "hmac-sha512-talitos",
2615                                .cra_blocksize = SHA512_BLOCK_SIZE,
2616                                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2617                                             CRYPTO_ALG_ASYNC,
2618                        }
2619                },
2620                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2621                                     DESC_HDR_SEL0_MDEUB |
2622                                     DESC_HDR_MODE0_MDEUB_SHA512,
2623        }
2624};
2625
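/* a registered algorithm template bound to the device that services it */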
2626struct talitos_crypto_alg {
2627        struct list_head entry;
2628        struct device *dev;
2629        struct talitos_alg_template algt;
2630};
2631
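/*
 * common transform init: recover the talitos_crypto_alg that owns this
 * crypto_alg (hash algorithms are reached through their enclosing
 * ahash_alg), then seed the context with the device, a channel and the
 * descriptor header template used to build requests.
 */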
2632static int talitos_cra_init(struct crypto_tfm *tfm)
2633{
2634        struct crypto_alg *alg = tfm->__crt_alg;
2635        struct talitos_crypto_alg *talitos_alg;
2636        struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2637        struct talitos_private *priv;
2638
2639        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2640                talitos_alg = container_of(__crypto_ahash_alg(alg),
2641                                           struct talitos_crypto_alg,
2642                                           algt.alg.hash);
2643        else
2644                talitos_alg = container_of(alg, struct talitos_crypto_alg,
2645                                           algt.alg.crypto);
2646
2647        /* update context with ptr to dev */
2648        ctx->dev = talitos_alg->dev;
2649
2650        /* assign SEC channel to tfm in round-robin fashion */
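        /* (num_channels is validated as a power of two, so the mask wraps) */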
2651        priv = dev_get_drvdata(ctx->dev);
2652        ctx->ch = atomic_inc_return(&priv->last_chan) &
2653                  (priv->num_channels - 1);
2654
2655        /* copy descriptor header template value */
2656        ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2657
2658        /* select done notification */
2659        ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2660
2661        return 0;
2662}
2663
2664static int talitos_cra_init_aead(struct crypto_aead *tfm)
2665{
2666        talitos_cra_init(crypto_aead_tfm(tfm));
2667        return 0;
2668}
2669
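/* ahash init: clear the key length and size the per-request context */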
2670static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2671{
2672        struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2673
2674        talitos_cra_init(tfm);
2675
2676        ctx->keylen = 0;
2677        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2678                                 sizeof(struct talitos_ahash_req_ctx));
2679
2680        return 0;
2681}
2682
2683/*
2684 * given the alg's descriptor header template, determine whether the
2685 * descriptor type and the required primary/secondary execution units
2686 * match the hw capabilities described in the device tree node.
2687 */
2688static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2689{
2690        struct talitos_private *priv = dev_get_drvdata(dev);
2691        int ret;
2692
2693        ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2694              (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2695
2696        if (SECONDARY_EU(desc_hdr_template))
2697                ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2698                              & priv->exec_units);
2699
2700        return ret;
2701}
2702
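/*
 * undo talitos_probe(): unregister the algorithms and the hwrng, free the
 * per-channel request fifos and the channel array, release the irq lines,
 * kill the done tasklets and unmap the register block.  Also used as the
 * error unwind path of talitos_probe().
 */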
2703static int talitos_remove(struct platform_device *ofdev)
2704{
2705        struct device *dev = &ofdev->dev;
2706        struct talitos_private *priv = dev_get_drvdata(dev);
2707        struct talitos_crypto_alg *t_alg, *n;
2708        int i;
2709
2710        list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2711                switch (t_alg->algt.type) {
                case CRYPTO_ALG_TYPE_ABLKCIPHER:
                        crypto_unregister_alg(&t_alg->algt.alg.crypto);
                        break;
                case CRYPTO_ALG_TYPE_AEAD:
                        crypto_unregister_aead(&t_alg->algt.alg.aead);
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        crypto_unregister_ahash(&t_alg->algt.alg.hash);
                        break;
2719                }
2720                list_del(&t_alg->entry);
2721                kfree(t_alg);
2722        }
2723
2724        if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2725                talitos_unregister_rng(dev);
2726
2727        for (i = 0; priv->chan && i < priv->num_channels; i++)
2728                kfree(priv->chan[i].fifo);
2729
2730        kfree(priv->chan);
2731
2732        for (i = 0; i < 2; i++)
2733                if (priv->irq[i]) {
2734                        free_irq(priv->irq[i], dev);
2735                        irq_dispose_mapping(priv->irq[i]);
2736                }
2737
2738        tasklet_kill(&priv->done_task[0]);
2739        if (priv->irq[1])
2740                tasklet_kill(&priv->done_task[1]);
2741
2742        iounmap(priv->reg);
2743
2744        kfree(priv);
2745
2746        return 0;
2747}
2748
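/*
 * build a talitos_crypto_alg for one driver_algs[] entry: copy the
 * template, hook up the type-specific operations, reject hmac algorithms
 * when the hardware lacks HMAC support, fall back to a software sha224
 * init (on top of the SHA-256 mode) when there is no SHA-224 hardware
 * init, and fill in the common cra_* fields.
 */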
2749static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2750                                                    struct talitos_alg_template
2751                                                           *template)
2752{
2753        struct talitos_private *priv = dev_get_drvdata(dev);
2754        struct talitos_crypto_alg *t_alg;
2755        struct crypto_alg *alg;
2756
2757        t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2758        if (!t_alg)
2759                return ERR_PTR(-ENOMEM);
2760
2761        t_alg->algt = *template;
2762
2763        switch (t_alg->algt.type) {
2764        case CRYPTO_ALG_TYPE_ABLKCIPHER:
2765                alg = &t_alg->algt.alg.crypto;
2766                alg->cra_init = talitos_cra_init;
2767                alg->cra_type = &crypto_ablkcipher_type;
2768                alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2769                alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2770                alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2771                alg->cra_ablkcipher.geniv = "eseqiv";
2772                break;
2773        case CRYPTO_ALG_TYPE_AEAD:
2774                alg = &t_alg->algt.alg.aead.base;
2775                t_alg->algt.alg.aead.init = talitos_cra_init_aead;
2776                t_alg->algt.alg.aead.setkey = aead_setkey;
2777                t_alg->algt.alg.aead.encrypt = aead_encrypt;
2778                t_alg->algt.alg.aead.decrypt = aead_decrypt;
2779                break;
2780        case CRYPTO_ALG_TYPE_AHASH:
2781                alg = &t_alg->algt.alg.hash.halg.base;
2782                alg->cra_init = talitos_cra_init_ahash;
2783                alg->cra_type = &crypto_ahash_type;
2784                t_alg->algt.alg.hash.init = ahash_init;
2785                t_alg->algt.alg.hash.update = ahash_update;
2786                t_alg->algt.alg.hash.final = ahash_final;
2787                t_alg->algt.alg.hash.finup = ahash_finup;
2788                t_alg->algt.alg.hash.digest = ahash_digest;
2789                t_alg->algt.alg.hash.setkey = ahash_setkey;
2790
2791                if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2792                    !strncmp(alg->cra_name, "hmac", 4)) {
2793                        kfree(t_alg);
2794                        return ERR_PTR(-ENOTSUPP);
2795                }
2796                if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2797                    (!strcmp(alg->cra_name, "sha224") ||
2798                     !strcmp(alg->cra_name, "hmac(sha224)"))) {
2799                        t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2800                        t_alg->algt.desc_hdr_template =
2801                                        DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2802                                        DESC_HDR_SEL0_MDEUA |
2803                                        DESC_HDR_MODE0_MDEU_SHA256;
2804                }
2805                break;
2806        default:
2807                dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2808                kfree(t_alg);
2809                return ERR_PTR(-EINVAL);
2810        }
2811
2812        alg->cra_module = THIS_MODULE;
2813        alg->cra_priority = TALITOS_CRA_PRIORITY;
2814        alg->cra_alignmask = 0;
2815        alg->cra_ctxsize = sizeof(struct talitos_ctx);
2816        alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2817
2818        t_alg->dev = dev;
2819
2820        return t_alg;
2821}
2822
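/*
 * map and request the SEC interrupt line(s) described in the device tree
 * node: SEC1 and single-interrupt SEC2+ parts service every channel from
 * one handler, while dual-interrupt parts split channels 0/2 and 1/3
 * across the two lines.
 */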
2823static int talitos_probe_irq(struct platform_device *ofdev)
2824{
2825        struct device *dev = &ofdev->dev;
2826        struct device_node *np = ofdev->dev.of_node;
2827        struct talitos_private *priv = dev_get_drvdata(dev);
2828        int err;
2829        bool is_sec1 = has_ftr_sec1(priv);
2830
2831        priv->irq[0] = irq_of_parse_and_map(np, 0);
2832        if (!priv->irq[0]) {
2833                dev_err(dev, "failed to map irq\n");
2834                return -EINVAL;
2835        }
2836        if (is_sec1) {
2837                err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
2838                                  dev_driver_string(dev), dev);
2839                goto primary_out;
2840        }
2841
2842        priv->irq[1] = irq_of_parse_and_map(np, 1);
2843
2844        /* only one irq line: service all four channels from it */
2845        if (!priv->irq[1]) {
2846                err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
2847                                  dev_driver_string(dev), dev);
2848                goto primary_out;
2849        }
2850
2851        err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
2852                          dev_driver_string(dev), dev);
2853        if (err)
2854                goto primary_out;
2855
2856        /* request the secondary irq line (channels 1 and 3) */
2857        err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
2858                          dev_driver_string(dev), dev);
2859        if (err) {
2860                dev_err(dev, "failed to request secondary irq\n");
2861                irq_dispose_mapping(priv->irq[1]);
2862                priv->irq[1] = 0;
2863        }
2864
2865        return err;
2866
2867primary_out:
2868        if (err) {
2869                dev_err(dev, "failed to request primary irq\n");
2870                irq_dispose_mapping(priv->irq[0]);
2871                priv->irq[0] = 0;
2872        }
2873
2874        return err;
2875}
2876
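/*
 * probe: map the register block, read the capability properties from the
 * device tree, set up irqs, tasklets and per-channel request fifos, then
 * register the hwrng and every algorithm the hardware supports.
 *
 * The SEC node is expected to look roughly like this (values below are
 * illustrative only; the real ones come from the SoC device tree):
 *
 *      crypto@30000 {
 *              compatible = "fsl,sec2.0";
 *              reg = <0x30000 0x10000>;
 *              interrupts = <11 2>;
 *              fsl,num-channels = <4>;
 *              fsl,channel-fifo-len = <24>;
 *              fsl,exec-units-mask = <0xfe>;
 *              fsl,descriptor-types-mask = <0x12b0ebf>;
 *      };
 */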
2877static int talitos_probe(struct platform_device *ofdev)
2878{
2879        struct device *dev = &ofdev->dev;
2880        struct device_node *np = ofdev->dev.of_node;
2881        struct talitos_private *priv;
2882        const unsigned int *prop;
2883        int i, err;
2884        int stride;
2885
2886        priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2887        if (!priv)
2888                return -ENOMEM;
2889
2890        INIT_LIST_HEAD(&priv->alg_list);
2891
2892        dev_set_drvdata(dev, priv);
2893
2894        priv->ofdev = ofdev;
2895
2896        spin_lock_init(&priv->reg_lock);
2897
2898        priv->reg = of_iomap(np, 0);
2899        if (!priv->reg) {
2900                dev_err(dev, "failed to of_iomap\n");
2901                err = -ENOMEM;
2902                goto err_out;
2903        }
2904
2905        /* get SEC version capabilities from device tree */
2906        prop = of_get_property(np, "fsl,num-channels", NULL);
2907        if (prop)
2908                priv->num_channels = *prop;
2909
2910        prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2911        if (prop)
2912                priv->chfifo_len = *prop;
2913
2914        prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2915        if (prop)
2916                priv->exec_units = *prop;
2917
2918        prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2919        if (prop)
2920                priv->desc_types = *prop;
2921
2922        if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2923            !priv->exec_units || !priv->desc_types) {
2924                dev_err(dev, "invalid property data in device tree node\n");
2925                err = -EINVAL;
2926                goto err_out;
2927        }
2928
2929        if (of_device_is_compatible(np, "fsl,sec3.0"))
2930                priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2931
2932        if (of_device_is_compatible(np, "fsl,sec2.1"))
2933                priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2934                                  TALITOS_FTR_SHA224_HWINIT |
2935                                  TALITOS_FTR_HMAC_OK;
2936
2937        if (of_device_is_compatible(np, "fsl,sec1.0"))
2938                priv->features |= TALITOS_FTR_SEC1;
2939
2940        if (of_device_is_compatible(np, "fsl,sec1.2")) {
2941                priv->reg_deu = priv->reg + TALITOS12_DEU;
2942                priv->reg_aesu = priv->reg + TALITOS12_AESU;
2943                priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
2944                stride = TALITOS1_CH_STRIDE;
2945        } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
2946                priv->reg_deu = priv->reg + TALITOS10_DEU;
2947                priv->reg_aesu = priv->reg + TALITOS10_AESU;
2948                priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
2949                priv->reg_afeu = priv->reg + TALITOS10_AFEU;
2950                priv->reg_rngu = priv->reg + TALITOS10_RNGU;
2951                priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
2952                stride = TALITOS1_CH_STRIDE;
2953        } else {
2954                priv->reg_deu = priv->reg + TALITOS2_DEU;
2955                priv->reg_aesu = priv->reg + TALITOS2_AESU;
2956                priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
2957                priv->reg_afeu = priv->reg + TALITOS2_AFEU;
2958                priv->reg_rngu = priv->reg + TALITOS2_RNGU;
2959                priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
2960                priv->reg_keu = priv->reg + TALITOS2_KEU;
2961                priv->reg_crcu = priv->reg + TALITOS2_CRCU;
2962                stride = TALITOS2_CH_STRIDE;
2963        }
2964
2965        err = talitos_probe_irq(ofdev);
2966        if (err)
2967                goto err_out;
2968
2969        if (of_device_is_compatible(np, "fsl,sec1.0")) {
2970                tasklet_init(&priv->done_task[0], talitos1_done_4ch,
2971                             (unsigned long)dev);
2972        } else {
2973                if (!priv->irq[1]) {
2974                        tasklet_init(&priv->done_task[0], talitos2_done_4ch,
2975                                     (unsigned long)dev);
2976                } else {
2977                        tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
2978                                     (unsigned long)dev);
2979                        tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
2980                                     (unsigned long)dev);
2981                }
2982        }
2983
2984        priv->chan = kzalloc(sizeof(struct talitos_channel) *
2985                             priv->num_channels, GFP_KERNEL);
2986        if (!priv->chan) {
2987                dev_err(dev, "failed to allocate channel management space\n");
2988                err = -ENOMEM;
2989                goto err_out;
2990        }
2991
2992        priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2993
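        /*
         * per-channel setup: locate the channel's register block,
         * initialize the head/tail locks, allocate its request fifo and
         * prime the submit counter used to bound outstanding requests.
         */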
2994        for (i = 0; i < priv->num_channels; i++) {
2995                priv->chan[i].reg = priv->reg + stride * (i + 1);
2996                if (!priv->irq[1] || !(i & 1))
2997                        priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
2998
2999                spin_lock_init(&priv->chan[i].head_lock);
3000                spin_lock_init(&priv->chan[i].tail_lock);
3001
3002                priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
3003                                             priv->fifo_len, GFP_KERNEL);
3004                if (!priv->chan[i].fifo) {
3005                        dev_err(dev, "failed to allocate request fifo %d\n", i);
3006                        err = -ENOMEM;
3007                        goto err_out;
3008                }
3009
3010                atomic_set(&priv->chan[i].submit_count,
3011                           -(priv->chfifo_len - 1));
3012        }
3013
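        /* descriptor pointers can exceed 32 bits, so use a 36-bit DMA mask */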
3014        dma_set_mask(dev, DMA_BIT_MASK(36));
3015
3016        /* reset and initialize the h/w */
3017        err = init_device(dev);
3018        if (err) {
3019                dev_err(dev, "failed to initialize device\n");
3020                goto err_out;
3021        }
3022
3023        /* register the RNG, if available */
3024        if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3025                err = talitos_register_rng(dev);
3026                if (err) {
3027                        dev_err(dev, "failed to register hwrng: %d\n", err);
3028                        goto err_out;
3029                } else
3030                        dev_info(dev, "hwrng\n");
3031        }
3032
3033        /* register crypto algorithms the device supports */
3034        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3035                if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3036                        struct talitos_crypto_alg *t_alg;
3037                        struct crypto_alg *alg = NULL;
3038
3039                        t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3040                        if (IS_ERR(t_alg)) {
3041                                err = PTR_ERR(t_alg);
3042                                if (err == -ENOTSUPP)
3043                                        continue;
3044                                goto err_out;
3045                        }
3046
3047                        switch (t_alg->algt.type) {
3048                        case CRYPTO_ALG_TYPE_ABLKCIPHER:
3049                                err = crypto_register_alg(
3050                                                &t_alg->algt.alg.crypto);
3051                                alg = &t_alg->algt.alg.crypto;
3052                                break;
3053
3054                        case CRYPTO_ALG_TYPE_AEAD:
3055                                err = crypto_register_aead(
3056                                        &t_alg->algt.alg.aead);
3057                                alg = &t_alg->algt.alg.aead.base;
3058                                break;
3059
3060                        case CRYPTO_ALG_TYPE_AHASH:
3061                                err = crypto_register_ahash(
3062                                                &t_alg->algt.alg.hash);
3063                                alg = &t_alg->algt.alg.hash.halg.base;
3064                                break;
3065                        }
3066                        if (err) {
3067                                dev_err(dev, "%s alg registration failed\n",
3068                                        alg->cra_driver_name);
3069                                kfree(t_alg);
3070                        } else
3071                                list_add_tail(&t_alg->entry, &priv->alg_list);
3072                }
3073        }
3074        if (!list_empty(&priv->alg_list))
3075                dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3076                         (char *)of_get_property(np, "compatible", NULL));
3077
3078        return 0;
3079
3080err_out:
3081        talitos_remove(ofdev);
3082
3083        return err;
3084}
3085
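/*
 * device tree match table: only the base "fsl,sec1.0" / "fsl,sec2.0"
 * strings are listed; nodes for later revisions (sec1.2, sec2.1, sec3.0)
 * are expected to include a base string in their compatible list, while
 * talitos_probe() checks the more specific strings for feature flags.
 */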
3086static const struct of_device_id talitos_match[] = {
3087#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3088        {
3089                .compatible = "fsl,sec1.0",
3090        },
3091#endif
3092#ifdef CONFIG_CRYPTO_DEV_TALITOS2
3093        {
3094                .compatible = "fsl,sec2.0",
3095        },
3096#endif
3097        {},
3098};
3099MODULE_DEVICE_TABLE(of, talitos_match);
3100
3101static struct platform_driver talitos_driver = {
3102        .driver = {
3103                .name = "talitos",
3104                .of_match_table = talitos_match,
3105        },
3106        .probe = talitos_probe,
3107        .remove = talitos_remove,
3108};
3109
3110module_platform_driver(talitos_driver);
3111
3112MODULE_LICENSE("GPL");
3113MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3114MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
3115