linux/drivers/crypto/amcc/crypto4xx_core.c
/**
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This file implements the AMCC crypto offload Linux device driver for
 * use with the Linux CryptoAPI.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"

#define PPC4XX_SEC_VERSION_STR                  "0.5"

/**
 * PPC4xx Crypto Engine Initialization Routine
 */
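/*
 * A quick summary of the sequence below (a reading aid, not part of the
 * original comments): program the byte-order configuration, assert and
 * release reset of the packet engine DMA, seed the hardware PRNG, point
 * the engine at the PD/GD/SD rings built by the crypto4xx_build_*()
 * helpers, then clear and unmask the packet-done interrupt. The rings
 * must already be allocated when this runs (see crypto4xx_probe()).
 */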
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
        union ce_ring_size ring_size;
        union ce_ring_contol ring_ctrl; /* sic: spelling matches crypto4xx_reg_def.h */
        union ce_part_ring_size part_ring_size;
        union ce_io_threshold io_threshold;
        u32 rand_num;
        union ce_pe_dma_cfg pe_dma_cfg;
        u32 device_ctrl;

        writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
        /* set up PE DMA and assert reset of SG, PDR and PE */
        pe_dma_cfg.w = 0;
        pe_dma_cfg.bf.bo_sgpd_en = 1;
        pe_dma_cfg.bf.bo_data_en = 0;
        pe_dma_cfg.bf.bo_sa_en = 1;
        pe_dma_cfg.bf.bo_pd_en = 1;
        pe_dma_cfg.bf.dynamic_sa_en = 1;
        pe_dma_cfg.bf.reset_sg = 1;
        pe_dma_cfg.bf.reset_pdr = 1;
        pe_dma_cfg.bf.reset_pe = 1;
        writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
        /* release PE, SG and PDR from reset */
        pe_dma_cfg.bf.pe_mode = 0;
        pe_dma_cfg.bf.reset_sg = 0;
        pe_dma_cfg.bf.reset_pdr = 0;
        pe_dma_cfg.bf.reset_pe = 0;
        pe_dma_cfg.bf.bo_td_en = 0;
        writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
        writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
        /* the PD ring is also used as the result ring */
        writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
        writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
        get_random_bytes(&rand_num, sizeof(rand_num));
        writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
        get_random_bytes(&rand_num, sizeof(rand_num));
        writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
        ring_size.w = 0;
        ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
        ring_size.bf.ring_size   = PPC4XX_NUM_PD;
        writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
        ring_ctrl.w = 0;
        writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
        device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
        device_ctrl |= PPC4XX_DC_3DES_EN;
        writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
        writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
        writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
        part_ring_size.w = 0;
        part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
        part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
        writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
        writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
        io_threshold.w = 0;
        io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
        io_threshold.bf.input_threshold  = PPC4XX_INPUT_THRESHOLD;
        writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
        writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
        /* take PE, SG and PDR out of reset and enable the packet engine */
        pe_dma_cfg.bf.pe_mode = 1;
        pe_dma_cfg.bf.reset_sg = 0;
        pe_dma_cfg.bf.reset_pdr = 0;
        pe_dma_cfg.bf.reset_pe = 0;
        pe_dma_cfg.bf.bo_td_en = 0;
        writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
        /* clear all pending interrupts */
        writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
        writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
        writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
        writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
        writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
}

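/*
 * Security association (SA) helpers. Note that sa_len and the size
 * argument below are counted in 32-bit words, hence the "* 4" when
 * converting to a byte count for the DMA API.
 */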
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
        ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
                                        &ctx->sa_in_dma_addr, GFP_ATOMIC);
        if (ctx->sa_in == NULL)
                return -ENOMEM;

        ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
                                         &ctx->sa_out_dma_addr, GFP_ATOMIC);
        if (ctx->sa_out == NULL) {
                /* free with the size we just allocated, not the stale sa_len */
                dma_free_coherent(ctx->dev->core_dev->device,
                                  size * 4,
                                  ctx->sa_in, ctx->sa_in_dma_addr);
                return -ENOMEM;
        }

        memset(ctx->sa_in, 0, size * 4);
        memset(ctx->sa_out, 0, size * 4);
        ctx->sa_len = size;

        return 0;
}

void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
        if (ctx->sa_in != NULL)
                dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
                                  ctx->sa_in, ctx->sa_in_dma_addr);
        if (ctx->sa_out != NULL)
                dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
                                  ctx->sa_out, ctx->sa_out_dma_addr);

        ctx->sa_in_dma_addr = 0;
        ctx->sa_out_dma_addr = 0;
        ctx->sa_len = 0;
}

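/*
 * The state record is the engine's per-session running context: it
 * holds the saved IV (filled in by crypto4xx_build_pd()) and the
 * intermediate hash digest (see the use of save_digest in
 * crypto4xx_copy_digest_to_dst() below).
 */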
u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
{
        ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
                                sizeof(struct sa_state_record),
                                &ctx->state_record_dma_addr, GFP_ATOMIC);
        if (!ctx->state_record)
                return -ENOMEM;
        memset(ctx->state_record, 0, sizeof(struct sa_state_record));

        return 0;
}

void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
{
        if (ctx->state_record != NULL)
                dma_free_coherent(ctx->dev->core_dev->device,
                                  sizeof(struct sa_state_record),
                                  ctx->state_record,
                                  ctx->state_record_dma_addr);
        ctx->state_record_dma_addr = 0;
}

/**
 * alloc memory for the packet descriptor ring, along with the
 * shadow SA pool and shadow state-record pool that back each entry
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
        int i;
        struct pd_uinfo *pd_uinfo;

        dev->pdr = dma_alloc_coherent(dev->core_dev->device,
                                      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
                                      &dev->pdr_pa, GFP_ATOMIC);
        if (!dev->pdr)
                return -ENOMEM;

        dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
                                 GFP_KERNEL);
        if (!dev->pdr_uinfo)
                goto err_free_pdr;

        memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
        dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
                                   256 * PPC4XX_NUM_PD,
                                   &dev->shadow_sa_pool_pa,
                                   GFP_ATOMIC);
        if (!dev->shadow_sa_pool)
                goto err_free_uinfo;

        dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
                         sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
                         &dev->shadow_sr_pool_pa, GFP_ATOMIC);
        if (!dev->shadow_sr_pool)
                goto err_free_sa_pool;

        for (i = 0; i < PPC4XX_NUM_PD; i++) {
                pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
                                                sizeof(struct pd_uinfo) * i);

                /* 256 bytes is enough for any kind of dynamic sa */
                pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
                pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;

                /* state record */
                pd_uinfo->sr_va = dev->shadow_sr_pool +
                    sizeof(struct sa_state_record) * i;
                pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
                    sizeof(struct sa_state_record) * i;
        }

        return 0;

err_free_sa_pool:
        dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
                          dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
err_free_uinfo:
        kfree(dev->pdr_uinfo);
err_free_pdr:
        dma_free_coherent(dev->core_dev->device,
                          sizeof(struct ce_pd) * PPC4XX_NUM_PD,
                          dev->pdr, dev->pdr_pa);
        return -ENOMEM;
}

static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
        if (dev->pdr != NULL)
                dma_free_coherent(dev->core_dev->device,
                                  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
                                  dev->pdr, dev->pdr_pa);
        if (dev->shadow_sa_pool)
                dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
                                  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
        if (dev->shadow_sr_pool)
                dma_free_coherent(dev->core_dev->device,
                        sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
                        dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

        kfree(dev->pdr_uinfo);
}

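/*
 * All three rings (PD, GD, SD) follow the usual circular-buffer
 * convention: head is where new entries are claimed, tail is where
 * completed entries are retired, head == tail means empty, and one
 * slot is kept unused so that "head + 1 == tail" unambiguously means
 * full. At most PPC4XX_NUM_PD - 1 packet descriptors can therefore
 * be in flight at once.
 */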
static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
        u32 retval;
        u32 tmp;

        retval = dev->pdr_head;
        tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

        if (tmp == dev->pdr_tail)
                return ERING_WAS_FULL;

        dev->pdr_head = tmp;

        return retval;
}

static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
        struct pd_uinfo *pd_uinfo;
        unsigned long flags;

        pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
                                       sizeof(struct pd_uinfo) * idx);
        spin_lock_irqsave(&dev->core_dev->lock, flags);
        if (dev->pdr_tail != PPC4XX_LAST_PD)
                dev->pdr_tail++;
        else
                dev->pdr_tail = 0;
        pd_uinfo->state = PD_ENTRY_FREE;
        spin_unlock_irqrestore(&dev->core_dev->lock, flags);

        return 0;
}

static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
                                       dma_addr_t *pd_dma, u32 idx)
{
        *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;

        return dev->pdr + sizeof(struct ce_pd) * idx;
}

/**
 * alloc memory for the gather ring
 * no need to alloc buf for the ring
 * gdr_tail, gdr_head and gdr_count are initialized by this function
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
        dev->gdr = dma_alloc_coherent(dev->core_dev->device,
                                      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
                                      &dev->gdr_pa, GFP_ATOMIC);
        if (!dev->gdr)
                return -ENOMEM;

        memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);

        return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
        dma_free_coherent(dev->core_dev->device,
                          sizeof(struct ce_gd) * PPC4XX_NUM_GD,
                          dev->gdr, dev->gdr_pa);
}

/*
 * when this function is called, preemption and interrupts must
 * already be disabled (the caller holds core_dev->lock)
 */
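/*
 * Worked example of the wraparound check below (ring size chosen for
 * illustration only): if PPC4XX_NUM_GD were 256, with gdr_head = 250
 * and gdr_tail = 10, a request for n = 20 gives
 * tmp = (250 + 20) % 256 = 14; tmp < head and tmp >= tail, so the
 * allocation would overrun un-retired entries and ERING_WAS_FULL is
 * returned.
 */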
u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
        u32 retval;
        u32 tmp;

        if (n >= PPC4XX_NUM_GD)
                return ERING_WAS_FULL;

        retval = dev->gdr_head;
        tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
        if (dev->gdr_head > dev->gdr_tail) {
                if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
                        return ERING_WAS_FULL;
        } else if (dev->gdr_head < dev->gdr_tail) {
                if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
                        return ERING_WAS_FULL;
        }
        dev->gdr_head = tmp;

        return retval;
}

static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->core_dev->lock, flags);
        if (dev->gdr_tail == dev->gdr_head) {
                spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                return 0;
        }

        if (dev->gdr_tail != PPC4XX_LAST_GD)
                dev->gdr_tail++;
        else
                dev->gdr_tail = 0;

        spin_unlock_irqrestore(&dev->core_dev->lock, flags);

        return 0;
}

static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
                                              dma_addr_t *gd_dma, u32 idx)
{
        *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

        return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
}

/**
 * alloc memory for the scatter ring
 * need to alloc buf for the ring
 * sdr_tail, sdr_head and sdr_count are initialized by this function
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
        int i;
        struct ce_sd *sd_array;

        /* alloc memory for scatter descriptor ring */
        dev->sdr = dma_alloc_coherent(dev->core_dev->device,
                                      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
                                      &dev->sdr_pa, GFP_ATOMIC);
        if (!dev->sdr)
                return -ENOMEM;

        dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
        dev->scatter_buffer_va =
                dma_alloc_coherent(dev->core_dev->device,
                        dev->scatter_buffer_size * PPC4XX_NUM_SD,
                        &dev->scatter_buffer_pa, GFP_ATOMIC);
        if (!dev->scatter_buffer_va) {
                dma_free_coherent(dev->core_dev->device,
                                  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
                                  dev->sdr, dev->sdr_pa);
                return -ENOMEM;
        }

        sd_array = dev->sdr;

        for (i = 0; i < PPC4XX_NUM_SD; i++) {
                sd_array[i].ptr = dev->scatter_buffer_pa +
                                  dev->scatter_buffer_size * i;
        }

        return 0;
}

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
        if (dev->sdr != NULL)
                dma_free_coherent(dev->core_dev->device,
                                  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
                                  dev->sdr, dev->sdr_pa);

        if (dev->scatter_buffer_va != NULL)
                dma_free_coherent(dev->core_dev->device,
                                  dev->scatter_buffer_size * PPC4XX_NUM_SD,
                                  dev->scatter_buffer_va,
                                  dev->scatter_buffer_pa);
}

/*
 * when this function is called, preemption and interrupts must
 * already be disabled (the caller holds core_dev->lock)
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
        u32 retval;
        u32 tmp;

        if (n >= PPC4XX_NUM_SD)
                return ERING_WAS_FULL;

        retval = dev->sdr_head;
        tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
        if (dev->sdr_head > dev->sdr_tail) {
                if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
                        return ERING_WAS_FULL;
        } else if (dev->sdr_head < dev->sdr_tail) {
                if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
                        return ERING_WAS_FULL;
        } /* the head == tail (empty) case is already taken care of */
        dev->sdr_head = tmp;

        return retval;
}

static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->core_dev->lock, flags);
        if (dev->sdr_tail == dev->sdr_head) {
                spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                return 0;
        }
        if (dev->sdr_tail != PPC4XX_LAST_SD)
                dev->sdr_tail++;
        else
                dev->sdr_tail = 0;
        spin_unlock_irqrestore(&dev->core_dev->lock, flags);

        return 0;
}

static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
                                              dma_addr_t *sd_dma, u32 idx)
{
        *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

        return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
}

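/*
 * Copy data for one destination sg chunk out of the scatter ring.
 * Three cases: the remaining chunk is larger than one scatter buffer
 * (drain a whole buffer and return 1 so the caller loops), smaller
 * than one buffer (drain part of the current buffer), or exactly one
 * buffer. A return of 0 means this chunk is finished.
 */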
static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
                                   dma_addr_t *addr, u32 *length,
                                   u32 *idx, u32 *offset, u32 *nbytes)
{
        u32 len;

        if (*length > dev->scatter_buffer_size) {
                memcpy(phys_to_virt(*addr),
                        dev->scatter_buffer_va +
                        *idx * dev->scatter_buffer_size + *offset,
                        dev->scatter_buffer_size);
                *offset = 0;
                *length -= dev->scatter_buffer_size;
                *nbytes -= dev->scatter_buffer_size;
                if (*idx == PPC4XX_LAST_SD)
                        *idx = 0;
                else
                        (*idx)++;
                *addr = *addr + dev->scatter_buffer_size;
                return 1;
        } else if (*length < dev->scatter_buffer_size) {
                memcpy(phys_to_virt(*addr),
                        dev->scatter_buffer_va +
                        *idx * dev->scatter_buffer_size + *offset, *length);
                if ((*offset + *length) == dev->scatter_buffer_size) {
                        if (*idx == PPC4XX_LAST_SD)
                                *idx = 0;
                        else
                                (*idx)++;
                        *nbytes -= *length;
                        *offset = 0;
                } else {
                        *nbytes -= *length;
                        *offset += *length;
                }

                return 0;
        } else {
                len = (*nbytes <= dev->scatter_buffer_size) ?
                                (*nbytes) : dev->scatter_buffer_size;
                memcpy(phys_to_virt(*addr),
                        dev->scatter_buffer_va +
                        *idx * dev->scatter_buffer_size + *offset,
                        len);
                *offset = 0;
                *nbytes -= len;

                if (*idx == PPC4XX_LAST_SD)
                        *idx = 0;
                else
                        (*idx)++;

                return 0;
        }
}

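/*
 * Walk the destination scatterlist and drain the packet engine's
 * scatter buffers into it, one sg entry at a time, once the engine
 * has finished writing.
 */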
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
                                      struct ce_pd *pd,
                                      struct pd_uinfo *pd_uinfo,
                                      u32 nbytes,
                                      struct scatterlist *dst)
{
        dma_addr_t addr;
        u32 this_sd;
        u32 offset;
        u32 len;
        u32 i;
        u32 sg_len;
        struct scatterlist *sg;

        this_sd = pd_uinfo->first_sd;
        offset = 0;
        i = 0;

        while (nbytes) {
                sg = &dst[i];
                sg_len = sg->length;
                addr = dma_map_page(dev->core_dev->device, sg_page(sg),
                                sg->offset, sg->length, DMA_TO_DEVICE);

                if (offset == 0) {
                        len = (nbytes <= sg->length) ? nbytes : sg->length;
                        while (crypto4xx_fill_one_page(dev, &addr, &len,
                                &this_sd, &offset, &nbytes))
                                ;
                        if (!nbytes)
                                return;
                        i++;
                } else {
                        len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
                                nbytes : (dev->scatter_buffer_size - offset);
                        len = (sg->length < len) ? sg->length : len;
                        while (crypto4xx_fill_one_page(dev, &addr, &len,
                                               &this_sd, &offset, &nbytes))
                                ;
                        if (!nbytes)
                                return;
                        sg_len -= len;
                        if (sg_len) {
                                addr += len;
                                while (crypto4xx_fill_one_page(dev, &addr,
                                        &sg_len, &this_sd, &offset, &nbytes))
                                        ;
                        }
                        i++;
                }
        }
}

static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
                                        struct crypto4xx_ctx *ctx)
{
        struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
        struct sa_state_record *state_record =
                                (struct sa_state_record *) pd_uinfo->sr_va;

        if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
                memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
                       SA_HASH_ALG_SHA1_DIGEST_SIZE);
        }

        return 0;
}

static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
                                  struct pd_uinfo *pd_uinfo)
{
        int i;

        if (pd_uinfo->num_gd) {
                for (i = 0; i < pd_uinfo->num_gd; i++)
                        crypto4xx_put_gd_to_gdr(dev);
                pd_uinfo->first_gd = 0xffffffff;
                pd_uinfo->num_gd = 0;
        }
        if (pd_uinfo->num_sd) {
                for (i = 0; i < pd_uinfo->num_sd; i++)
                        crypto4xx_put_sd_to_sdr(dev);

                pd_uinfo->first_sd = 0xffffffff;
                pd_uinfo->num_sd = 0;
        }
}

static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
                                     struct pd_uinfo *pd_uinfo,
                                     struct ce_pd *pd)
{
        struct crypto4xx_ctx *ctx;
        struct ablkcipher_request *ablk_req;
        struct scatterlist *dst;
        dma_addr_t addr;

        ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
        ctx  = crypto_tfm_ctx(ablk_req->base.tfm);

        if (pd_uinfo->using_sd) {
                crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
                                          ablk_req->dst);
        } else {
                dst = pd_uinfo->dest_va;
                addr = dma_map_page(dev->core_dev->device, sg_page(dst),
                                    dst->offset, dst->length, DMA_FROM_DEVICE);
        }
        crypto4xx_ret_sg_desc(dev, pd_uinfo);
        if (ablk_req->base.complete != NULL)
                ablk_req->base.complete(&ablk_req->base, 0);

        return 0;
}

static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
                                struct pd_uinfo *pd_uinfo)
{
        struct crypto4xx_ctx *ctx;
        struct ahash_request *ahash_req;

        ahash_req = ahash_request_cast(pd_uinfo->async_req);
        ctx  = crypto_tfm_ctx(ahash_req->base.tfm);

        crypto4xx_copy_digest_to_dst(pd_uinfo, ctx);
        crypto4xx_ret_sg_desc(dev, pd_uinfo);
        /* call the user-provided completion callback */
        if (ahash_req->base.complete != NULL)
                ahash_req->base.complete(&ahash_req->base, 0);

        return 0;
}

static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
        struct ce_pd *pd;
        struct pd_uinfo *pd_uinfo;

        pd = dev->pdr + sizeof(struct ce_pd) * idx;
        pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo) * idx;
        if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
                        CRYPTO_ALG_TYPE_ABLKCIPHER)
                return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
        else
                return crypto4xx_ahash_done(dev, pd_uinfo);
}

/**
 * Note: Only use this function to copy items that are word-aligned.
 */
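/*
 * Worked example of the tail handling below: with len = 3 and
 * buf = {0x01, 0x02, 0x03}, the final word is written byte-wise as
 * {0x00, 0x03, 0x02, 0x01}, i.e. the remaining bytes are reversed
 * into the low end of a zero-padded little-endian word.
 */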
void crypto4xx_memcpy_le(unsigned int *dst,
                         const unsigned char *buf,
                         int len)
{
        u8 *tmp;

        for (; len >= 4; buf += 4, len -= 4)
                *dst++ = cpu_to_le32(*(unsigned int *) buf);

        tmp = (u8 *)dst;
        switch (len) {
        case 3:
                *tmp++ = 0;
                *tmp++ = *(buf+2);
                *tmp++ = *(buf+1);
                *tmp++ = *buf;
                break;
        case 2:
                *tmp++ = 0;
                *tmp++ = 0;
                *tmp++ = *(buf+1);
                *tmp++ = *buf;
                break;
        case 1:
                *tmp++ = 0;
                *tmp++ = 0;
                *tmp++ = 0;
                *tmp++ = *buf;
                break;
        default:
                break;
        }
}

static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
        crypto4xx_destroy_pdr(core_dev->dev);
        crypto4xx_destroy_gdr(core_dev->dev);
        crypto4xx_destroy_sdr(core_dev->dev);
        dev_set_drvdata(core_dev->device, NULL);
        iounmap(core_dev->dev->ce_base);
        kfree(core_dev->dev);
        kfree(core_dev);
}

void crypto4xx_return_pd(struct crypto4xx_device *dev,
                         u32 pd_entry, struct ce_pd *pd,
                         struct pd_uinfo *pd_uinfo)
{
        /* irq should be already disabled */
        dev->pdr_head = pd_entry;
        pd->pd_ctl.w = 0;
        pd->pd_ctl_len.w = 0;
        pd_uinfo->state = PD_ENTRY_FREE;
}

/*
 * derive the number of elements in a scatterlist
 * Shamelessly copied from talitos.c
 */
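/*
 * Example (illustrative values): nbytes = 100 over two 64-byte sg
 * entries returns 2; the first entry consumes 64 bytes and the
 * second, which covers the remaining 36, ends the walk.
 */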
static int get_sg_count(struct scatterlist *sg_list, int nbytes)
{
        struct scatterlist *sg = sg_list;
        int sg_nents = 0;

        while (nbytes) {
                sg_nents++;
                if (sg->length > nbytes)
                        break;
                nbytes -= sg->length;
                sg = sg_next(sg);
        }

        return sg_nents;
}

static u32 get_next_gd(u32 idx)
{
        if (idx != PPC4XX_LAST_GD)
                return idx + 1;
        else
                return 0;
}

static u32 get_next_sd(u32 idx)
{
        if (idx != PPC4XX_LAST_SD)
                return idx + 1;
        else
                return 0;
}

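/*
 * Build one packet descriptor for the engine: atomically reserve the
 * needed gather/scatter descriptors and a PD slot (returning everything
 * if any ring is full), point the PD at the SA and state record, wire
 * up the source through gather descriptors (or a direct DMA mapping
 * when the source is contiguous) and the destination through scatter
 * descriptors (or a direct mapping), then kick the engine.
 */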
u32 crypto4xx_build_pd(struct crypto_async_request *req,
                       struct crypto4xx_ctx *ctx,
                       struct scatterlist *src,
                       struct scatterlist *dst,
                       unsigned int datalen,
                       void *iv, u32 iv_len)
{
        struct crypto4xx_device *dev = ctx->dev;
        dma_addr_t addr, pd_dma, sd_dma, gd_dma;
        struct dynamic_sa_ctl *sa;
        struct scatterlist *sg;
        struct ce_gd *gd;
        struct ce_pd *pd;
        u32 num_gd, num_sd;
        u32 fst_gd = 0xffffffff;
        u32 fst_sd = 0xffffffff;
        u32 pd_entry;
        unsigned long flags;
        struct pd_uinfo *pd_uinfo = NULL;
        unsigned int nbytes = datalen, idx;
        unsigned int ivlen = 0;
        u32 gd_idx = 0;

        /* figure out how many gather descriptors are needed */
        num_gd = get_sg_count(src, datalen);
        if (num_gd == 1)
                num_gd = 0;

        /* figure out how many scatter descriptors are needed */
        if (sg_is_last(dst) || ctx->is_hash) {
                num_sd = 0;
        } else {
                if (datalen > PPC4XX_SD_BUFFER_SIZE) {
                        num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
                        if (datalen % PPC4XX_SD_BUFFER_SIZE)
                                num_sd++;
                } else {
                        num_sd = 1;
                }
        }

        /*
         * The following section must run under the lock: the gather
         * and scatter descriptors have to be consecutive, and if we
         * run out of any kind of descriptor, the ones already
         * reserved must be returned to their original place.
         */
        spin_lock_irqsave(&dev->core_dev->lock, flags);
        if (num_gd) {
                fst_gd = crypto4xx_get_n_gd(dev, num_gd);
                if (fst_gd == ERING_WAS_FULL) {
                        spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                        return -EAGAIN;
                }
        }
        if (num_sd) {
                fst_sd = crypto4xx_get_n_sd(dev, num_sd);
                if (fst_sd == ERING_WAS_FULL) {
                        if (num_gd)
                                dev->gdr_head = fst_gd;
                        spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                        return -EAGAIN;
                }
        }
        pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
        if (pd_entry == ERING_WAS_FULL) {
                if (num_gd)
                        dev->gdr_head = fst_gd;
                if (num_sd)
                        dev->sdr_head = fst_sd;
                spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                return -EAGAIN;
        }
        spin_unlock_irqrestore(&dev->core_dev->lock, flags);

        pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
                                       sizeof(struct pd_uinfo) * pd_entry);
        pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
        pd_uinfo->async_req = req;
        pd_uinfo->num_gd = num_gd;
        pd_uinfo->num_sd = num_sd;

        if (iv_len || ctx->is_hash) {
                ivlen = iv_len;
                pd->sa = pd_uinfo->sa_pa;
                sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
                if (ctx->direction == DIR_INBOUND)
                        memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
                else
                        memcpy(sa, ctx->sa_out, ctx->sa_len * 4);

                memcpy((void *) sa + ctx->offset_to_sr_ptr,
                        &pd_uinfo->sr_pa, 4);

                if (iv_len)
                        crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
        } else {
                if (ctx->direction == DIR_INBOUND) {
                        pd->sa = ctx->sa_in_dma_addr;
                        sa = (struct dynamic_sa_ctl *) ctx->sa_in;
                } else {
                        pd->sa = ctx->sa_out_dma_addr;
                        sa = (struct dynamic_sa_ctl *) ctx->sa_out;
                }
        }
        pd->sa_len = ctx->sa_len;
        if (num_gd) {
                /* get the first gd we are going to use */
                gd_idx = fst_gd;
                pd_uinfo->first_gd = fst_gd;
                pd_uinfo->num_gd = num_gd;
                gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
                pd->src = gd_dma;
                /* enable gather */
                sa->sa_command_0.bf.gather = 1;
                idx = 0;
                /* walk the sg, and setup gather array */
                while (nbytes) {
                        sg = &src[idx];
                        addr = dma_map_page(dev->core_dev->device, sg_page(sg),
                                    sg->offset, sg->length, DMA_TO_DEVICE);
                        gd->ptr = addr;
                        gd->ctl_len.len = sg->length;
                        gd->ctl_len.done = 0;
                        gd->ctl_len.ready = 1;
                        if (sg->length >= nbytes)
                                break;
                        nbytes -= sg->length;
                        gd_idx = get_next_gd(gd_idx);
                        gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
                        idx++;
                }
        } else {
                pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
                                src->offset, src->length, DMA_TO_DEVICE);
                /*
                 * Disable gather in sa command
                 */
                sa->sa_command_0.bf.gather = 0;
                /*
                 * Indicate gather array is not used
                 */
                pd_uinfo->first_gd = 0xffffffff;
                pd_uinfo->num_gd = 0;
        }
        if (ctx->is_hash || sg_is_last(dst)) {
                /*
                 * The application gave us dst as one contiguous piece of
                 * memory, so there is no need to use the scatter ring.
                 * In the is_hash case, the ICV is always at the end of
                 * the src data.
                 */
                pd_uinfo->using_sd = 0;
                pd_uinfo->first_sd = 0xffffffff;
                pd_uinfo->num_sd = 0;
                pd_uinfo->dest_va = dst;
                sa->sa_command_0.bf.scatter = 0;
                if (ctx->is_hash)
                        pd->dest = virt_to_phys((void *)dst);
                else
                        pd->dest = (u32)dma_map_page(dev->core_dev->device,
                                        sg_page(dst), dst->offset,
                                        dst->length, DMA_TO_DEVICE);
        } else {
                struct ce_sd *sd = NULL;
                u32 sd_idx = fst_sd;

                nbytes = datalen;
                sa->sa_command_0.bf.scatter = 1;
                pd_uinfo->using_sd = 1;
                pd_uinfo->dest_va = dst;
                pd_uinfo->first_sd = fst_sd;
                pd_uinfo->num_sd = num_sd;
                sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
                pd->dest = sd_dma;
                /* setup the first scatter descriptor */
                sd->ctl.done = 0;
                sd->ctl.rdy = 1;
                /* sd->ptr was already set up by crypto4xx_build_sdr() */
                idx = 0;
                if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
                        nbytes -= PPC4XX_SD_BUFFER_SIZE;
                else
                        nbytes = 0;
                while (nbytes) {
                        sd_idx = get_next_sd(sd_idx);
                        sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
                        /* setup scatter descriptor */
                        sd->ctl.done = 0;
                        sd->ctl.rdy = 1;
                        if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
                                nbytes -= PPC4XX_SD_BUFFER_SIZE;
                        else
                                /*
                                 * an SD entry can hold PPC4XX_SD_BUFFER_SIZE
                                 * bytes, which is more than nbytes, so done
                                 */
                                nbytes = 0;
                }
        }

        sa->sa_command_1.bf.hash_crypto_offset = 0;
        pd->pd_ctl.w = ctx->pd_ctl;
        pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
        pd_uinfo->state = PD_ENTRY_INUSE;
        wmb();
        /* write any value to push the engine to read a pd */
        writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
        return -EINPROGRESS;
}

/**
 * Algorithm Registration Functions
 */
static int crypto4xx_alg_init(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
        struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->dev = amcc_alg->dev;
        ctx->sa_in = NULL;
        ctx->sa_out = NULL;
        ctx->sa_in_dma_addr = 0;
        ctx->sa_out_dma_addr = 0;
        ctx->sa_len = 0;

        switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        default:
                tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
                break;
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                         sizeof(struct crypto4xx_ctx));
                break;
        }

        return 0;
}

static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
{
        struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto4xx_free_sa(ctx);
        crypto4xx_free_state_record(ctx);
}

int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
                           struct crypto4xx_alg_common *crypto_alg,
                           int array_size)
{
        struct crypto4xx_alg *alg;
        int i;
        int rc = 0;

        for (i = 0; i < array_size; i++) {
                alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
                if (!alg)
                        return -ENOMEM;

                alg->alg = crypto_alg[i];
                alg->dev = sec_dev;

                switch (alg->alg.type) {
                case CRYPTO_ALG_TYPE_AHASH:
                        rc = crypto_register_ahash(&alg->alg.u.hash);
                        break;

                default:
                        rc = crypto_register_alg(&alg->alg.u.cipher);
                        break;
                }

                if (rc) {
                        /* not on the list yet, so just free it */
                        kfree(alg);
                } else {
                        list_add_tail(&alg->entry, &sec_dev->alg_list);
                }
        }

        return 0;
}

static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
        struct crypto4xx_alg *alg, *tmp;

        list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
                list_del(&alg->entry);
                switch (alg->alg.type) {
                case CRYPTO_ALG_TYPE_AHASH:
                        crypto_unregister_ahash(&alg->alg.u.hash);
                        break;

                default:
                        crypto_unregister_alg(&alg->alg.u.cipher);
                }
                kfree(alg);
        }
}

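/*
 * Bottom half: retire completed packet descriptors from the tail of
 * the PD ring, stopping at the first descriptor the engine has not
 * finished yet.
 */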
static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
        struct device *dev = (struct device *)data;
        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
        struct pd_uinfo *pd_uinfo;
        struct ce_pd *pd;
        u32 tail;

        while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
                tail = core_dev->dev->pdr_tail;
                pd_uinfo = core_dev->dev->pdr_uinfo +
                        sizeof(struct pd_uinfo) * tail;
                pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
                if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
                                   pd->pd_ctl.bf.pe_done &&
                                   !pd->pd_ctl.bf.host_ready) {
                        pd->pd_ctl.bf.pe_done = 0;
                        crypto4xx_pd_done(core_dev->dev, tail);
                        crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
                        pd_uinfo->state = PD_ENTRY_FREE;
                } else {
                        /* if the tail is not done yet, stop here */
                        break;
                }
        }
}

/**
 * Top Half of isr.
 */
static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
        struct device *dev = (struct device *)data;
        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

        if (!core_dev->dev->ce_base)
                return IRQ_NONE;

        writel(PPC4XX_INTERRUPT_CLR,
               core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
        tasklet_schedule(&core_dev->tasklet);

        return IRQ_HANDLED;
}

/**
 * Supported Crypto Algorithms
 */
struct crypto4xx_alg_common crypto4xx_alg[] = {
        /* Crypto AES modes */
        { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
                .cra_name       = "cbc(aes)",
                .cra_driver_name = "cbc-aes-ppc4xx",
                .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
                .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize  = AES_BLOCK_SIZE,
                .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
                .cra_type       = &crypto_ablkcipher_type,
                .cra_init       = crypto4xx_alg_init,
                .cra_exit       = crypto4xx_alg_exit,
                .cra_module     = THIS_MODULE,
                .cra_u          = {
                        .ablkcipher = {
                                .min_keysize    = AES_MIN_KEY_SIZE,
                                .max_keysize    = AES_MAX_KEY_SIZE,
                                .ivsize         = AES_IV_SIZE,
                                .setkey         = crypto4xx_setkey_aes_cbc,
                                .encrypt        = crypto4xx_encrypt,
                                .decrypt        = crypto4xx_decrypt,
                        }
                }
        }},
};

/**
 * Module Initialization Routine
 */
static int crypto4xx_probe(struct platform_device *ofdev)
{
        int rc;
        struct resource res;
        struct device *dev = &ofdev->dev;
        struct crypto4xx_core_device *core_dev;

        rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
        if (rc)
                return -ENODEV;

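        /*
         * Pulse the crypto engine's reset bit in the SoC's SDR0_SRST
         * DCR; the exact register name and reset mask differ per chip,
         * so the device tree compatible string selects the right pair.
         */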
        if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
                mtdcri(SDR0, PPC460EX_SDR0_SRST,
                       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
                mtdcri(SDR0, PPC460EX_SDR0_SRST,
                       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
        } else if (of_find_compatible_node(NULL, NULL,
                        "amcc,ppc405ex-crypto")) {
                mtdcri(SDR0, PPC405EX_SDR0_SRST,
                       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
                mtdcri(SDR0, PPC405EX_SDR0_SRST,
                       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
        } else if (of_find_compatible_node(NULL, NULL,
                        "amcc,ppc460sx-crypto")) {
                mtdcri(SDR0, PPC460SX_SDR0_SRST,
                       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
                mtdcri(SDR0, PPC460SX_SDR0_SRST,
                       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
        } else {
                printk(KERN_ERR "Crypto function not supported!\n");
                return -EINVAL;
        }

        core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
        if (!core_dev)
                return -ENOMEM;

        dev_set_drvdata(dev, core_dev);
        core_dev->ofdev = ofdev;
        core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
        if (!core_dev->dev) {
                rc = -ENOMEM;
                goto err_alloc_dev;
        }

        core_dev->dev->core_dev = core_dev;
        core_dev->device = dev;
        spin_lock_init(&core_dev->lock);
        INIT_LIST_HEAD(&core_dev->dev->alg_list);
        rc = crypto4xx_build_pdr(core_dev->dev);
        if (rc)
                goto err_build_pdr;

        rc = crypto4xx_build_gdr(core_dev->dev);
        if (rc)
                goto err_build_gdr;

        rc = crypto4xx_build_sdr(core_dev->dev);
        if (rc)
                goto err_build_sdr;

        /* Init tasklet for bottom half processing */
        tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
                     (unsigned long) dev);

        /* Register for Crypto isr, Crypto Engine IRQ */
        core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
        rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
                         core_dev->dev->name, dev);
        if (rc)
                goto err_request_irq;

        core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
        if (!core_dev->dev->ce_base) {
                dev_err(dev, "failed to of_iomap\n");
                rc = -ENOMEM;
                goto err_iomap;
        }

        /* need to setup pdr, rdr, gdr and sdr before this */
        crypto4xx_hw_init(core_dev->dev);

        /* Register security algorithms with Linux CryptoAPI */
        rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
                               ARRAY_SIZE(crypto4xx_alg));
        if (rc)
                goto err_start_dev;

        return 0;

err_start_dev:
        iounmap(core_dev->dev->ce_base);
err_iomap:
        free_irq(core_dev->irq, dev);
err_request_irq:
        irq_dispose_mapping(core_dev->irq);
        tasklet_kill(&core_dev->tasklet);
        crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
        crypto4xx_destroy_gdr(core_dev->dev);
err_build_gdr:
        crypto4xx_destroy_pdr(core_dev->dev);
err_build_pdr:
        kfree(core_dev->dev);
err_alloc_dev:
        kfree(core_dev);

        return rc;
}

static int crypto4xx_remove(struct platform_device *ofdev)
{
        struct device *dev = &ofdev->dev;
        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

        free_irq(core_dev->irq, dev);
        irq_dispose_mapping(core_dev->irq);

        tasklet_kill(&core_dev->tasklet);
        /* Un-register with Linux CryptoAPI */
        crypto4xx_unregister_alg(core_dev->dev);
        /* Free all allocated memory */
        crypto4xx_stop_all(core_dev);

        return 0;
}

static const struct of_device_id crypto4xx_match[] = {
        { .compatible      = "amcc,ppc4xx-crypto",},
        { },
};

static struct platform_driver crypto4xx_driver = {
        .driver = {
                .name = "crypto4xx",
                .owner = THIS_MODULE,
                .of_match_table = crypto4xx_match,
        },
        .probe          = crypto4xx_probe,
        .remove         = crypto4xx_remove,
};

module_platform_driver(crypto4xx_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");