linux/drivers/crypto/amcc/crypto4xx_core.c
   1/**
   2 * AMCC SoC PPC4xx Crypto Driver
   3 *
   4 * Copyright (c) 2008 Applied Micro Circuits Corporation.
   5 * All rights reserved. James Hsiao <jhsiao@amcc.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
   17 * This file implements the AMCC crypto offload Linux device driver for use
   18 * with the Linux CryptoAPI.
  19 */
  20
  21#include <linux/kernel.h>
  22#include <linux/interrupt.h>
  23#include <linux/spinlock_types.h>
  24#include <linux/random.h>
  25#include <linux/scatterlist.h>
  26#include <linux/crypto.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/platform_device.h>
  29#include <linux/init.h>
  30#include <linux/module.h>
  31#include <linux/of_address.h>
  32#include <linux/of_irq.h>
  33#include <linux/of_platform.h>
  34#include <linux/slab.h>
  35#include <asm/dcr.h>
  36#include <asm/dcr-regs.h>
  37#include <asm/cacheflush.h>
  38#include <crypto/aes.h>
  39#include <crypto/sha.h>
  40#include "crypto4xx_reg_def.h"
  41#include "crypto4xx_core.h"
  42#include "crypto4xx_sa.h"
  43#include "crypto4xx_trng.h"
  44
  45#define PPC4XX_SEC_VERSION_STR                  "0.5"
  46
  47/**
  48 * PPC4xx Crypto Engine Initialization Routine
  49 */
  50static void crypto4xx_hw_init(struct crypto4xx_device *dev)
  51{
  52        union ce_ring_size ring_size;
  53        union ce_ring_contol ring_ctrl;
  54        union ce_part_ring_size part_ring_size;
  55        union ce_io_threshold io_threshold;
  56        u32 rand_num;
  57        union ce_pe_dma_cfg pe_dma_cfg;
  58        u32 device_ctrl;
  59
  60        writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
   61        /* setup pe dma: reset sg, pdr and pe, then release the resets */
  62        pe_dma_cfg.w = 0;
  63        pe_dma_cfg.bf.bo_sgpd_en = 1;
  64        pe_dma_cfg.bf.bo_data_en = 0;
  65        pe_dma_cfg.bf.bo_sa_en = 1;
  66        pe_dma_cfg.bf.bo_pd_en = 1;
  67        pe_dma_cfg.bf.dynamic_sa_en = 1;
  68        pe_dma_cfg.bf.reset_sg = 1;
  69        pe_dma_cfg.bf.reset_pdr = 1;
  70        pe_dma_cfg.bf.reset_pe = 1;
  71        writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
   72        /* release reset of pe, sg and pdr */
  73        pe_dma_cfg.bf.pe_mode = 0;
  74        pe_dma_cfg.bf.reset_sg = 0;
  75        pe_dma_cfg.bf.reset_pdr = 0;
  76        pe_dma_cfg.bf.reset_pe = 0;
  77        pe_dma_cfg.bf.bo_td_en = 0;
  78        writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
  79        writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
  80        writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
  81        writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
  82        get_random_bytes(&rand_num, sizeof(rand_num));
  83        writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
  84        get_random_bytes(&rand_num, sizeof(rand_num));
  85        writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
  86        ring_size.w = 0;
  87        ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
  88        ring_size.bf.ring_size   = PPC4XX_NUM_PD;
  89        writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
  90        ring_ctrl.w = 0;
  91        writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
  92        device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
  93        device_ctrl |= PPC4XX_DC_3DES_EN;
  94        writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
  95        writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
  96        writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
  97        part_ring_size.w = 0;
  98        part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
  99        part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
 100        writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
 101        writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
 102        io_threshold.w = 0;
 103        io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
 104        io_threshold.bf.input_threshold  = PPC4XX_INPUT_THRESHOLD;
 105        writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
 106        writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
 107        writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
 108        writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
 109        writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
 110        writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
 111        writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
 112        writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
  113        /* enable pe mode; keep pe, sg and pdr out of reset */
 114        pe_dma_cfg.bf.pe_mode = 1;
 115        pe_dma_cfg.bf.reset_sg = 0;
 116        pe_dma_cfg.bf.reset_pdr = 0;
 117        pe_dma_cfg.bf.reset_pe = 0;
 118        pe_dma_cfg.bf.bo_td_en = 0;
 119        writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
  120        /* clear all pending interrupts */
 121        writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
 122        writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
 123        writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
 124        writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
 125        writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
 126}
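/*
 * Note on the sequence above: the PE DMA block is reset and released, the
 * same ring base is programmed for both PDR and RDR (the packet ring
 * appears to serve as both command and result ring), the engine PRNG is
 * seeded from get_random_bytes(), the gather/scatter rings and IO
 * thresholds are configured, and finally pending interrupts are cleared
 * and the descriptor-done interrupt is enabled.
 */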
 127
 128int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
 129{
 130        ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
 131                                        &ctx->sa_in_dma_addr, GFP_ATOMIC);
 132        if (ctx->sa_in == NULL)
 133                return -ENOMEM;
 134
 135        ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
 136                                         &ctx->sa_out_dma_addr, GFP_ATOMIC);
 137        if (ctx->sa_out == NULL) {
 138                dma_free_coherent(ctx->dev->core_dev->device, size * 4,
 139                                  ctx->sa_in, ctx->sa_in_dma_addr);
 140                return -ENOMEM;
 141        }
 142
 143        memset(ctx->sa_in, 0, size * 4);
 144        memset(ctx->sa_out, 0, size * 4);
 145        ctx->sa_len = size;
 146
 147        return 0;
 148}
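/*
 * Illustrative use of the two helpers below/above (a sketch, not code from
 * this file; SA_AES128_LEN is an assumed length constant).  A setkey
 * handler would typically drop any stale SA and allocate a fresh one,
 * sized in 32-bit words (hence the "size * 4" byte counts above):
 *
 *	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
 *		crypto4xx_free_sa(ctx);
 *	rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN);
 *	if (rc)
 *		return rc;
 */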
 149
 150void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
 151{
 152        if (ctx->sa_in != NULL)
 153                dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
 154                                  ctx->sa_in, ctx->sa_in_dma_addr);
 155        if (ctx->sa_out != NULL)
 156                dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
 157                                  ctx->sa_out, ctx->sa_out_dma_addr);
 158
 159        ctx->sa_in_dma_addr = 0;
 160        ctx->sa_out_dma_addr = 0;
 161        ctx->sa_len = 0;
 162}
 163
 164u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
 165{
 166        ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
 167                                sizeof(struct sa_state_record),
 168                                &ctx->state_record_dma_addr, GFP_ATOMIC);
  169        if (!ctx->state_record)
 170                return -ENOMEM;
 171        memset(ctx->state_record, 0, sizeof(struct sa_state_record));
 172
 173        return 0;
 174}
 175
 176void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
 177{
 178        if (ctx->state_record != NULL)
 179                dma_free_coherent(ctx->dev->core_dev->device,
 180                                  sizeof(struct sa_state_record),
 181                                  ctx->state_record,
 182                                  ctx->state_record_dma_addr);
 183        ctx->state_record_dma_addr = 0;
 184}
 185
  186/**
  187 * alloc memory for the packet descriptor ring and for the per-descriptor
  188 * shadow sa/state-record pools
  189 * the descriptors themselves carry no data buffers
  190 */
 191static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
 192{
 193        int i;
 194        struct pd_uinfo *pd_uinfo;
 195        dev->pdr = dma_alloc_coherent(dev->core_dev->device,
 196                                      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
 197                                      &dev->pdr_pa, GFP_ATOMIC);
 198        if (!dev->pdr)
 199                return -ENOMEM;
 200
 201        dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
 202                                GFP_KERNEL);
 203        if (!dev->pdr_uinfo) {
 204                dma_free_coherent(dev->core_dev->device,
 205                                  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
 206                                  dev->pdr,
 207                                  dev->pdr_pa);
 208                return -ENOMEM;
 209        }
 210        memset(dev->pdr, 0,  sizeof(struct ce_pd) * PPC4XX_NUM_PD);
 211        dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
 212                                   256 * PPC4XX_NUM_PD,
 213                                   &dev->shadow_sa_pool_pa,
 214                                   GFP_ATOMIC);
 215        if (!dev->shadow_sa_pool)
 216                return -ENOMEM;
 217
 218        dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
 219                         sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
 220                         &dev->shadow_sr_pool_pa, GFP_ATOMIC);
 221        if (!dev->shadow_sr_pool)
 222                return -ENOMEM;
 223        for (i = 0; i < PPC4XX_NUM_PD; i++) {
 224                pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
 225                                                sizeof(struct pd_uinfo) * i);
 226
 227                /* alloc 256 bytes which is enough for any kind of dynamic sa */
 228                pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
 229                pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
 230
 231                /* alloc state record */
 232                pd_uinfo->sr_va = dev->shadow_sr_pool +
 233                    sizeof(struct sa_state_record) * i;
 234                pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
 235                    sizeof(struct sa_state_record) * i;
 236        }
 237
 238        return 0;
 239}
 240
 241static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
 242{
 243        if (dev->pdr != NULL)
 244                dma_free_coherent(dev->core_dev->device,
 245                                  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
 246                                  dev->pdr, dev->pdr_pa);
 247        if (dev->shadow_sa_pool)
 248                dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
 249                                  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
 250        if (dev->shadow_sr_pool)
 251                dma_free_coherent(dev->core_dev->device,
 252                        sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
 253                        dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
 254
 255        kfree(dev->pdr_uinfo);
 256}
 257
 258static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
 259{
 260        u32 retval;
 261        u32 tmp;
 262
 263        retval = dev->pdr_head;
 264        tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
 265
 266        if (tmp == dev->pdr_tail)
 267                return ERING_WAS_FULL;
 268
 269        dev->pdr_head = tmp;
 270
 271        return retval;
 272}
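/*
 * Worked example for the full-ring test above: the ring holds
 * PPC4XX_NUM_PD entries, but at most PPC4XX_NUM_PD - 1 may be in use,
 * because head only advances when (head + 1) % PPC4XX_NUM_PD does not
 * collide with tail.  E.g. with 4 entries, head = 2 and tail = 3 means
 * the next slot would land on tail, so ERING_WAS_FULL is returned.
 */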
 273
 274static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
 275{
 276        struct pd_uinfo *pd_uinfo;
 277        unsigned long flags;
 278
 279        pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
 280                                       sizeof(struct pd_uinfo) * idx);
 281        spin_lock_irqsave(&dev->core_dev->lock, flags);
 282        if (dev->pdr_tail != PPC4XX_LAST_PD)
 283                dev->pdr_tail++;
 284        else
 285                dev->pdr_tail = 0;
 286        pd_uinfo->state = PD_ENTRY_FREE;
 287        spin_unlock_irqrestore(&dev->core_dev->lock, flags);
 288
 289        return 0;
 290}
 291
 292static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
 293                                       dma_addr_t *pd_dma, u32 idx)
 294{
 295        *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
 296
 297        return dev->pdr + sizeof(struct ce_pd) * idx;
 298}
 299
 300/**
 301 * alloc memory for the gather ring
 302 * no need to alloc buf for the ring
  303 * gdr_tail and gdr_head are left at their initial value of zero
 304 */
 305static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
 306{
 307        dev->gdr = dma_alloc_coherent(dev->core_dev->device,
 308                                      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
 309                                      &dev->gdr_pa, GFP_ATOMIC);
 310        if (!dev->gdr)
 311                return -ENOMEM;
 312
 313        memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
 314
 315        return 0;
 316}
 317
 318static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
 319{
 320        dma_free_coherent(dev->core_dev->device,
 321                          sizeof(struct ce_gd) * PPC4XX_NUM_GD,
 322                          dev->gdr, dev->gdr_pa);
 323}
 324
 325/*
  326 * when this function is called, preemption and interrupts
  327 * must be disabled (the caller holds core_dev->lock)
 328 */
 329u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
 330{
 331        u32 retval;
 332        u32 tmp;
 333        if (n >= PPC4XX_NUM_GD)
 334                return ERING_WAS_FULL;
 335
 336        retval = dev->gdr_head;
 337        tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
 338        if (dev->gdr_head > dev->gdr_tail) {
 339                if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
 340                        return ERING_WAS_FULL;
 341        } else if (dev->gdr_head < dev->gdr_tail) {
 342                if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
 343                        return ERING_WAS_FULL;
 344        }
 345        dev->gdr_head = tmp;
 346
 347        return retval;
 348}
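/*
 * Example of the wrap-around check above, using an illustrative ring of
 * 256 entries: with head = 250, tail = 10 and n = 10, tmp = (250 + 10) %
 * 256 = 4.  Since head > tail, the ring is only full if tmp lands inside
 * [tail, head); 4 does not, so head is advanced to 4 and entries
 * 250..255 and 0..3 are handed out.
 */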
 349
 350static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
 351{
 352        unsigned long flags;
 353
 354        spin_lock_irqsave(&dev->core_dev->lock, flags);
 355        if (dev->gdr_tail == dev->gdr_head) {
 356                spin_unlock_irqrestore(&dev->core_dev->lock, flags);
 357                return 0;
 358        }
 359
 360        if (dev->gdr_tail != PPC4XX_LAST_GD)
 361                dev->gdr_tail++;
 362        else
 363                dev->gdr_tail = 0;
 364
 365        spin_unlock_irqrestore(&dev->core_dev->lock, flags);
 366
 367        return 0;
 368}
 369
 370static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
 371                                              dma_addr_t *gd_dma, u32 idx)
 372{
 373        *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
 374
 375        return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
 376}
 377
 378/**
 379 * alloc memory for the scatter ring
 380 * need to alloc buf for the ring
  381 * one buffer of PPC4XX_SD_BUFFER_SIZE bytes is allocated per descriptor
 382 */
 383static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
 384{
 385        int i;
 386        struct ce_sd *sd_array;
 387
 388        /* alloc memory for scatter descriptor ring */
 389        dev->sdr = dma_alloc_coherent(dev->core_dev->device,
 390                                      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
 391                                      &dev->sdr_pa, GFP_ATOMIC);
 392        if (!dev->sdr)
 393                return -ENOMEM;
 394
 395        dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
 396        dev->scatter_buffer_va =
 397                dma_alloc_coherent(dev->core_dev->device,
 398                        dev->scatter_buffer_size * PPC4XX_NUM_SD,
 399                        &dev->scatter_buffer_pa, GFP_ATOMIC);
 400        if (!dev->scatter_buffer_va) {
 401                dma_free_coherent(dev->core_dev->device,
 402                                  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
 403                                  dev->sdr, dev->sdr_pa);
 404                return -ENOMEM;
 405        }
 406
 407        sd_array = dev->sdr;
 408
 409        for (i = 0; i < PPC4XX_NUM_SD; i++) {
 410                sd_array[i].ptr = dev->scatter_buffer_pa +
 411                                  dev->scatter_buffer_size * i;
 412        }
 413
 414        return 0;
 415}
 416
 417static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
 418{
 419        if (dev->sdr != NULL)
 420                dma_free_coherent(dev->core_dev->device,
 421                                  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
 422                                  dev->sdr, dev->sdr_pa);
 423
 424        if (dev->scatter_buffer_va != NULL)
 425                dma_free_coherent(dev->core_dev->device,
 426                                  dev->scatter_buffer_size * PPC4XX_NUM_SD,
 427                                  dev->scatter_buffer_va,
 428                                  dev->scatter_buffer_pa);
 429}
 430
 431/*
  432 * when this function is called, preemption and interrupts
  433 * must be disabled (the caller holds core_dev->lock)
 434 */
 435static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
 436{
 437        u32 retval;
 438        u32 tmp;
 439
 440        if (n >= PPC4XX_NUM_SD)
 441                return ERING_WAS_FULL;
 442
 443        retval = dev->sdr_head;
 444        tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
  445        if (dev->sdr_head > dev->sdr_tail) {
 446                if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
 447                        return ERING_WAS_FULL;
 448        } else if (dev->sdr_head < dev->sdr_tail) {
 449                if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
 450                        return ERING_WAS_FULL;
  451        } /* the head == tail (empty ring) case is already taken care of */
 452        dev->sdr_head = tmp;
 453
 454        return retval;
 455}
 456
 457static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
 458{
 459        unsigned long flags;
 460
 461        spin_lock_irqsave(&dev->core_dev->lock, flags);
 462        if (dev->sdr_tail == dev->sdr_head) {
 463                spin_unlock_irqrestore(&dev->core_dev->lock, flags);
 464                return 0;
 465        }
 466        if (dev->sdr_tail != PPC4XX_LAST_SD)
 467                dev->sdr_tail++;
 468        else
 469                dev->sdr_tail = 0;
 470        spin_unlock_irqrestore(&dev->core_dev->lock, flags);
 471
 472        return 0;
 473}
 474
 475static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
 476                                              dma_addr_t *sd_dma, u32 idx)
 477{
 478        *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
 479
 480        return  (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
 481}
 482
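/*
 * crypto4xx_fill_one_page() copies data from one scatter-ring buffer back
 * into the destination page at *addr.  Three cases are handled below: the
 * remaining sg length spans more than one scatter buffer (copy a full
 * buffer, advance to the next SD and return 1 so the caller loops), the
 * length ends inside the current buffer (copy it and keep the running
 * offset), or the length exactly fills the buffer (copy up to nbytes and
 * move on to the next SD).
 */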
 483static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
 484                                   dma_addr_t *addr, u32 *length,
 485                                   u32 *idx, u32 *offset, u32 *nbytes)
 486{
 487        u32 len;
 488
 489        if (*length > dev->scatter_buffer_size) {
 490                memcpy(phys_to_virt(*addr),
 491                        dev->scatter_buffer_va +
 492                        *idx * dev->scatter_buffer_size + *offset,
 493                        dev->scatter_buffer_size);
 494                *offset = 0;
 495                *length -= dev->scatter_buffer_size;
 496                *nbytes -= dev->scatter_buffer_size;
 497                if (*idx == PPC4XX_LAST_SD)
 498                        *idx = 0;
 499                else
 500                        (*idx)++;
 501                *addr = *addr +  dev->scatter_buffer_size;
 502                return 1;
 503        } else if (*length < dev->scatter_buffer_size) {
 504                memcpy(phys_to_virt(*addr),
 505                        dev->scatter_buffer_va +
 506                        *idx * dev->scatter_buffer_size + *offset, *length);
 507                if ((*offset + *length) == dev->scatter_buffer_size) {
 508                        if (*idx == PPC4XX_LAST_SD)
 509                                *idx = 0;
 510                        else
 511                                (*idx)++;
 512                        *nbytes -= *length;
 513                        *offset = 0;
 514                } else {
 515                        *nbytes -= *length;
 516                        *offset += *length;
 517                }
 518
 519                return 0;
 520        } else {
 521                len = (*nbytes <= dev->scatter_buffer_size) ?
 522                                (*nbytes) : dev->scatter_buffer_size;
 523                memcpy(phys_to_virt(*addr),
 524                        dev->scatter_buffer_va +
 525                        *idx * dev->scatter_buffer_size + *offset,
 526                        len);
 527                *offset = 0;
 528                *nbytes -= len;
 529
 530                if (*idx == PPC4XX_LAST_SD)
 531                        *idx = 0;
 532                else
 533                        (*idx)++;
 534
 535                return 0;
  536        }
 537}
 538
 539static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
 540                                      struct ce_pd *pd,
 541                                      struct pd_uinfo *pd_uinfo,
 542                                      u32 nbytes,
 543                                      struct scatterlist *dst)
 544{
 545        dma_addr_t addr;
 546        u32 this_sd;
 547        u32 offset;
 548        u32 len;
 549        u32 i;
 550        u32 sg_len;
 551        struct scatterlist *sg;
 552
 553        this_sd = pd_uinfo->first_sd;
 554        offset = 0;
 555        i = 0;
 556
 557        while (nbytes) {
 558                sg = &dst[i];
 559                sg_len = sg->length;
 560                addr = dma_map_page(dev->core_dev->device, sg_page(sg),
 561                                sg->offset, sg->length, DMA_TO_DEVICE);
 562
 563                if (offset == 0) {
 564                        len = (nbytes <= sg->length) ? nbytes : sg->length;
 565                        while (crypto4xx_fill_one_page(dev, &addr, &len,
 566                                &this_sd, &offset, &nbytes))
 567                                ;
 568                        if (!nbytes)
 569                                return;
 570                        i++;
 571                } else {
 572                        len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
 573                                nbytes : (dev->scatter_buffer_size - offset);
 574                        len = (sg->length < len) ? sg->length : len;
 575                        while (crypto4xx_fill_one_page(dev, &addr, &len,
 576                                               &this_sd, &offset, &nbytes))
 577                                ;
 578                        if (!nbytes)
 579                                return;
 580                        sg_len -= len;
 581                        if (sg_len) {
 582                                addr += len;
 583                                while (crypto4xx_fill_one_page(dev, &addr,
 584                                        &sg_len, &this_sd, &offset, &nbytes))
 585                                        ;
 586                        }
 587                        i++;
 588                }
 589        }
 590}
 591
 592static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
 593                                        struct crypto4xx_ctx *ctx)
 594{
 595        struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
 596        struct sa_state_record *state_record =
 597                                (struct sa_state_record *) pd_uinfo->sr_va;
 598
 599        if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
 600                memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
 601                       SA_HASH_ALG_SHA1_DIGEST_SIZE);
 602        }
 603
 604        return 0;
 605}
 606
 607static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
 608                                  struct pd_uinfo *pd_uinfo)
 609{
 610        int i;
 611        if (pd_uinfo->num_gd) {
 612                for (i = 0; i < pd_uinfo->num_gd; i++)
 613                        crypto4xx_put_gd_to_gdr(dev);
 614                pd_uinfo->first_gd = 0xffffffff;
 615                pd_uinfo->num_gd = 0;
 616        }
 617        if (pd_uinfo->num_sd) {
 618                for (i = 0; i < pd_uinfo->num_sd; i++)
 619                        crypto4xx_put_sd_to_sdr(dev);
 620
 621                pd_uinfo->first_sd = 0xffffffff;
 622                pd_uinfo->num_sd = 0;
 623        }
 624}
 625
 626static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
 627                                     struct pd_uinfo *pd_uinfo,
 628                                     struct ce_pd *pd)
 629{
 630        struct crypto4xx_ctx *ctx;
 631        struct ablkcipher_request *ablk_req;
 632        struct scatterlist *dst;
 633        dma_addr_t addr;
 634
 635        ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
 636        ctx  = crypto_tfm_ctx(ablk_req->base.tfm);
 637
 638        if (pd_uinfo->using_sd) {
 639                crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
 640                                          ablk_req->dst);
 641        } else {
 642                dst = pd_uinfo->dest_va;
 643                addr = dma_map_page(dev->core_dev->device, sg_page(dst),
 644                                    dst->offset, dst->length, DMA_FROM_DEVICE);
 645        }
 646        crypto4xx_ret_sg_desc(dev, pd_uinfo);
 647        if (ablk_req->base.complete != NULL)
 648                ablk_req->base.complete(&ablk_req->base, 0);
 649
 650        return 0;
 651}
 652
 653static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
 654                                struct pd_uinfo *pd_uinfo)
 655{
 656        struct crypto4xx_ctx *ctx;
 657        struct ahash_request *ahash_req;
 658
 659        ahash_req = ahash_request_cast(pd_uinfo->async_req);
 660        ctx  = crypto_tfm_ctx(ahash_req->base.tfm);
 661
 662        crypto4xx_copy_digest_to_dst(pd_uinfo,
 663                                     crypto_tfm_ctx(ahash_req->base.tfm));
 664        crypto4xx_ret_sg_desc(dev, pd_uinfo);
  665        /* call user provided callback function */
 666        if (ahash_req->base.complete != NULL)
 667                ahash_req->base.complete(&ahash_req->base, 0);
 668
 669        return 0;
 670}
 671
 672static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
 673{
 674        struct ce_pd *pd;
 675        struct pd_uinfo *pd_uinfo;
 676
  677        pd = dev->pdr + sizeof(struct ce_pd) * idx;
  678        pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo) * idx;
 679        if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
 680                        CRYPTO_ALG_TYPE_ABLKCIPHER)
 681                return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
 682        else
 683                return crypto4xx_ahash_done(dev, pd_uinfo);
 684}
 685
 686/**
 687 * Note: Only use this function to copy items that is word aligned.
 688 */
 689void crypto4xx_memcpy_le(unsigned int *dst,
 690                         const unsigned char *buf,
 691                         int len)
 692{
 693        u8 *tmp;
 694        for (; len >= 4; buf += 4, len -= 4)
 695                *dst++ = cpu_to_le32(*(unsigned int *) buf);
 696
 697        tmp = (u8 *)dst;
 698        switch (len) {
 699        case 3:
 700                *tmp++ = 0;
 701                *tmp++ = *(buf+2);
 702                *tmp++ = *(buf+1);
 703                *tmp++ = *buf;
 704                break;
 705        case 2:
 706                *tmp++ = 0;
 707                *tmp++ = 0;
 708                *tmp++ = *(buf+1);
 709                *tmp++ = *buf;
 710                break;
 711        case 1:
 712                *tmp++ = 0;
 713                *tmp++ = 0;
 714                *tmp++ = 0;
 715                *tmp++ = *buf;
 716                break;
 717        default:
 718                break;
 719        }
 720}
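/*
 * Worked example for the byte handling above: each full 32-bit word is
 * byte-reversed (on this big-endian platform cpu_to_le32() swaps the
 * bytes), and a trailing partial word is reversed and zero-padded at the
 * front, e.g. a 3-byte tail {0xaa, 0xbb, 0xcc} is stored as
 * {0x00, 0xcc, 0xbb, 0xaa}.
 */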
 721
 722static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
 723{
 724        crypto4xx_destroy_pdr(core_dev->dev);
 725        crypto4xx_destroy_gdr(core_dev->dev);
 726        crypto4xx_destroy_sdr(core_dev->dev);
 727        iounmap(core_dev->dev->ce_base);
 728        kfree(core_dev->dev);
 729        kfree(core_dev);
 730}
 731
 732void crypto4xx_return_pd(struct crypto4xx_device *dev,
 733                         u32 pd_entry, struct ce_pd *pd,
 734                         struct pd_uinfo *pd_uinfo)
 735{
 736        /* irq should be already disabled */
 737        dev->pdr_head = pd_entry;
 738        pd->pd_ctl.w = 0;
 739        pd->pd_ctl_len.w = 0;
 740        pd_uinfo->state = PD_ENTRY_FREE;
 741}
 742
 743static u32 get_next_gd(u32 current)
 744{
 745        if (current != PPC4XX_LAST_GD)
 746                return current + 1;
 747        else
 748                return 0;
 749}
 750
 751static u32 get_next_sd(u32 current)
 752{
 753        if (current != PPC4XX_LAST_SD)
 754                return current + 1;
 755        else
 756                return 0;
 757}
 758
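/*
 * How the descriptor counts in crypto4xx_build_pd() work out, as a rough
 * example (the buffer size value is illustrative): with a destination
 * that is not a single sg entry and datalen = 5000 while
 * PPC4XX_SD_BUFFER_SIZE is 2048, num_sd = 5000 / 2048 + 1 = 3 scatter
 * descriptors; a source made of a single sg entry needs no gather
 * descriptors at all (num_gd = 0).
 */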
 759u32 crypto4xx_build_pd(struct crypto_async_request *req,
 760                       struct crypto4xx_ctx *ctx,
 761                       struct scatterlist *src,
 762                       struct scatterlist *dst,
 763                       unsigned int datalen,
 764                       void *iv, u32 iv_len)
 765{
 766        struct crypto4xx_device *dev = ctx->dev;
 767        dma_addr_t addr, pd_dma, sd_dma, gd_dma;
 768        struct dynamic_sa_ctl *sa;
 769        struct scatterlist *sg;
 770        struct ce_gd *gd;
 771        struct ce_pd *pd;
 772        u32 num_gd, num_sd;
 773        u32 fst_gd = 0xffffffff;
 774        u32 fst_sd = 0xffffffff;
 775        u32 pd_entry;
 776        unsigned long flags;
 777        struct pd_uinfo *pd_uinfo = NULL;
 778        unsigned int nbytes = datalen, idx;
 779        unsigned int ivlen = 0;
 780        u32 gd_idx = 0;
 781
  782        /* figure out how many gather descriptors are needed */
 783        num_gd = sg_nents_for_len(src, datalen);
 784        if ((int)num_gd < 0) {
 785                dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
 786                return -EINVAL;
 787        }
 788        if (num_gd == 1)
 789                num_gd = 0;
 790
  791        /* figure out how many scatter descriptors are needed */
 792        if (sg_is_last(dst) || ctx->is_hash) {
 793                num_sd = 0;
 794        } else {
 795                if (datalen > PPC4XX_SD_BUFFER_SIZE) {
 796                        num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
 797                        if (datalen % PPC4XX_SD_BUFFER_SIZE)
 798                                num_sd++;
 799                } else {
 800                        num_sd = 1;
 801                }
 802        }
 803
 804        /*
  805         * The following section of code needs to be protected.
  806         * The gather ring and scatter ring entries must be consecutive.
  807         * If we run out of any kind of descriptor, the descriptors
  808         * already obtained must be returned to their original place.
 809         */
 810        spin_lock_irqsave(&dev->core_dev->lock, flags);
 811        if (num_gd) {
 812                fst_gd = crypto4xx_get_n_gd(dev, num_gd);
 813                if (fst_gd == ERING_WAS_FULL) {
 814                        spin_unlock_irqrestore(&dev->core_dev->lock, flags);
 815                        return -EAGAIN;
 816                }
 817        }
 818        if (num_sd) {
 819                fst_sd = crypto4xx_get_n_sd(dev, num_sd);
 820                if (fst_sd == ERING_WAS_FULL) {
 821                        if (num_gd)
 822                                dev->gdr_head = fst_gd;
 823                        spin_unlock_irqrestore(&dev->core_dev->lock, flags);
 824                        return -EAGAIN;
 825                }
 826        }
 827        pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
 828        if (pd_entry == ERING_WAS_FULL) {
 829                if (num_gd)
 830                        dev->gdr_head = fst_gd;
 831                if (num_sd)
 832                        dev->sdr_head = fst_sd;
 833                spin_unlock_irqrestore(&dev->core_dev->lock, flags);
 834                return -EAGAIN;
 835        }
 836        spin_unlock_irqrestore(&dev->core_dev->lock, flags);
 837
 838        pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
 839                                       sizeof(struct pd_uinfo) * pd_entry);
 840        pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
 841        pd_uinfo->async_req = req;
 842        pd_uinfo->num_gd = num_gd;
 843        pd_uinfo->num_sd = num_sd;
 844
 845        if (iv_len || ctx->is_hash) {
 846                ivlen = iv_len;
 847                pd->sa = pd_uinfo->sa_pa;
 848                sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
 849                if (ctx->direction == DIR_INBOUND)
 850                        memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
 851                else
 852                        memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
 853
 854                memcpy((void *) sa + ctx->offset_to_sr_ptr,
 855                        &pd_uinfo->sr_pa, 4);
 856
 857                if (iv_len)
 858                        crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
 859        } else {
 860                if (ctx->direction == DIR_INBOUND) {
 861                        pd->sa = ctx->sa_in_dma_addr;
 862                        sa = (struct dynamic_sa_ctl *) ctx->sa_in;
 863                } else {
 864                        pd->sa = ctx->sa_out_dma_addr;
 865                        sa = (struct dynamic_sa_ctl *) ctx->sa_out;
 866                }
 867        }
 868        pd->sa_len = ctx->sa_len;
 869        if (num_gd) {
 870                /* get first gd we are going to use */
 871                gd_idx = fst_gd;
 872                pd_uinfo->first_gd = fst_gd;
 873                pd_uinfo->num_gd = num_gd;
 874                gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
 875                pd->src = gd_dma;
 876                /* enable gather */
 877                sa->sa_command_0.bf.gather = 1;
 878                idx = 0;
 879                src = &src[0];
 880                /* walk the sg, and setup gather array */
 881                while (nbytes) {
 882                        sg = &src[idx];
 883                        addr = dma_map_page(dev->core_dev->device, sg_page(sg),
 884                                    sg->offset, sg->length, DMA_TO_DEVICE);
 885                        gd->ptr = addr;
 886                        gd->ctl_len.len = sg->length;
 887                        gd->ctl_len.done = 0;
 888                        gd->ctl_len.ready = 1;
 889                        if (sg->length >= nbytes)
 890                                break;
 891                        nbytes -= sg->length;
 892                        gd_idx = get_next_gd(gd_idx);
 893                        gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
 894                        idx++;
 895                }
 896        } else {
 897                pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
 898                                src->offset, src->length, DMA_TO_DEVICE);
 899                /*
 900                 * Disable gather in sa command
 901                 */
 902                sa->sa_command_0.bf.gather = 0;
 903                /*
 904                 * Indicate gather array is not used
 905                 */
 906                pd_uinfo->first_gd = 0xffffffff;
 907                pd_uinfo->num_gd = 0;
 908        }
 909        if (ctx->is_hash || sg_is_last(dst)) {
 910                /*
  911                 * we know the application gave us dst as a single piece of memory,
  912                 * so there is no need to use the scatter ring.
  913                 * In the is_hash case, the icv is always at the end of the src data.
 914                 */
 915                pd_uinfo->using_sd = 0;
 916                pd_uinfo->first_sd = 0xffffffff;
 917                pd_uinfo->num_sd = 0;
 918                pd_uinfo->dest_va = dst;
 919                sa->sa_command_0.bf.scatter = 0;
 920                if (ctx->is_hash)
 921                        pd->dest = virt_to_phys((void *)dst);
 922                else
 923                        pd->dest = (u32)dma_map_page(dev->core_dev->device,
 924                                        sg_page(dst), dst->offset,
 925                                        dst->length, DMA_TO_DEVICE);
 926        } else {
 927                struct ce_sd *sd = NULL;
 928                u32 sd_idx = fst_sd;
 929                nbytes = datalen;
 930                sa->sa_command_0.bf.scatter = 1;
 931                pd_uinfo->using_sd = 1;
 932                pd_uinfo->dest_va = dst;
 933                pd_uinfo->first_sd = fst_sd;
 934                pd_uinfo->num_sd = num_sd;
 935                sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
 936                pd->dest = sd_dma;
 937                /* setup scatter descriptor */
 938                sd->ctl.done = 0;
 939                sd->ctl.rdy = 1;
  940                /* sd->ptr should be set up by the sd_init routine */
 941                idx = 0;
 942                if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
 943                        nbytes -= PPC4XX_SD_BUFFER_SIZE;
 944                else
 945                        nbytes = 0;
 946                while (nbytes) {
 947                        sd_idx = get_next_sd(sd_idx);
 948                        sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
 949                        /* setup scatter descriptor */
 950                        sd->ctl.done = 0;
 951                        sd->ctl.rdy = 1;
 952                        if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
 953                                nbytes -= PPC4XX_SD_BUFFER_SIZE;
 954                        else
 955                                /*
 956                                 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
 957                                 * which is more than nbytes, so done.
 958                                 */
 959                                nbytes = 0;
 960                }
 961        }
 962
 963        sa->sa_command_1.bf.hash_crypto_offset = 0;
 964        pd->pd_ctl.w = ctx->pd_ctl;
 965        pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
 966        pd_uinfo->state = PD_ENTRY_INUSE;
 967        wmb();
 968        /* write any value to push engine to read a pd */
 969        writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
 970        return -EINPROGRESS;
 971}
 972
 973/**
 974 * Algorithm Registration Functions
 975 */
 976static int crypto4xx_alg_init(struct crypto_tfm *tfm)
 977{
 978        struct crypto_alg *alg = tfm->__crt_alg;
 979        struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
 980        struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
 981
 982        ctx->dev = amcc_alg->dev;
 983        ctx->sa_in = NULL;
 984        ctx->sa_out = NULL;
 985        ctx->sa_in_dma_addr = 0;
 986        ctx->sa_out_dma_addr = 0;
 987        ctx->sa_len = 0;
 988
 989        switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
 990        default:
 991                tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
 992                break;
 993        case CRYPTO_ALG_TYPE_AHASH:
 994                crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 995                                         sizeof(struct crypto4xx_ctx));
 996                break;
 997        }
 998
 999        return 0;
1000}
1001
1002static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
1003{
1004        struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
1005
1006        crypto4xx_free_sa(ctx);
1007        crypto4xx_free_state_record(ctx);
1008}
1009
1010int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1011                           struct crypto4xx_alg_common *crypto_alg,
1012                           int array_size)
1013{
1014        struct crypto4xx_alg *alg;
1015        int i;
1016        int rc = 0;
1017
1018        for (i = 0; i < array_size; i++) {
1019                alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
1020                if (!alg)
1021                        return -ENOMEM;
1022
1023                alg->alg = crypto_alg[i];
1024                alg->dev = sec_dev;
1025
1026                switch (alg->alg.type) {
1027                case CRYPTO_ALG_TYPE_AHASH:
1028                        rc = crypto_register_ahash(&alg->alg.u.hash);
1029                        break;
1030
1031                default:
1032                        rc = crypto_register_alg(&alg->alg.u.cipher);
1033                        break;
1034                }
1035
1036                if (rc) {
 1037                        /* not yet added to alg_list; just free it */
1038                        kfree(alg);
1039                } else {
1040                        list_add_tail(&alg->entry, &sec_dev->alg_list);
1041                }
1042        }
1043
1044        return 0;
1045}
1046
1047static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
1048{
1049        struct crypto4xx_alg *alg, *tmp;
1050
1051        list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
1052                list_del(&alg->entry);
1053                switch (alg->alg.type) {
1054                case CRYPTO_ALG_TYPE_AHASH:
1055                        crypto_unregister_ahash(&alg->alg.u.hash);
1056                        break;
1057
1058                default:
1059                        crypto_unregister_alg(&alg->alg.u.cipher);
1060                }
1061                kfree(alg);
1062        }
1063}
1064
1065static void crypto4xx_bh_tasklet_cb(unsigned long data)
1066{
1067        struct device *dev = (struct device *)data;
1068        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1069        struct pd_uinfo *pd_uinfo;
1070        struct ce_pd *pd;
1071        u32 tail;
1072
1073        while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
1074                tail = core_dev->dev->pdr_tail;
1075                pd_uinfo = core_dev->dev->pdr_uinfo +
1076                        sizeof(struct pd_uinfo)*tail;
1077                pd =  core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
1078                if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
1079                                   pd->pd_ctl.bf.pe_done &&
1080                                   !pd->pd_ctl.bf.host_ready) {
1081                        pd->pd_ctl.bf.pe_done = 0;
1082                        crypto4xx_pd_done(core_dev->dev, tail);
1083                        crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
1084                        pd_uinfo->state = PD_ENTRY_FREE;
1085                } else {
1086                        /* if tail not done, break */
1087                        break;
1088                }
1089        }
1090}
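/*
 * The bottom half above drains completions strictly in ring order: it
 * starts at pdr_tail and stops at the first descriptor that is still in
 * flight, so a descriptor that happens to finish early is not completed
 * until everything queued before it is done.
 */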
1091
1092/**
 1093 * Top half of the ISR: acknowledge the interrupt and defer to the tasklet.
1094 */
1095static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
1096{
1097        struct device *dev = (struct device *)data;
1098        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1099
1100        if (!core_dev->dev->ce_base)
 1101                return IRQ_NONE;
1102
1103        writel(PPC4XX_INTERRUPT_CLR,
1104               core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
1105        tasklet_schedule(&core_dev->tasklet);
1106
1107        return IRQ_HANDLED;
1108}
1109
1110/**
1111 * Supported Crypto Algorithms
1112 */
1113struct crypto4xx_alg_common crypto4xx_alg[] = {
1114        /* Crypto AES modes */
1115        { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
1116                .cra_name       = "cbc(aes)",
1117                .cra_driver_name = "cbc-aes-ppc4xx",
1118                .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
1119                .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1120                .cra_blocksize  = AES_BLOCK_SIZE,
1121                .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
1122                .cra_type       = &crypto_ablkcipher_type,
1123                .cra_init       = crypto4xx_alg_init,
1124                .cra_exit       = crypto4xx_alg_exit,
1125                .cra_module     = THIS_MODULE,
1126                .cra_u          = {
1127                        .ablkcipher = {
1128                                .min_keysize    = AES_MIN_KEY_SIZE,
1129                                .max_keysize    = AES_MAX_KEY_SIZE,
1130                                .ivsize         = AES_IV_SIZE,
1131                                .setkey         = crypto4xx_setkey_aes_cbc,
1132                                .encrypt        = crypto4xx_encrypt,
1133                                .decrypt        = crypto4xx_decrypt,
1134                        }
1135                }
1136        }},
1137};
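/*
 * A sketch of how a further mode could be added to the table above
 * (illustrative only; crypto4xx_setkey_aes_ecb is an assumed helper, not
 * something this file defines):
 *
 *	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
 *		.cra_name        = "ecb(aes)",
 *		.cra_driver_name = "ecb-aes-ppc4xx",
 *		.cra_priority    = CRYPTO4XX_CRYPTO_PRIORITY,
 *		.cra_flags       = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 *		.cra_blocksize   = AES_BLOCK_SIZE,
 *		.cra_ctxsize     = sizeof(struct crypto4xx_ctx),
 *		.cra_type        = &crypto_ablkcipher_type,
 *		.cra_init        = crypto4xx_alg_init,
 *		.cra_exit        = crypto4xx_alg_exit,
 *		.cra_module      = THIS_MODULE,
 *		.cra_u = { .ablkcipher = {
 *			.min_keysize = AES_MIN_KEY_SIZE,
 *			.max_keysize = AES_MAX_KEY_SIZE,
 *			.setkey      = crypto4xx_setkey_aes_ecb,
 *			.encrypt     = crypto4xx_encrypt,
 *			.decrypt     = crypto4xx_decrypt,
 *		} }
 *	}},
 */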
1138
1139/**
1140 * Module Initialization Routine
1141 */
1142static int crypto4xx_probe(struct platform_device *ofdev)
1143{
1144        int rc;
1145        struct resource res;
1146        struct device *dev = &ofdev->dev;
1147        struct crypto4xx_core_device *core_dev;
1148
1149        rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
1150        if (rc)
1151                return -ENODEV;
1152
1153        if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
1154                mtdcri(SDR0, PPC460EX_SDR0_SRST,
1155                       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
1156                mtdcri(SDR0, PPC460EX_SDR0_SRST,
1157                       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
1158        } else if (of_find_compatible_node(NULL, NULL,
1159                        "amcc,ppc405ex-crypto")) {
1160                mtdcri(SDR0, PPC405EX_SDR0_SRST,
1161                       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
1162                mtdcri(SDR0, PPC405EX_SDR0_SRST,
1163                       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
1164        } else if (of_find_compatible_node(NULL, NULL,
1165                        "amcc,ppc460sx-crypto")) {
1166                mtdcri(SDR0, PPC460SX_SDR0_SRST,
1167                       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
1168                mtdcri(SDR0, PPC460SX_SDR0_SRST,
1169                       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
1170        } else {
1171                printk(KERN_ERR "Crypto Function Not supported!\n");
1172                return -EINVAL;
1173        }
1174
1175        core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
1176        if (!core_dev)
1177                return -ENOMEM;
1178
1179        dev_set_drvdata(dev, core_dev);
1180        core_dev->ofdev = ofdev;
1181        core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
1182        if (!core_dev->dev)
1183                goto err_alloc_dev;
1184
1185        core_dev->dev->core_dev = core_dev;
1186        core_dev->device = dev;
1187        spin_lock_init(&core_dev->lock);
1188        INIT_LIST_HEAD(&core_dev->dev->alg_list);
1189        rc = crypto4xx_build_pdr(core_dev->dev);
1190        if (rc)
1191                goto err_build_pdr;
1192
1193        rc = crypto4xx_build_gdr(core_dev->dev);
1194        if (rc)
1195                goto err_build_gdr;
1196
1197        rc = crypto4xx_build_sdr(core_dev->dev);
1198        if (rc)
1199                goto err_build_sdr;
1200
1201        /* Init tasklet for bottom half processing */
1202        tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
1203                     (unsigned long) dev);
1204
1205        /* Register for Crypto isr, Crypto Engine IRQ */
1206        core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1207        rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
1208                         core_dev->dev->name, dev);
1209        if (rc)
1210                goto err_request_irq;
1211
1212        core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
1213        if (!core_dev->dev->ce_base) {
1214                dev_err(dev, "failed to of_iomap\n");
1215                rc = -ENOMEM;
1216                goto err_iomap;
1217        }
1218
1219        /* need to setup pdr, rdr, gdr and sdr before this */
1220        crypto4xx_hw_init(core_dev->dev);
1221
1222        /* Register security algorithms with Linux CryptoAPI */
1223        rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
1224                               ARRAY_SIZE(crypto4xx_alg));
1225        if (rc)
1226                goto err_start_dev;
1227
1228        ppc4xx_trng_probe(core_dev);
1229        return 0;
1230
1231err_start_dev:
1232        iounmap(core_dev->dev->ce_base);
1233err_iomap:
1234        free_irq(core_dev->irq, dev);
1235err_request_irq:
1236        irq_dispose_mapping(core_dev->irq);
1237        tasklet_kill(&core_dev->tasklet);
1238        crypto4xx_destroy_sdr(core_dev->dev);
1239err_build_sdr:
1240        crypto4xx_destroy_gdr(core_dev->dev);
1241err_build_gdr:
1242        crypto4xx_destroy_pdr(core_dev->dev);
1243err_build_pdr:
1244        kfree(core_dev->dev);
1245err_alloc_dev:
1246        kfree(core_dev);
1247
1248        return rc;
1249}
1250
1251static int crypto4xx_remove(struct platform_device *ofdev)
1252{
1253        struct device *dev = &ofdev->dev;
1254        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1255
1256        ppc4xx_trng_remove(core_dev);
1257
1258        free_irq(core_dev->irq, dev);
1259        irq_dispose_mapping(core_dev->irq);
1260
1261        tasklet_kill(&core_dev->tasklet);
1262        /* Un-register with Linux CryptoAPI */
1263        crypto4xx_unregister_alg(core_dev->dev);
1264        /* Free all allocated memory */
1265        crypto4xx_stop_all(core_dev);
1266
1267        return 0;
1268}
1269
1270static const struct of_device_id crypto4xx_match[] = {
1271        { .compatible      = "amcc,ppc4xx-crypto",},
1272        { },
1273};
1274MODULE_DEVICE_TABLE(of, crypto4xx_match);
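/*
 * Example of a matching device-tree node (property values are illustrative;
 * the real reg/interrupts come from the board dts).  A SoC-specific
 * compatible such as "amcc,ppc460ex-crypto" is listed alongside the generic
 * one so the reset code in crypto4xx_probe() can pick the right SDR0
 * register:
 *
 *	crypto@180000 {
 *		compatible = "amcc,ppc460ex-crypto", "amcc,ppc4xx-crypto";
 *		reg = <4 0x00180000 0x80400>;
 *		interrupt-parent = <&UIC0>;
 *		interrupts = <0x1d 0x4>;
 *	};
 */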
1275
1276static struct platform_driver crypto4xx_driver = {
1277        .driver = {
1278                .name = MODULE_NAME,
1279                .of_match_table = crypto4xx_match,
1280        },
1281        .probe          = crypto4xx_probe,
1282        .remove         = crypto4xx_remove,
1283};
1284
1285module_platform_driver(crypto4xx_driver);
1286
1287MODULE_LICENSE("GPL");
1288MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
1289MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
1290