linux/drivers/crypto/ccp/ccp-dev.c
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

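/* Data handed to the completion tasklet: the command whose callback must
 * run and a completion that the queue thread waits on until the callback
 * has finished.
 */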
struct ccp_tasklet_data {
        struct completion completion;
        struct ccp_cmd *cmd;
};

/* Human-readable error strings */
static char *ccp_error_codes[] = {
        "",
        "ERR 01: ILLEGAL_ENGINE",
        "ERR 02: ILLEGAL_KEY_ID",
        "ERR 03: ILLEGAL_FUNCTION_TYPE",
        "ERR 04: ILLEGAL_FUNCTION_MODE",
        "ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
        "ERR 06: ILLEGAL_FUNCTION_SIZE",
        "ERR 07: Zlib_MISSING_INIT_EOM",
        "ERR 08: ILLEGAL_FUNCTION_RSVD",
        "ERR 09: ILLEGAL_BUFFER_LENGTH",
        "ERR 10: VLSB_FAULT",
        "ERR 11: ILLEGAL_MEM_ADDR",
        "ERR 12: ILLEGAL_MEM_SEL",
        "ERR 13: ILLEGAL_CONTEXT_ID",
        "ERR 14: ILLEGAL_KEY_ADDR",
        "ERR 15: 0xF Reserved",
        "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
        "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
        "ERR 18: CMD_TIMEOUT",
        "ERR 19: IDMA0_AXI_SLVERR",
        "ERR 20: IDMA0_AXI_DECERR",
        "ERR 21: 0x15 Reserved",
        "ERR 22: IDMA1_AXI_SLAVE_FAULT",
        "ERR 23: IDMA1_AXI_DECERR",
        "ERR 24: 0x18 Reserved",
        "ERR 25: ZLIBVHB_AXI_SLVERR",
        "ERR 26: ZLIBVHB_AXI_DECERR",
        "ERR 27: 0x1B Reserved",
        "ERR 28: ZLIB_UNEXPECTED_EOM",
        "ERR 29: ZLIB_EXTRA_DATA",
        "ERR 30: ZLIB_BTYPE",
        "ERR 31: ZLIB_UNDEFINED_SYMBOL",
        "ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
        "ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
        "ERR 34: ZLIB_VHB_ILLEGAL_FETCH",
        "ERR 35: ZLIB_UNCOMPRESSED_LEN",
        "ERR 36: ZLIB_LIMIT_REACHED",
        "ERR 37: ZLIB_CHECKSUM_MISMATCH0",
        "ERR 38: ODMA0_AXI_SLVERR",
        "ERR 39: ODMA0_AXI_DECERR",
        "ERR 40: 0x28 Reserved",
        "ERR 41: ODMA1_AXI_SLVERR",
        "ERR 42: ODMA1_AXI_DECERR",
        "ERR 43: LSB_PARITY_ERR",
};

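/**
 * ccp_log_error - log a human-readable CCP error string
 *
 * @d: ccp_device struct pointer
 * @e: error code reported by the hardware; assumed to be a valid index
 *     into ccp_error_codes[]
 */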
void ccp_log_error(struct ccp_device *d, int e)
{
        dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
}

/* List of CCPs, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin next-CCP pointer and its lock */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

/* Ever-increasing value to produce unique unit numbers */
static atomic_t ccp_unit_ordinal;
static unsigned int ccp_increment_unit_ordinal(void)
{
        return atomic_inc_return(&ccp_unit_ordinal);
}

/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
        unsigned long flags;

        write_lock_irqsave(&ccp_unit_lock, flags);
        list_add_tail(&ccp->entry, &ccp_units);
        if (!ccp_rr)
                /* We already have the list lock (we're first) so this
                 * pointer can't change on us. Set its initial value.
                 */
                ccp_rr = ccp;
        write_unlock_irqrestore(&ccp_unit_lock, flags);
}

/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
        unsigned long flags;

        write_lock_irqsave(&ccp_unit_lock, flags);
        if (ccp_rr == ccp) {
                /* ccp_unit_lock is read/write; any read access
                 * will be suspended while we make changes to the
                 * list and RR pointer.
                 */
                if (list_is_last(&ccp_rr->entry, &ccp_units))
                        ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
                                                  entry);
                else
                        ccp_rr = list_next_entry(ccp_rr, entry);
        }
        list_del(&ccp->entry);
        if (list_empty(&ccp_units))
                ccp_rr = NULL;
        write_unlock_irqrestore(&ccp_unit_lock, flags);
}

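/**
 * ccp_register_rng - register this CCP as a hardware RNG source
 *
 * @ccp: ccp_device struct pointer
 *
 * Register the unit with the hwrng framework using its per-unit rng name
 * and ccp_trng_read() as the read callback.
 */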
int ccp_register_rng(struct ccp_device *ccp)
{
        int ret = 0;

        dev_dbg(ccp->dev, "Registering RNG...\n");
        /* Register an RNG */
        ccp->hwrng.name = ccp->rngname;
        ccp->hwrng.read = ccp_trng_read;
        ret = hwrng_register(&ccp->hwrng);
        if (ret)
                dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

        return ret;
}

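/* Unregister from the hwrng framework (only if an RNG was set up for this unit) */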
void ccp_unregister_rng(struct ccp_device *ccp)
{
        if (ccp->hwrng.name)
                hwrng_unregister(&ccp->hwrng);
}

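/* Select the next CCP to use: take the unit list read lock, then the RR
 * spinlock, return the current round-robin choice and advance the pointer.
 * Returns NULL if no CCP devices are registered.
 */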
static struct ccp_device *ccp_get_device(void)
{
        unsigned long flags;
        struct ccp_device *dp = NULL;

        /* We round-robin through the unit list.
         * The (ccp_rr) pointer refers to the next unit to use.
         */
        read_lock_irqsave(&ccp_unit_lock, flags);
        if (!list_empty(&ccp_units)) {
                spin_lock(&ccp_rr_lock);
                dp = ccp_rr;
                if (list_is_last(&ccp_rr->entry, &ccp_units))
                        ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
                                                  entry);
                else
                        ccp_rr = list_next_entry(ccp_rr, entry);
                spin_unlock(&ccp_rr_lock);
        }
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return dp;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&ccp_unit_lock, flags);
        ret = list_empty(&ccp_units);
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);

/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list,
 * or zero if no CCP device is present.
 */
unsigned int ccp_version(void)
{
        struct ccp_device *dp;
        unsigned long flags;
        int ret = 0;

        read_lock_irqsave(&ccp_unit_lock, flags);
        if (!list_empty(&ccp_units)) {
                dp = list_first_entry(&ccp_units, struct ccp_device, entry);
                ret = dp->vdata->version;
        }
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * queued only if the CCP_CMD_MAY_BACKLOG flag is set, in which
 * case the return code will be -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
        struct ccp_device *ccp;
        unsigned long flags;
        unsigned int i;
        int ret;

        /* Some commands might need to be sent to a specific device */
        ccp = cmd->ccp ? cmd->ccp : ccp_get_device();

        if (!ccp)
                return -ENODEV;

        /* Caller must supply a callback routine */
        if (!cmd->callback)
                return -EINVAL;

        cmd->ccp = ccp;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        i = ccp->cmd_q_count;

        if (ccp->cmd_count >= MAX_CMD_QLEN) {
                ret = -EBUSY;
                if (cmd->flags & CCP_CMD_MAY_BACKLOG)
                        list_add_tail(&cmd->entry, &ccp->backlog);
        } else {
                ret = -EINPROGRESS;
                ccp->cmd_count++;
                list_add_tail(&cmd->entry, &ccp->cmd);

                /* Find an idle queue */
                if (!ccp->suspending) {
                        for (i = 0; i < ccp->cmd_q_count; i++) {
                                if (ccp->cmd_q[i].active)
                                        continue;

                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
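
/* Illustrative caller sketch (not part of this driver; my_callback and
 * my_context are hypothetical). A caller fills in a struct ccp_cmd,
 * supplies a callback, and treats both -EINPROGRESS and -EBUSY (with
 * CCP_CMD_MAY_BACKLOG set) as "successfully queued":
 *
 *      cmd->flags = CCP_CMD_MAY_BACKLOG;
 *      cmd->callback = my_callback;
 *      cmd->data = my_context;
 *      ret = ccp_enqueue_cmd(cmd);
 *      if ((ret != -EINPROGRESS) && (ret != -EBUSY))
 *              pr_err("ccp_enqueue_cmd failed: %d\n", ret);
 */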

/* Work handler that advances a command out of the backlog: notify the
 * caller with -EINPROGRESS, move the command onto the active list, and
 * wake an idle queue thread if one is available.
 */
static void ccp_do_cmd_backlog(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
        struct ccp_device *ccp = cmd->ccp;
        unsigned long flags;
        unsigned int i;

        cmd->callback(cmd->data, -EINPROGRESS);

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->cmd_count++;
        list_add_tail(&cmd->entry, &ccp->cmd);

        /* Find an idle queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                if (ccp->cmd_q[i].active)
                        continue;

                break;
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);
}

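/* Called from the queue thread to pull the next command to run. Marks the
 * queue idle or busy as appropriate, honors a pending suspend request, and
 * kicks one backlogged command (if any) onto a workqueue so it can be
 * promoted to the active list.
 */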
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
        struct ccp_device *ccp = cmd_q->ccp;
        struct ccp_cmd *cmd = NULL;
        struct ccp_cmd *backlog = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        cmd_q->active = 0;

        if (ccp->suspending) {
                cmd_q->suspended = 1;

                spin_unlock_irqrestore(&ccp->cmd_lock, flags);
                wake_up_interruptible(&ccp->suspend_queue);

                return NULL;
        }

        if (ccp->cmd_count) {
                cmd_q->active = 1;

                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);

                ccp->cmd_count--;
        }

        if (!list_empty(&ccp->backlog)) {
                backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
                                           entry);
                list_del(&backlog->entry);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        if (backlog) {
                INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
                schedule_work(&backlog->work);
        }

        return cmd;
}

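/* Tasklet handler: invoke the completed command's callback, then signal the
 * waiting queue thread that the callback has finished.
 */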
static void ccp_do_cmd_complete(unsigned long data)
{
        struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
        struct ccp_cmd *cmd = tdata->cmd;

        cmd->callback(cmd->data, cmd->ret);
        complete(&tdata->completion);
}

/**
 * ccp_cmd_queue_thread - the kernel thread that manages a CCP queue
 *
 * @data: thread-specific data (the struct ccp_cmd_queue to service)
 */
int ccp_cmd_queue_thread(void *data)
{
        struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
        struct ccp_cmd *cmd;
        struct ccp_tasklet_data tdata;
        struct tasklet_struct tasklet;

        tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();

                set_current_state(TASK_INTERRUPTIBLE);

                cmd = ccp_dequeue_cmd(cmd_q);
                if (!cmd)
                        continue;

                __set_current_state(TASK_RUNNING);

                /* Execute the command */
                cmd->ret = ccp_run_cmd(cmd_q, cmd);

                /* Schedule the completion callback */
                tdata.cmd = cmd;
                init_completion(&tdata.completion);
                tasklet_schedule(&tasklet);
                wait_for_completion(&tdata.completion);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}

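/* This is the thread body itself, not a thread creator; the bus-specific
 * init code is expected to start one instance per command queue, e.g. with
 * something like (illustrative only, identifiers may differ):
 *
 *      cmd_q->kthread = kthread_run(ccp_cmd_queue_thread, cmd_q,
 *                                   "%s-q%u", ccp->name, cmd_q->id);
 *
 * and to stop it with kthread_stop() on teardown.
 */
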
/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
        struct ccp_device *ccp;

        ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
        if (!ccp)
                return NULL;
        ccp->dev = dev;

        INIT_LIST_HEAD(&ccp->cmd);
        INIT_LIST_HEAD(&ccp->backlog);

        spin_lock_init(&ccp->cmd_lock);
        mutex_init(&ccp->req_mutex);
        mutex_init(&ccp->sb_mutex);
        ccp->sb_count = KSB_COUNT;
        ccp->sb_start = 0;

        /* Initialize the wait queues */
        init_waitqueue_head(&ccp->sb_queue);
        init_waitqueue_head(&ccp->suspend_queue);

        ccp->ord = ccp_increment_unit_ordinal();
        snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
        snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);

        return ccp;
}

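/* hwrng .read callback: pull up to four bytes from the TRNG output register.
 * Returns the number of bytes copied, 0 if no entropy was available yet, or
 * -EIO after too many consecutive empty reads.
 */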
int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
        struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
        u32 trng_value;
        int len = min_t(int, sizeof(trng_value), max);

        /* Locking is provided by the caller so we can update device
         * hwrng-related fields safely
         */
        trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
        if (!trng_value) {
                /* Zero is returned if no data is available or if a
                 * bad-entropy error is present. Assume an error if
                 * we exceed TRNG_RETRIES reads of zero.
                 */
                if (ccp->hwrng_retries++ > TRNG_RETRIES)
                        return -EIO;

                return 0;
        }

        /* Reset the counter and save the rng value */
        ccp->hwrng_retries = 0;
        memcpy(data, &trng_value, len);

        return len;
}

#ifdef CONFIG_PM
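/* Return true once every command queue has marked itself suspended
 * (see cmd_q->suspended in ccp_dequeue_cmd()).
 */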
bool ccp_queues_suspended(struct ccp_device *ccp)
{
        unsigned int suspended = 0;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].suspended)
                        suspended++;

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return ccp->cmd_q_count == suspended;
}
#endif

static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
        int ret;

        ret = ccp_pci_init();
        if (ret)
                return ret;

        /* Don't leave the driver loaded if init failed */
        if (ccp_present() != 0) {
                ccp_pci_exit();
                return -ENODEV;
        }

        return 0;
#endif

#ifdef CONFIG_ARM64
        int ret;

        ret = ccp_platform_init();
        if (ret)
                return ret;

        /* Don't leave the driver loaded if init failed */
        if (ccp_present() != 0) {
                ccp_platform_exit();
                return -ENODEV;
        }

        return 0;
#endif

        return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
        ccp_pci_exit();
#endif

#ifdef CONFIG_ARM64
        ccp_platform_exit();
#endif
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);