linux/drivers/crypto/ccp/ccp-dev.c
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rwlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

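/* Context handed to the per-queue completion tasklet: the command whose
 * callback is to be invoked and a completion that the queue thread waits
 * on until that callback has run.
 */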
struct ccp_tasklet_data {
        struct completion completion;
        struct ccp_cmd *cmd;
};

/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin next-CCP pointer and its lock */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

/* Ever-increasing value to produce unique unit numbers */
static atomic_t ccp_unit_ordinal;
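/* Return the next unit ordinal; called from ccp_alloc_struct() so that
 * each CCP gets a unique name.
 */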
unsigned int ccp_increment_unit_ordinal(void)
{
        return atomic_inc_return(&ccp_unit_ordinal);
}

/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
        unsigned long flags;

        write_lock_irqsave(&ccp_unit_lock, flags);
        list_add_tail(&ccp->entry, &ccp_units);
        if (!ccp_rr)
                /* We already have the list lock (we're first) so this
                 * pointer can't change on us. Set its initial value.
                 */
                ccp_rr = ccp;
        write_unlock_irqrestore(&ccp_unit_lock, flags);
}

/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
        unsigned long flags;

        write_lock_irqsave(&ccp_unit_lock, flags);
        if (ccp_rr == ccp) {
                /* ccp_unit_lock is read/write; any read access
                 * will be suspended while we make changes to the
                 * list and RR pointer.
                 */
                if (list_is_last(&ccp_rr->entry, &ccp_units))
                        ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
                                                  entry);
                else
                        ccp_rr = list_next_entry(ccp_rr, entry);
        }
        list_del(&ccp->entry);
        if (list_empty(&ccp_units))
                ccp_rr = NULL;
        write_unlock_irqrestore(&ccp_unit_lock, flags);
}

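/* Return the next CCP to use in round-robin order, or NULL if no devices
 * are registered. Advances ccp_rr under ccp_rr_lock while holding
 * ccp_unit_lock for reading.
 */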
static struct ccp_device *ccp_get_device(void)
{
        unsigned long flags;
        struct ccp_device *dp = NULL;

        /* We round-robin through the unit list.
         * The (ccp_rr) pointer refers to the next unit to use.
         */
        read_lock_irqsave(&ccp_unit_lock, flags);
        if (!list_empty(&ccp_units)) {
                spin_lock(&ccp_rr_lock);
                dp = ccp_rr;
                if (list_is_last(&ccp_rr->entry, &ccp_units))
                        ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
                                                  entry);
                else
                        ccp_rr = list_next_entry(ccp_rr, entry);
                spin_unlock(&ccp_rr_lock);
        }
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return dp;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&ccp_unit_lock, flags);
        ret = list_empty(&ccp_units);
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);

/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list,
 * or zero if no CCP device is present.
 */
unsigned int ccp_version(void)
{
        struct ccp_device *dp;
        unsigned long flags;
        int ret = 0;

        read_lock_irqsave(&ccp_unit_lock, flags);
        if (!list_empty(&ccp_units)) {
                dp = list_first_entry(&ccp_units, struct ccp_device, entry);
                ret = dp->vdata->version;
        }
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * accepted only if the CCP_CMD_MAY_BACKLOG flag is set, and
 * queueing it results in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS, or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
        struct ccp_device *ccp = ccp_get_device();
        unsigned long flags;
        unsigned int i;
        int ret;

        if (!ccp)
                return -ENODEV;

        /* Caller must supply a callback routine */
        if (!cmd->callback)
                return -EINVAL;

        cmd->ccp = ccp;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        i = ccp->cmd_q_count;

        if (ccp->cmd_count >= MAX_CMD_QLEN) {
                ret = -EBUSY;
                if (cmd->flags & CCP_CMD_MAY_BACKLOG)
                        list_add_tail(&cmd->entry, &ccp->backlog);
        } else {
                ret = -EINPROGRESS;
                ccp->cmd_count++;
                list_add_tail(&cmd->entry, &ccp->cmd);

                /* Find an idle queue */
                if (!ccp->suspending) {
                        for (i = 0; i < ccp->cmd_q_count; i++) {
                                if (ccp->cmd_q[i].active)
                                        continue;

                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
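
/* Illustrative usage sketch (not part of the driver): a minimal caller of
 * ccp_enqueue_cmd() following the callback contract described above. The
 * helper names (my_ccp_done, my_submit) and the use of a completion are
 * assumptions for this example; a real caller must also fill in cmd->engine
 * and the engine-specific fields of cmd->u before submitting, and the final
 * operation status, delivered in the callback's "err" argument, is simply
 * discarded here for brevity.
 *
 *        static void my_ccp_done(void *data, int err)
 *        {
 *                struct completion *done = data;
 *
 *                if (err != -EINPROGRESS)
 *                        complete(done);
 *        }
 *
 *        static int my_submit(struct ccp_cmd *cmd)
 *        {
 *                DECLARE_COMPLETION_ONSTACK(done);
 *                int ret;
 *
 *                cmd->callback = my_ccp_done;
 *                cmd->data = &done;
 *                cmd->flags |= CCP_CMD_MAY_BACKLOG;
 *
 *                ret = ccp_enqueue_cmd(cmd);
 *                if (ret != -EINPROGRESS && ret != -EBUSY)
 *                        return ret;
 *
 *                wait_for_completion(&done);
 *                return 0;
 *        }
 */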

/* Process a command that was placed on the backlog: tell the caller it is
 * now in progress (-EINPROGRESS), then add it to the command list and wake
 * an idle queue, mirroring the non-backlog path of ccp_enqueue_cmd().
 */
static void ccp_do_cmd_backlog(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
        struct ccp_device *ccp = cmd->ccp;
        unsigned long flags;
        unsigned int i;

        cmd->callback(cmd->data, -EINPROGRESS);

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->cmd_count++;
        list_add_tail(&cmd->entry, &ccp->cmd);

        /* Find an idle queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                if (ccp->cmd_q[i].active)
                        continue;

                break;
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);
}

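/* Called by a queue thread to take the next command off the device list.
 * Marks the queue idle or active as appropriate, honors a pending suspend
 * request, and hands one backlogged command (if any) to a work item for
 * requeueing.
 */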
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
        struct ccp_device *ccp = cmd_q->ccp;
        struct ccp_cmd *cmd = NULL;
        struct ccp_cmd *backlog = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        cmd_q->active = 0;

        if (ccp->suspending) {
                cmd_q->suspended = 1;

                spin_unlock_irqrestore(&ccp->cmd_lock, flags);
                wake_up_interruptible(&ccp->suspend_queue);

                return NULL;
        }

        if (ccp->cmd_count) {
                cmd_q->active = 1;

                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);

                ccp->cmd_count--;
        }

        if (!list_empty(&ccp->backlog)) {
                backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
                                           entry);
                list_del(&backlog->entry);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        if (backlog) {
                INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
                schedule_work(&backlog->work);
        }

        return cmd;
}

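/* Tasklet handler: run the command's completion callback, then signal the
 * queue thread waiting on tdata->completion.
 */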
static void ccp_do_cmd_complete(unsigned long data)
{
        struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
        struct ccp_cmd *cmd = tdata->cmd;

        cmd->callback(cmd->data, cmd->ret);
        complete(&tdata->completion);
}

/**
 * ccp_cmd_queue_thread - the kernel thread that services a CCP cmd queue
 *
 * @data: thread-specific data
 */
int ccp_cmd_queue_thread(void *data)
{
        struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
        struct ccp_cmd *cmd;
        struct ccp_tasklet_data tdata;
        struct tasklet_struct tasklet;

        tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

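        /* Each pass dequeues one command; when ccp_dequeue_cmd() returns
         * NULL the queue is idle (or suspending) and the thread sleeps in
         * schedule() until woken by a new command or by kthread_stop().
         */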
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();

                set_current_state(TASK_INTERRUPTIBLE);

                cmd = ccp_dequeue_cmd(cmd_q);
                if (!cmd)
                        continue;

                __set_current_state(TASK_RUNNING);

                /* Execute the command */
                cmd->ret = ccp_run_cmd(cmd_q, cmd);

                /* Schedule the completion callback */
                tdata.cmd = cmd;
                init_completion(&tdata.completion);
                tasklet_schedule(&tasklet);
                wait_for_completion(&tdata.completion);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
        struct ccp_device *ccp;

        ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
        if (!ccp)
                return NULL;
        ccp->dev = dev;

        INIT_LIST_HEAD(&ccp->cmd);
        INIT_LIST_HEAD(&ccp->backlog);

        spin_lock_init(&ccp->cmd_lock);
        mutex_init(&ccp->req_mutex);
        mutex_init(&ccp->ksb_mutex);
        ccp->ksb_count = KSB_COUNT;
        ccp->ksb_start = 0;

        ccp->ord = ccp_increment_unit_ordinal();
        snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
        snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);

        return ccp;
}

#ifdef CONFIG_PM
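/* Return true once every command queue on this CCP has acknowledged the
 * suspend request; the bus-specific suspend handlers wait on this.
 */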
bool ccp_queues_suspended(struct ccp_device *ccp)
{
        unsigned int suspended = 0;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].suspended)
                        suspended++;

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return ccp->cmd_q_count == suspended;
}
#endif

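/* Register the bus-specific transport (PCI on x86, platform device on
 * arm64) and make sure at least one CCP was actually found; otherwise
 * unwind so the module does not stay loaded without hardware.
 */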
static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
        int ret;

        ret = ccp_pci_init();
        if (ret)
                return ret;

        /* Don't leave the driver loaded if init failed */
        if (ccp_present() != 0) {
                ccp_pci_exit();
                return -ENODEV;
        }

        return 0;
#endif

#ifdef CONFIG_ARM64
        int ret;

        ret = ccp_platform_init();
        if (ret)
                return ret;

        /* Don't leave the driver loaded if init failed */
        if (ccp_present() != 0) {
                ccp_platform_exit();
                return -ENODEV;
        }

        return 0;
#endif

        return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
        ccp_pci_exit();
#endif

#ifdef CONFIG_ARM64
        ccp_platform_exit();
#endif
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);