linux/drivers/char/tpm/tpm_ibmvtpm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 IBM Corporation
 *
 * Author: Ashley Lai <ashleydlai@gmail.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <asm/vio.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/prom.h>

#include "tpm.h"
#include "tpm_ibmvtpm.h"

static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
        { "IBM,vtpm", "IBM,vtpm"},
        { "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);

/**
 * ibmvtpm_send_crq_word() - Send a CRQ request
 * @vdev:       vio device struct
 * @w1:         pre-constructed first word of tpm crq (second word is reserved)
 *
 * Return:
 *      0 - Success
 *      Non-zero - Failure
 */
static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
{
        return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
}

/**
 * ibmvtpm_send_crq() - Send a CRQ request
 *
 * @vdev:       vio device struct
 * @valid:      Valid field
 * @msg:        Type field
 * @len:        Length field
 * @data:       Data field
 *
 * The ibmvtpm crq is defined as follows:
 *
 * Byte  |   0   |   1   |   2   |   3   |   4   |   5   |   6   |   7
 * -----------------------------------------------------------------------
 * Word0 | Valid | Type  |     Length    |              Data
 * -----------------------------------------------------------------------
 * Word1 |                Reserved
 * -----------------------------------------------------------------------
 *
 * which corresponds to the following structure (on a big-endian host):
 *
 * struct ibmvtpm_crq {
 *         u8 valid;
 *         u8 msg;
 *         __be16 len;
 *         __be32 data;
 *         __be64 reserved;
 * } __attribute__((packed, aligned(8)));
 *
 * However, the value is passed to the hypervisor in a register, so we simply
 * compute the numeric value to load into that register and avoid any byteswap:
 * endianness only affects memory loads and stores, while register contents are
 * represented the same way regardless.
 *
 * Return:
 *      0 (H_SUCCESS) - Success
 *      Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev,
                u8 valid, u8 msg, u16 len, u32 data)
{
        u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
                (u64)data;
        return ibmvtpm_send_crq_word(vdev, w1);
}
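
/*
 * Illustrative packing example (the field values below are hypothetical,
 * not taken from the protocol definitions): valid=0x80, msg=0x02,
 * len=0x0123 and data=0x0badcafe give
 *
 *      w1 = (0x80ULL << 56) | (0x02ULL << 48) | (0x0123ULL << 32) | 0x0badcafe
 *         = 0x800201230badcafe
 *
 * i.e. the same bit pattern a big-endian store of word 0 of struct
 * ibmvtpm_crq would produce.
 */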

/**
 * tpm_ibmvtpm_recv - Receive data after send
 *
 * @chip:       tpm chip struct
 * @buf:        buffer to copy the response into
 * @count:      size of buffer
 *
 * Return:
 *      Number of bytes read, 0 if the device is not ready, or a negative
 *      errno on failure.
 */
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        u16 len;
        int sig;

        if (!ibmvtpm->rtce_buf) {
                dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
                return 0;
        }

        /* Wait for the interrupt handler to post the response. */
        sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
        if (sig)
                return -EINTR;

        len = ibmvtpm->res_len;

        if (count < len) {
                dev_err(ibmvtpm->dev,
                        "Invalid size in recv: count=%zd, crq_size=%d\n",
                        count, len);
                return -EIO;
        }

        spin_lock(&ibmvtpm->rtce_lock);
        memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
        memset(ibmvtpm->rtce_buf, 0, len);
        ibmvtpm->res_len = 0;
        spin_unlock(&ibmvtpm->rtce_lock);
        return len;
}

/**
 * tpm_ibmvtpm_send() - Send a TPM command
 * @chip:       tpm chip struct
 * @buf:        buffer containing the data to send
 * @count:      size of buffer
 *
 * The command is copied into the RTCE buffer and handed to the hypervisor
 * as a CRQ request; the response arrives asynchronously via the interrupt
 * handler and is picked up by tpm_ibmvtpm_recv().
 *
 * Return:
 *   0 on success (a failed hcall is logged and also reported as 0),
 *   -errno on error
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        int rc, sig;

        if (!ibmvtpm->rtce_buf) {
                dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
                return 0;
        }

        if (count > ibmvtpm->rtce_size) {
                dev_err(ibmvtpm->dev,
                        "Invalid size in send: count=%zd, rtce_size=%d\n",
                        count, ibmvtpm->rtce_size);
                return -EIO;
        }

        if (ibmvtpm->tpm_processing_cmd) {
                dev_info(ibmvtpm->dev,
                         "Need to wait for TPM to finish\n");
                /* wait for previous command to finish */
                sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
                if (sig)
                        return -EINTR;
        }

        spin_lock(&ibmvtpm->rtce_lock);
        ibmvtpm->res_len = 0;
        memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);

        /*
         * Set the processing flag before the Hcall, since we may get the
         * result (interrupt) before even being able to check rc.
         */
        ibmvtpm->tpm_processing_cmd = true;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
                        IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
                        count, ibmvtpm->rtce_dma_handle);
        if (rc != H_SUCCESS) {
                dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
                rc = 0;
                ibmvtpm->tpm_processing_cmd = false;
        } else
                rc = 0;

        spin_unlock(&ibmvtpm->rtce_lock);
        return rc;
}

/*
 * No cancel operation is defined by the vTPM CRQ interface, so this is
 * a no-op.
 */
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
}

/*
 * There is no status register to poll; completion is signalled through the
 * CRQ interrupt, so simply report a status of 0.
 */
static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
        return 0;
}

/**
 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
 *
 * @ibmvtpm:    vtpm device struct
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
                        IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

        return rc;
}

/**
 * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
 * @ibmvtpm:    vtpm device struct
 *
 * Note that this is the vtpm version and not the tpm version.
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
                        IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_get_version failed rc=%d\n", rc);

        return rc;
}

/**
 * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
 * @ibmvtpm:    vtpm device struct
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);

        return rc;
}

/**
 * ibmvtpm_crq_send_init - Send a CRQ initialize message
 * @ibmvtpm:    vtpm device struct
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_send_init failed rc=%d\n", rc);

        return rc;
}

/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev:       vio device struct
 *
 * Return: Always 0.
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
        struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        int rc = 0;

        tpm_chip_unregister(chip);

        free_irq(vdev->irq, ibmvtpm);

        /* Retry freeing the CRQ while the hypervisor reports it busy. */
        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
                         CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
        free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

        if (ibmvtpm->rtce_buf) {
                dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
                                 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
                kfree(ibmvtpm->rtce_buf);
        }

        kfree(ibmvtpm);
        /* Clear drvdata so tpm_ibmvtpm_get_desired_dma() sees the device is gone. */
        dev_set_drvdata(&vdev->dev, NULL);

        return 0;
}

/**
 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
 * @vdev:       vio device struct
 *
 * Return:
 *      Number of bytes the driver needs to DMA map.
 */
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
        struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
        struct ibmvtpm_dev *ibmvtpm;

        /*
         * ibmvtpm initializes at probe time, so the data we are
         * asking for may not be set yet. Estimate that 4K is required
         * for the TCE-mapped buffer in addition to the CRQ.
         */
        if (chip)
                ibmvtpm = dev_get_drvdata(&chip->dev);
        else
                return CRQ_RES_BUF_SIZE + PAGE_SIZE;

        return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}

/**
 * tpm_ibmvtpm_suspend - Suspend
 * @dev:        device struct
 *
 * Return: 0 on success, non-zero hcall status on failure.
 */
static int tpm_ibmvtpm_suspend(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        int rc = 0;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
                        IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "tpm_ibmvtpm_suspend failed rc=%d\n", rc);

        return rc;
}

/**
 * ibmvtpm_reset_crq - Reset CRQ
 *
 * @ibmvtpm:    ibm vtpm struct
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
        int rc = 0;

        /* Free the existing CRQ, retrying while the hypervisor is busy. */
        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ,
                                        ibmvtpm->vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
        ibmvtpm->crq_queue.index = 0;

        return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
                                  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}

/**
 * tpm_ibmvtpm_resume - Resume from suspend
 *
 * @dev:        device struct
 *
 * Return: 0 on success, non-zero on failure.
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        int rc = 0;

        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_ENABLE_CRQ,
                                        ibmvtpm->vdev->unit_address);
        } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

        if (rc) {
                dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
                return rc;
        }

        rc = vio_enable_interrupts(ibmvtpm->vdev);
        if (rc) {
                dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
                return rc;
        }

        rc = ibmvtpm_crq_send_init(ibmvtpm);
        if (rc)
                dev_err(dev, "Error send_init rc=%d\n", rc);

        return rc;
}

static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
        return (status == 0);
}

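/*
 * req_complete_mask and req_complete_val are both 0 and status() always
 * returns 0, so the TPM core's polling loop treats every command as
 * complete immediately; actual completion is enforced by
 * tpm_ibmvtpm_recv() blocking on the driver's wait queue until the CRQ
 * response interrupt arrives.
 */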
static const struct tpm_class_ops tpm_ibmvtpm = {
        .recv = tpm_ibmvtpm_recv,
        .send = tpm_ibmvtpm_send,
        .cancel = tpm_ibmvtpm_cancel,
        .status = tpm_ibmvtpm_status,
        .req_complete_mask = 0,
        .req_complete_val = 0,
        .req_canceled = tpm_ibmvtpm_req_canceled,
};

static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
        .suspend = tpm_ibmvtpm_suspend,
        .resume = tpm_ibmvtpm_resume,
};

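/*
 * CRQ response handling, in outline: the hypervisor deposits 16-byte
 * response entries in the page-sized ring at crq_queue.crq_addr and raises
 * the vio interrupt.  An entry is live while its valid byte has the
 * VTPM_MSG_RES bit set; the interrupt handler walks the ring from
 * crq_queue.index, processes each live entry, clears its valid byte so the
 * slot can be reused, and wraps at num_entry.
 */
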
/**
 * ibmvtpm_crq_get_next - Get the next pending response crq
 *
 * @ibmvtpm:    vtpm device struct
 *
 * Return: vtpm crq pointer or NULL.
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
        struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
        struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

        if (crq->valid & VTPM_MSG_RES) {
                if (++crq_q->index == crq_q->num_entry)
                        crq_q->index = 0;
                /* Read the rest of the entry only after seeing the valid byte. */
                smp_rmb();
        } else
                crq = NULL;
        return crq;
}

/**
 * ibmvtpm_crq_process - Process a response crq
 *
 * @crq:        crq to be processed
 * @ibmvtpm:    vtpm device struct
 *
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
                                struct ibmvtpm_dev *ibmvtpm)
{
        int rc = 0;

        switch (crq->valid) {
        case VALID_INIT_CRQ:
                switch (crq->msg) {
                case INIT_CRQ_RES:
                        dev_info(ibmvtpm->dev, "CRQ initialized\n");
                        rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
                        if (rc)
                                dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
                        return;
                case INIT_CRQ_COMP_RES:
                        dev_info(ibmvtpm->dev,
                                 "CRQ initialization completed\n");
                        return;
                default:
                        dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
                        return;
                }
        case IBMVTPM_VALID_CMD:
                switch (crq->msg) {
                case VTPM_GET_RTCE_BUFFER_SIZE_RES:
                        if (be16_to_cpu(crq->len) <= 0) {
                                dev_err(ibmvtpm->dev, "Invalid rtce size\n");
                                return;
                        }
                        ibmvtpm->rtce_size = be16_to_cpu(crq->len);
                        ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
                                                    GFP_ATOMIC);
                        if (!ibmvtpm->rtce_buf) {
                                dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
                                return;
                        }

                        ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
                                ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
                                DMA_BIDIRECTIONAL);

                        if (dma_mapping_error(ibmvtpm->dev,
                                              ibmvtpm->rtce_dma_handle)) {
                                kfree(ibmvtpm->rtce_buf);
                                ibmvtpm->rtce_buf = NULL;
                                dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
                        }

                        return;
                case VTPM_GET_VERSION_RES:
                        ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
                        return;
                case VTPM_TPM_COMMAND_RES:
                        /* length of the data in the rtce buffer */
                        ibmvtpm->res_len = be16_to_cpu(crq->len);
                        ibmvtpm->tpm_processing_cmd = false;
                        wake_up_interruptible(&ibmvtpm->wq);
                        return;
                default:
                        return;
                }
        }
        return;
}

/**
 * ibmvtpm_interrupt - Interrupt handler
 *
 * @irq:                irq number to handle
 * @vtpm_instance:      vtpm that received interrupt
 *
 * Return:
 *      IRQ_HANDLED
 */
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
        struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
        struct ibmvtpm_crq *crq;

        /*
         * The while loop is needed for initial setup (get version and
         * get rtce_size). There should be only one tpm request at any
         * given time.
         */
        while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
                ibmvtpm_crq_process(crq, ibmvtpm);
                crq->valid = 0;
                smp_wmb();
        }

        return IRQ_HANDLED;
}

/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 *
 * @vio_dev:    vio device struct
 * @id:         vio device id struct
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                                   const struct vio_device_id *id)
{
        struct ibmvtpm_dev *ibmvtpm;
        struct device *dev = &vio_dev->dev;
        struct ibmvtpm_crq_queue *crq_q;
        struct tpm_chip *chip;
        int rc = -ENOMEM, rc1;

        chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
        if (IS_ERR(chip))
                return PTR_ERR(chip);

        ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
        if (!ibmvtpm) {
                dev_err(dev, "kzalloc for ibmvtpm failed\n");
                goto cleanup;
        }

        ibmvtpm->dev = dev;
        ibmvtpm->vdev = vio_dev;

        crq_q = &ibmvtpm->crq_queue;
        crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
        if (!crq_q->crq_addr) {
                dev_err(dev, "Unable to allocate memory for crq_addr\n");
                goto cleanup;
        }

        crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
        ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
                                                 CRQ_RES_BUF_SIZE,
                                                 DMA_BIDIRECTIONAL);

        if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
                dev_err(dev, "dma mapping failed\n");
                goto cleanup;
        }

        rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
                                ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
        if (rc == H_RESOURCE)
                rc = ibmvtpm_reset_crq(ibmvtpm);

        if (rc) {
                dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
                goto reg_crq_cleanup;
        }

        rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
                         tpm_ibmvtpm_driver_name, ibmvtpm);
        if (rc) {
                dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
                goto init_irq_cleanup;
        }

        rc = vio_enable_interrupts(vio_dev);
        if (rc) {
                dev_err(dev, "Error %d enabling interrupts\n", rc);
                goto init_irq_cleanup;
        }

        init_waitqueue_head(&ibmvtpm->wq);

        crq_q->index = 0;

        dev_set_drvdata(&chip->dev, ibmvtpm);

        spin_lock_init(&ibmvtpm->rtce_lock);

        rc = ibmvtpm_crq_send_init(ibmvtpm);
        if (rc)
                goto init_irq_cleanup;

        rc = ibmvtpm_crq_get_version(ibmvtpm);
        if (rc)
                goto init_irq_cleanup;

        rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
        if (rc)
                goto init_irq_cleanup;

        return tpm_chip_register(chip);
init_irq_cleanup:
        do {
                rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
        } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
        dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
                         DMA_BIDIRECTIONAL);
cleanup:
        if (ibmvtpm) {
                if (crq_q->crq_addr)
                        free_page((unsigned long)crq_q->crq_addr);
                kfree(ibmvtpm);
        }

        return rc;
}

static struct vio_driver ibmvtpm_driver = {
        .id_table        = tpm_ibmvtpm_device_table,
        .probe           = tpm_ibmvtpm_probe,
        .remove          = tpm_ibmvtpm_remove,
        .get_desired_dma = tpm_ibmvtpm_get_desired_dma,
        .name            = tpm_ibmvtpm_driver_name,
        .pm              = &tpm_ibmvtpm_pm_ops,
};

/**
 * ibmvtpm_module_init - Initialize ibm vtpm module.
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int __init ibmvtpm_module_init(void)
{
        return vio_register_driver(&ibmvtpm_driver);
}

/**
 * ibmvtpm_module_exit - Tear down ibm vtpm module.
 */
static void __exit ibmvtpm_module_exit(void)
{
        vio_unregister_driver(&ibmvtpm_driver);
}

module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");