linux/drivers/char/tpm/tpm_ibmvtpm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2020 IBM Corporation
 *
 * Author: Ashley Lai <ashleydlai@gmail.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <asm/vio.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/prom.h>

#include "tpm.h"
#include "tpm_ibmvtpm.h"

static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
        { "IBM,vtpm", "IBM,vtpm"},
        { "IBM,vtpm", "IBM,vtpm20"},
        { "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);

/**
 * ibmvtpm_send_crq_word() - Send a CRQ request
 * @vdev:       vio device struct
 * @w1:         pre-constructed first word of tpm crq (second word is reserved)
 *
 * Return:
 *      0 - Success
 *      Non-zero - Failure
 */
static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
{
        return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
}

/**
 * ibmvtpm_send_crq() - Send a CRQ request
 *
 * @vdev:       vio device struct
 * @valid:      Valid field
 * @msg:        Type field
 * @len:        Length field
 * @data:       Data field
 *
 * The ibmvtpm crq is defined as follows:
 *
 * Byte  |   0   |   1   |   2   |   3   |   4   |   5   |   6   |   7
 * -----------------------------------------------------------------------
 * Word0 | Valid | Type  |     Length    |              Data
 * -----------------------------------------------------------------------
 * Word1 |                Reserved
 * -----------------------------------------------------------------------
 *
 * Which matches the following structure (on bigendian host):
 *
 * struct ibmvtpm_crq {
 *         u8 valid;
 *         u8 msg;
 *         __be16 len;
 *         __be32 data;
 *         __be64 reserved;
 * } __attribute__((packed, aligned(8)));
 *
 * However, the value is passed in a register so just compute the numeric value
 * to load into the register avoiding byteswap altogether. Endian only affects
 * memory loads and stores - registers are internally represented the same.
 *
 * Return:
 *      0 (H_SUCCESS) - Success
 *      Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev,
                u8 valid, u8 msg, u16 len, u32 data)
{
        u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
                (u64)data;
        return ibmvtpm_send_crq_word(vdev, w1);
}
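
/*
 * Worked example (illustrative only, the field values are hypothetical):
 * for valid=0xAB, msg=0xCD, len=0x1234 and data=0x56789ABC the packing
 * above yields
 *
 *   (0xABULL << 56) | (0xCDULL << 48) | (0x1234ULL << 32) | 0x56789ABC
 *       = 0xABCD123456789ABC
 *
 * so byte 0 of the register holds the valid field, byte 1 the type,
 * bytes 2-3 the length and bytes 4-7 the data, matching the big-endian
 * layout documented above.
 */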

/**
 * tpm_ibmvtpm_recv - Receive data after send
 *
 * @chip:       tpm chip struct
 * @buf:        buffer to read
 * @count:      size of buffer
 *
 * Return:
 *      Number of bytes read
 */
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        u16 len;
        int sig;

        if (!ibmvtpm->rtce_buf) {
                dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
                return 0;
        }

        sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
        if (sig)
                return -EINTR;

        len = ibmvtpm->res_len;

        if (count < len) {
                dev_err(ibmvtpm->dev,
                        "Invalid size in recv: count=%zd, crq_size=%d\n",
                        count, len);
                return -EIO;
        }

        spin_lock(&ibmvtpm->rtce_lock);
        memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
        memset(ibmvtpm->rtce_buf, 0, len);
        ibmvtpm->res_len = 0;
        spin_unlock(&ibmvtpm->rtce_lock);
        return len;
}

/**
 * ibmvtpm_crq_send_init - Send a CRQ initialize message
 * @ibmvtpm:    vtpm device struct
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "%s failed rc=%d\n", __func__, rc);

        return rc;
}

/**
 * tpm_ibmvtpm_resume - Resume from suspend
 *
 * @dev:        device struct
 *
 * Return: Always 0.
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        int rc = 0;

        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_ENABLE_CRQ,
                                        ibmvtpm->vdev->unit_address);
        } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

        if (rc) {
                dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
                return rc;
        }

        rc = vio_enable_interrupts(ibmvtpm->vdev);
        if (rc) {
                dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
                return rc;
        }

        rc = ibmvtpm_crq_send_init(ibmvtpm);
        if (rc)
                dev_err(dev, "Error send_init rc=%d\n", rc);

        return rc;
}

/**
 * tpm_ibmvtpm_send() - Send a TPM command
 * @chip:       tpm chip struct
 * @buf:        buffer contains data to send
 * @count:      size of buffer
 *
 * Return:
 *   0 on success,
 *   -errno on error
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        bool retry = true;
        int rc, sig;

        if (!ibmvtpm->rtce_buf) {
                dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
                return 0;
        }

        if (count > ibmvtpm->rtce_size) {
                dev_err(ibmvtpm->dev,
                        "Invalid size in send: count=%zd, rtce_size=%d\n",
                        count, ibmvtpm->rtce_size);
                return -EIO;
        }

        if (ibmvtpm->tpm_processing_cmd) {
                dev_info(ibmvtpm->dev,
                         "Need to wait for TPM to finish\n");
                /* wait for previous command to finish */
                sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
                if (sig)
                        return -EINTR;
        }

        spin_lock(&ibmvtpm->rtce_lock);
        ibmvtpm->res_len = 0;
        memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);

        /*
         * set the processing flag before the Hcall, since we may get the
         * result (interrupt) before even being able to check rc.
         */
        ibmvtpm->tpm_processing_cmd = true;

again:
        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
                        IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
                        count, ibmvtpm->rtce_dma_handle);
        if (rc != H_SUCCESS) {
                /*
                 * H_CLOSED can be returned after LPM resume.  Call
                 * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
                 * ibmvtpm_send_crq() once before failing.
                 */
                if (rc == H_CLOSED && retry) {
                        tpm_ibmvtpm_resume(ibmvtpm->dev);
                        retry = false;
                        goto again;
                }
                dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
                ibmvtpm->tpm_processing_cmd = false;
        }

        spin_unlock(&ibmvtpm->rtce_lock);
        return 0;
}

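/*
 * Note: the CRQ transport is interrupt driven; command completion is
 * reported by a VTPM_TPM_COMMAND_RES message and signalled through
 * ibmvtpm->wq, on which tpm_ibmvtpm_recv() sleeps. There is no status
 * register to poll or cancel against, so .status and .cancel below are
 * stubs, and req_complete_mask/req_complete_val of 0 let the TPM core
 * proceed straight to recv().
 */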
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
        return;
}

static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
        return 0;
}

/**
 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
 *
 * @ibmvtpm:    vtpm device struct
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
                        IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

        return rc;
}

/**
 * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
 *                         - Note that this is vtpm version and not tpm version
 *
 * @ibmvtpm:    vtpm device struct
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
                        IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_get_version failed rc=%d\n", rc);

        return rc;
}

/**
 * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
 * @ibmvtpm:    vtpm device struct
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);

        return rc;
}

/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev:       vio device struct
 *
 * Return: Always 0.
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
        struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        int rc = 0;

        tpm_chip_unregister(chip);

        free_irq(vdev->irq, ibmvtpm);

        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
                         CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
        free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

        if (ibmvtpm->rtce_buf) {
                dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
                                 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
                kfree(ibmvtpm->rtce_buf);
        }

        kfree(ibmvtpm);
        /* For tpm_ibmvtpm_get_desired_dma */
        dev_set_drvdata(&vdev->dev, NULL);

        return 0;
}

/**
 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
 * @vdev:       vio device struct
 *
 * Return:
 *      Number of bytes the driver needs to DMA map.
 */
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
        struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
        struct ibmvtpm_dev *ibmvtpm;

        /*
         * ibmvtpm initializes at probe time, so the data we are
         * asking for may not be set yet. Estimate that 4K required
         * for TCE-mapped buffer in addition to CRQ.
         */
        if (chip)
                ibmvtpm = dev_get_drvdata(&chip->dev);
        else
                return CRQ_RES_BUF_SIZE + PAGE_SIZE;

        return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}

/**
 * tpm_ibmvtpm_suspend - Suspend
 * @dev:        device struct
 *
 * Return: Always 0.
 */
static int tpm_ibmvtpm_suspend(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        int rc = 0;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
                        IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "tpm_ibmvtpm_suspend failed rc=%d\n", rc);

        return rc;
}

/**
 * ibmvtpm_reset_crq - Reset CRQ
 *
 * @ibmvtpm:    ibm vtpm struct
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
        int rc = 0;

        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ,
                                        ibmvtpm->vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
        ibmvtpm->crq_queue.index = 0;

        return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
                                  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}

static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
        return (status == 0);
}

static const struct tpm_class_ops tpm_ibmvtpm = {
        .recv = tpm_ibmvtpm_recv,
        .send = tpm_ibmvtpm_send,
        .cancel = tpm_ibmvtpm_cancel,
        .status = tpm_ibmvtpm_status,
        .req_complete_mask = 0,
        .req_complete_val = 0,
        .req_canceled = tpm_ibmvtpm_req_canceled,
};

static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
        .suspend = tpm_ibmvtpm_suspend,
        .resume = tpm_ibmvtpm_resume,
};

/**
 * ibmvtpm_crq_get_next - Get next responded crq
 *
 * @ibmvtpm:    vtpm device struct
 *
 * Return: vtpm crq pointer or NULL.
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
        struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
        struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

        if (crq->valid & VTPM_MSG_RES) {
                if (++crq_q->index == crq_q->num_entry)
                        crq_q->index = 0;
                smp_rmb();
        } else
                crq = NULL;
        return crq;
}

/**
 * ibmvtpm_crq_process - Process responded crq
 *
 * @crq:        crq to be processed
 * @ibmvtpm:    vtpm device struct
 *
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
                                struct ibmvtpm_dev *ibmvtpm)
{
        int rc = 0;

        switch (crq->valid) {
        case VALID_INIT_CRQ:
                switch (crq->msg) {
                case INIT_CRQ_RES:
                        dev_info(ibmvtpm->dev, "CRQ initialized\n");
                        rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
                        if (rc)
                                dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
                        return;
                case INIT_CRQ_COMP_RES:
                        dev_info(ibmvtpm->dev,
                                 "CRQ initialization completed\n");
                        return;
                default:
                        dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
                        return;
                }
        case IBMVTPM_VALID_CMD:
                switch (crq->msg) {
                case VTPM_GET_RTCE_BUFFER_SIZE_RES:
                        if (be16_to_cpu(crq->len) <= 0) {
                                dev_err(ibmvtpm->dev, "Invalid rtce size\n");
                                return;
                        }
                        ibmvtpm->rtce_size = be16_to_cpu(crq->len);
                        ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
                                                    GFP_ATOMIC);
                        if (!ibmvtpm->rtce_buf) {
                                dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
                                return;
                        }

                        ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
                                ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
                                DMA_BIDIRECTIONAL);

                        if (dma_mapping_error(ibmvtpm->dev,
                                              ibmvtpm->rtce_dma_handle)) {
                                kfree(ibmvtpm->rtce_buf);
                                ibmvtpm->rtce_buf = NULL;
                                dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
                        }

                        return;
                case VTPM_GET_VERSION_RES:
                        ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
                        return;
                case VTPM_TPM_COMMAND_RES:
                        /* len of the data in rtce buffer */
                        ibmvtpm->res_len = be16_to_cpu(crq->len);
                        ibmvtpm->tpm_processing_cmd = false;
                        wake_up_interruptible(&ibmvtpm->wq);
                        return;
                default:
                        return;
                }
        }
        return;
}

/**
 * ibmvtpm_interrupt -  Interrupt handler
 *
 * @irq:                irq number to handle
 * @vtpm_instance:      vtpm that received interrupt
 *
 * Returns:
 *      IRQ_HANDLED
 **/
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
        struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
        struct ibmvtpm_crq *crq;

        /* while loop is needed for initial setup (get version and
         * get rtce_size). There should be only one tpm request at any
         * given time.
         */
        while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
                ibmvtpm_crq_process(crq, ibmvtpm);
                wake_up_interruptible(&ibmvtpm->crq_queue.wq);
                crq->valid = 0;
                smp_wmb();
        }

        return IRQ_HANDLED;
}

/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 *
 * @vio_dev:    vio device struct
 * @id:         vio device id struct
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                                   const struct vio_device_id *id)
{
        struct ibmvtpm_dev *ibmvtpm;
        struct device *dev = &vio_dev->dev;
        struct ibmvtpm_crq_queue *crq_q;
        struct tpm_chip *chip;
        int rc = -ENOMEM, rc1;

        chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
        if (IS_ERR(chip))
                return PTR_ERR(chip);

        ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
        if (!ibmvtpm) {
                dev_err(dev, "kzalloc for ibmvtpm failed\n");
                goto cleanup;
        }

        ibmvtpm->dev = dev;
        ibmvtpm->vdev = vio_dev;

        crq_q = &ibmvtpm->crq_queue;
        crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
        if (!crq_q->crq_addr) {
                dev_err(dev, "Unable to allocate memory for crq_addr\n");
                goto cleanup;
        }

        crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
        init_waitqueue_head(&crq_q->wq);
        ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
                                                 CRQ_RES_BUF_SIZE,
                                                 DMA_BIDIRECTIONAL);

        if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
                dev_err(dev, "dma mapping failed\n");
                goto cleanup;
        }

        rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
                                ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
        if (rc == H_RESOURCE)
                rc = ibmvtpm_reset_crq(ibmvtpm);

        if (rc) {
                dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
                goto reg_crq_cleanup;
        }

        rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
                         tpm_ibmvtpm_driver_name, ibmvtpm);
        if (rc) {
                dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
                goto init_irq_cleanup;
        }

        rc = vio_enable_interrupts(vio_dev);
        if (rc) {
                dev_err(dev, "Error %d enabling interrupts\n", rc);
                goto init_irq_cleanup;
        }

        init_waitqueue_head(&ibmvtpm->wq);

        crq_q->index = 0;

        dev_set_drvdata(&chip->dev, ibmvtpm);

        spin_lock_init(&ibmvtpm->rtce_lock);

        rc = ibmvtpm_crq_send_init(ibmvtpm);
        if (rc)
                goto init_irq_cleanup;

        rc = ibmvtpm_crq_get_version(ibmvtpm);
        if (rc)
                goto init_irq_cleanup;

        rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
        if (rc)
                goto init_irq_cleanup;

        if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
                                ibmvtpm->rtce_buf != NULL,
                                HZ)) {
                dev_err(dev, "CRQ response timed out\n");
                goto init_irq_cleanup;
        }

        if (!strcmp(id->compat, "IBM,vtpm20")) {
                chip->flags |= TPM_CHIP_FLAG_TPM2;
                rc = tpm2_get_cc_attrs_tbl(chip);
                if (rc)
                        goto init_irq_cleanup;
        }

        return tpm_chip_register(chip);
init_irq_cleanup:
        do {
                rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
        } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
        dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
                         DMA_BIDIRECTIONAL);
cleanup:
        if (ibmvtpm) {
                if (crq_q->crq_addr)
                        free_page((unsigned long)crq_q->crq_addr);
                kfree(ibmvtpm);
        }

        return rc;
}

static struct vio_driver ibmvtpm_driver = {
        .id_table        = tpm_ibmvtpm_device_table,
        .probe           = tpm_ibmvtpm_probe,
        .remove          = tpm_ibmvtpm_remove,
        .get_desired_dma = tpm_ibmvtpm_get_desired_dma,
        .name            = tpm_ibmvtpm_driver_name,
        .pm              = &tpm_ibmvtpm_pm_ops,
};

/**
 * ibmvtpm_module_init - Initialize ibm vtpm module.
 *
 * Return:
 *      0 on success.
 *      Non-zero on failure.
 */
static int __init ibmvtpm_module_init(void)
{
        return vio_register_driver(&ibmvtpm_driver);
}

/**
 * ibmvtpm_module_exit - Tear down ibm vtpm module.
 */
static void __exit ibmvtpm_module_exit(void)
{
        vio_unregister_driver(&ibmvtpm_driver);
}

module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");