linux/drivers/crypto/cavium/cpt/cptpf_main.c
/*
 * Copyright (C) 2016 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/version.h>

#include "cptpf.h"

#define DRV_NAME        "thunder-cpt"
#define DRV_VERSION     "1.0"

static u32 num_vfs = 4; /* Default 4 VFs enabled */
module_param(num_vfs, uint, 0444);
MODULE_PARM_DESC(num_vfs, "Number of VFs to enable (4-16)");

/*
 * Disable cores specified by coremask
 */
static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
                              u8 type, u8 grp)
{
        u64 pf_exe_ctl;
        u32 timeout = 100;
        u64 grpmask = 0;
        struct device *dev = &cpt->pdev->dev;

        if (type == AE_TYPES)
                coremask = (coremask << cpt->max_se_cores);

        /* Disengage the cores from groups */
        grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
                        (grpmask & ~coremask));
        udelay(CSR_DELAY);
        grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
        while (grp & coremask) {
                dev_err(dev, "Cores still busy %llx", coremask);
                grp = cpt_read_csr64(cpt->reg_base,
                                     CPTX_PF_EXEC_BUSY(0));
                if (!timeout--)
                        break;

                udelay(CSR_DELAY);
        }

        /* Disable the cores */
        pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
                        (pf_exe_ctl & ~coremask));
        udelay(CSR_DELAY);
}

/*
 * Enable cores specified by coremask
 */
static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask,
                             u8 type)
{
        u64 pf_exe_ctl;

        if (type == AE_TYPES)
                coremask = (coremask << cpt->max_se_cores);

        pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
                        (pf_exe_ctl | coremask));
        udelay(CSR_DELAY);
}

static void cpt_configure_group(struct cpt_device *cpt, u8 grp,
                                u64 coremask, u8 type)
{
        u64 pf_gx_en = 0;

        if (type == AE_TYPES)
                coremask = (coremask << cpt->max_se_cores);

        pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
                        (pf_gx_en | coremask));
        udelay(CSR_DELAY);
}

static void cpt_disable_mbox_interrupts(struct cpt_device *cpt)
{
        /* Clear mbox(0) interrupts for all VFs */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull);
}

static void cpt_disable_ecc_interrupts(struct cpt_device *cpt)
{
        /* Clear ecc(0) interrupts for all VFs */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull);
}

static void cpt_disable_exec_interrupts(struct cpt_device *cpt)
{
        /* Clear exec interrupts for all VFs */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull);
}

static void cpt_disable_all_interrupts(struct cpt_device *cpt)
{
        cpt_disable_mbox_interrupts(cpt);
        cpt_disable_ecc_interrupts(cpt);
        cpt_disable_exec_interrupts(cpt);
}

static void cpt_enable_mbox_interrupts(struct cpt_device *cpt)
{
        /* Set mbox(0) interrupts for all VFs */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull);
}

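/*
 * Point the UCODE_BASE register of each core selected in mcode->core_mask
 * at the DMA address of the microcode image. Engine slots 0-9 belong to
 * SE cores; AE cores follow.
 */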
static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode)
{
        int ret = 0, core = 0, shift = 0;
        u32 total_cores = 0;
        struct device *dev = &cpt->pdev->dev;

        if (!mcode || !mcode->code) {
                dev_err(dev, "Either mcode is NULL or its code pointer is NULL\n");
                return -EINVAL;
        }

        if (mcode->code_size == 0) {
                dev_err(dev, "microcode size is 0\n");
                return -EINVAL;
        }

        /* Assumes 0-9 are SE cores for UCODE_BASE registers and
         * AE core bases follow
         */
        if (mcode->is_ae) {
                core = CPT_MAX_SE_CORES; /* start counting from 10 */
                total_cores = CPT_MAX_TOTAL_CORES; /* up to 15 */
        } else {
                core = 0; /* start counting from 0 */
                total_cores = CPT_MAX_SE_CORES; /* up to 9 */
        }

        /* Point to microcode for each core of the group */
        for (; core < total_cores ; core++, shift++) {
                if (mcode->core_mask & (1 << shift)) {
                        cpt_write_csr64(cpt->reg_base,
                                        CPTX_PF_ENGX_UCODE_BASE(0, core),
                                        (u64)mcode->phys_base);
                }
        }
        return ret;
}

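/*
 * Bind a freshly loaded microcode image to the next free core group:
 * disengage the requested cores, program their UCODE_BASE registers,
 * then add them to the group mask and re-enable them.
 */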
static int do_cpt_init(struct cpt_device *cpt, struct microcode *mcode)
{
        int ret = 0;
        struct device *dev = &cpt->pdev->dev;

        /* Make device not ready */
        cpt->flags &= ~CPT_FLAG_DEVICE_READY;
        /* Disable All PF interrupts */
        cpt_disable_all_interrupts(cpt);
        /* Calculate mcode group and coremasks */
        if (mcode->is_ae) {
                if (mcode->num_cores > cpt->max_ae_cores) {
                        dev_err(dev, "Requested more cores than available AE cores\n");
                        ret = -EINVAL;
                        goto cpt_init_fail;
                }

                if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
                        dev_err(dev, "Can't load, all eight microcode groups in use");
                        return -ENFILE;
                }

                mcode->group = cpt->next_group;
                /* Convert requested cores to mask */
                mcode->core_mask = GENMASK(mcode->num_cores, 0);
                cpt_disable_cores(cpt, mcode->core_mask, AE_TYPES,
                                  mcode->group);
                /* Load microcode for AE engines */
                ret = cpt_load_microcode(cpt, mcode);
                if (ret) {
                        dev_err(dev, "Microcode load failed for %s\n",
                                mcode->version);
                        goto cpt_init_fail;
                }
                cpt->next_group++;
                /* Configure group mask for the mcode */
                cpt_configure_group(cpt, mcode->group, mcode->core_mask,
                                    AE_TYPES);
                /* Enable AE cores for the group mask */
                cpt_enable_cores(cpt, mcode->core_mask, AE_TYPES);
        } else {
                if (mcode->num_cores > cpt->max_se_cores) {
                        dev_err(dev, "Requested more cores than available SE cores\n");
                        ret = -EINVAL;
                        goto cpt_init_fail;
                }
                if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
                        dev_err(dev, "Can't load, all eight microcode groups in use");
                        return -ENFILE;
                }

                mcode->group = cpt->next_group;
                /* Convert requested cores to mask */
                mcode->core_mask = GENMASK(mcode->num_cores, 0);
                cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES,
                                  mcode->group);
                /* Load microcode for SE engines */
                ret = cpt_load_microcode(cpt, mcode);
                if (ret) {
                        dev_err(dev, "Microcode load failed for %s\n",
                                mcode->version);
                        goto cpt_init_fail;
                }
                cpt->next_group++;
                /* Configure group mask for the mcode */
                cpt_configure_group(cpt, mcode->group, mcode->core_mask,
                                    SE_TYPES);
                /* Enable SE cores for the group mask */
                cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES);
        }

        /* Enable PF mailbox interrupts */
        cpt_enable_mbox_interrupts(cpt);
        cpt->flags |= CPT_FLAG_DEVICE_READY;

        return ret;

cpt_init_fail:
        /* Enable PF mailbox interrupts */
        cpt_enable_mbox_interrupts(cpt);

        return ret;
}

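/*
 * Header at the start of a microcode firmware image. code_length is stored
 * big-endian (hence the ntohl() below); the code payload follows the header.
 */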
struct ucode_header {
        u8 version[CPT_UCODE_VERSION_SZ];
        u32 code_length;
        u32 data_length;
        u64 sram_address;
};

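/*
 * Fetch a microcode image via request_firmware(), copy it into a
 * DMA-coherent buffer in the byte order the engines expect, and hand it
 * to do_cpt_init() to be bound to a core group.
 */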
static int cpt_ucode_load_fw(struct cpt_device *cpt, const char *fw, bool is_ae)
{
        const struct firmware *fw_entry;
        struct device *dev = &cpt->pdev->dev;
        struct ucode_header *ucode;
        struct microcode *mcode;
        int j, ret = 0;

        ret = request_firmware(&fw_entry, fw, dev);
        if (ret)
                return ret;

        ucode = (struct ucode_header *)fw_entry->data;
        mcode = &cpt->mcode[cpt->next_mc_idx];
        memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
        mcode->code_size = ntohl(ucode->code_length) * 2;
        if (!mcode->code_size) {
                ret = -EINVAL;
                goto fw_release;
        }

        mcode->is_ae = is_ae;
        mcode->core_mask = 0ULL;
        mcode->num_cores = is_ae ? 6 : 10;

        /* Allocate DMAable space */
        mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size,
                                          &mcode->phys_base, GFP_KERNEL);
        if (!mcode->code) {
                dev_err(dev, "Unable to allocate space for microcode");
                ret = -ENOMEM;
                goto fw_release;
        }

        memcpy((void *)mcode->code, (void *)(fw_entry->data + sizeof(*ucode)),
               mcode->code_size);

        /* Byte swap 64-bit */
        for (j = 0; j < (mcode->code_size / 8); j++)
                ((u64 *)mcode->code)[j] = cpu_to_be64(((u64 *)mcode->code)[j]);
        /* MC needs 16-bit swap */
        for (j = 0; j < (mcode->code_size / 2); j++)
                ((u16 *)mcode->code)[j] = cpu_to_be16(((u16 *)mcode->code)[j]);

        dev_dbg(dev, "mcode->code_size = %u\n", mcode->code_size);
        dev_dbg(dev, "mcode->is_ae = %u\n", mcode->is_ae);
        dev_dbg(dev, "mcode->num_cores = %u\n", mcode->num_cores);
        dev_dbg(dev, "mcode->code = %llx\n", (u64)mcode->code);
        dev_dbg(dev, "mcode->phys_base = %llx\n", mcode->phys_base);

        ret = do_cpt_init(cpt, mcode);
        if (ret) {
                dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
                goto fw_release;
        }

        dev_info(dev, "Microcode Loaded %s\n", mcode->version);
        mcode->is_mc_valid = 1;
        cpt->next_mc_idx++;

fw_release:
        release_firmware(fw_entry);

        return ret;
}

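/* Load the AE microcode image first, then the SE image */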
static int cpt_ucode_load(struct cpt_device *cpt)
{
        int ret = 0;
        struct device *dev = &cpt->pdev->dev;

        ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true);
        if (ret) {
                dev_err(dev, "ae:cpt_ucode_load failed with ret: %d\n", ret);
                return ret;
        }
        ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false);
        if (ret) {
                dev_err(dev, "se:cpt_ucode_load failed with ret: %d\n", ret);
                return ret;
        }

        return ret;
}

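/* MSI-X handler for mailbox 0: service pending VF-to-PF mailbox requests */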
static irqreturn_t cpt_mbx0_intr_handler(int irq, void *cpt_irq)
{
        struct cpt_device *cpt = (struct cpt_device *)cpt_irq;

        cpt_mbox_intr_handler(cpt, 0);

        return IRQ_HANDLED;
}

static void cpt_reset(struct cpt_device *cpt)
{
        cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1);
}

static void cpt_find_max_enabled_cores(struct cpt_device *cpt)
{
        union cptx_pf_constants pf_cnsts = {0};

        pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0));
        cpt->max_se_cores = pf_cnsts.s.se;
        cpt->max_ae_cores = pf_cnsts.s.ae;
}

static u32 cpt_check_bist_status(struct cpt_device *cpt)
{
        union cptx_pf_bist_status bist_sts = {0};

        bist_sts.u = cpt_read_csr64(cpt->reg_base,
                                    CPTX_PF_BIST_STATUS(0));

        return bist_sts.u;
}

static u64 cpt_check_exe_bist_status(struct cpt_device *cpt)
{
        union cptx_pf_exe_bist_status bist_sts = {0};

        bist_sts.u = cpt_read_csr64(cpt->reg_base,
                                    CPTX_PF_EXE_BIST_STATUS(0));

        return bist_sts.u;
}

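/*
 * Detach every core from every group, wait briefly for the engines to go
 * idle, then clear the execution-enable register.
 */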
static void cpt_disable_all_cores(struct cpt_device *cpt)
{
        u32 grp, timeout = 100;
        struct device *dev = &cpt->pdev->dev;

        /* Disengage the cores from groups */
        for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
                cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0);
                udelay(CSR_DELAY);
        }

        grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
        while (grp) {
                dev_err(dev, "Cores still busy");
                grp = cpt_read_csr64(cpt->reg_base,
                                     CPTX_PF_EXEC_BUSY(0));
                if (!timeout--)
                        break;

                udelay(CSR_DELAY);
        }
        /* Disable the cores */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0);
}

/*
 * Ensure all cores are disengaged from all groups by
 * calling cpt_disable_all_cores() before calling this
 * function.
 */
static void cpt_unload_microcode(struct cpt_device *cpt)
{
        u32 grp = 0, core;

        /* Free microcode bases and reset group masks */
        for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
                struct microcode *mcode = &cpt->mcode[grp];

                if (cpt->mcode[grp].code)
                        dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
                                          mcode->code, mcode->phys_base);
                mcode->code = NULL;
        }
        /* Clear UCODE_BASE registers for all engines */
        for (core = 0; core < CPT_MAX_TOTAL_CORES; core++)
                cpt_write_csr64(cpt->reg_base,
                                CPTX_PF_ENGX_UCODE_BASE(0, core), 0ull);
}

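/*
 * One-time PF hardware init at probe: reset the block, check RAM and
 * engine BIST status, read the SE/AE core counts and leave all cores
 * disabled until microcode is loaded.
 */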
static int cpt_device_init(struct cpt_device *cpt)
{
        u64 bist;
        struct device *dev = &cpt->pdev->dev;

        /* Reset the PF when probed first */
        cpt_reset(cpt);
        mdelay(100);

        /* Check BIST status */
        bist = (u64)cpt_check_bist_status(cpt);
        if (bist) {
                dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
                return -ENODEV;
        }

        bist = cpt_check_exe_bist_status(cpt);
        if (bist) {
                dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
                return -ENODEV;
        }

        /* Get CLK frequency */
        /* Get max enabled cores */
        cpt_find_max_enabled_cores(cpt);
        /* Disable all cores */
        cpt_disable_all_cores(cpt);
        /* Reset device parameters */
        cpt->next_mc_idx   = 0;
        cpt->next_group = 0;
        /* PF is ready */
        cpt->flags |= CPT_FLAG_DEVICE_READY;

        return 0;
}

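/* Allocate the PF MSI-X vectors and hook up the VF-to-PF mailbox 0 IRQ */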
static int cpt_register_interrupts(struct cpt_device *cpt)
{
        int ret;
        struct device *dev = &cpt->pdev->dev;

        /* Enable MSI-X */
        ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
                        CPT_PF_MSIX_VECTORS, PCI_IRQ_MSIX);
        if (ret < 0) {
                dev_err(&cpt->pdev->dev, "Request for #%d msix vectors failed\n",
                        CPT_PF_MSIX_VECTORS);
                return ret;
        }

        /* Register mailbox interrupt handlers */
        ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)),
                          cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
        if (ret)
                goto fail;

        /* Enable mailbox interrupt */
        cpt_enable_mbox_interrupts(cpt);
        return 0;

fail:
        dev_err(dev, "Request irq failed\n");
        pci_free_irq_vectors(cpt->pdev);
        return ret;
}

static void cpt_unregister_interrupts(struct cpt_device *cpt)
{
        free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt);
        pci_free_irq_vectors(cpt->pdev);
}

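/*
 * Enable up to num_vfs virtual functions, clamped to the TotalVFs value
 * advertised in the SR-IOV capability. The VF count is fixed at probe
 * time; no sriov_configure callback is provided.
 */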
static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs)
{
        int pos = 0;
        int err;
        u16 total_vf_cnt;
        struct pci_dev *pdev = cpt->pdev;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
                return -ENODEV;
        }

        cpt->num_vf_en = num_vfs; /* User requested VFs */
        pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
        if (total_vf_cnt < cpt->num_vf_en)
                cpt->num_vf_en = total_vf_cnt;

        if (!total_vf_cnt)
                return 0;

        /* Enable the available VFs */
        err = pci_enable_sriov(pdev, cpt->num_vf_en);
        if (err) {
                dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
                        cpt->num_vf_en);
                cpt->num_vf_en = 0;
                return err;
        }

        /* TODO: Optionally enable static VQ priorities feature */

        dev_info(&pdev->dev, "SRIOV enabled, number of VFs available %d\n",
                 cpt->num_vf_en);

        cpt->flags |= CPT_FLAG_SRIOV_ENABLED;

        return 0;
}

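/*
 * PCI probe: map BAR 0, initialize the hardware, register the mailbox
 * IRQ, load the SE/AE microcode and enable SR-IOV.
 */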
static int cpt_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct cpt_device *cpt;
        int err;

        if (num_vfs > 16 || num_vfs < 4) {
                dev_warn(dev, "Invalid vf count %u, resetting it to 4 (default)\n",
                         num_vfs);
                num_vfs = 4;
        }

        cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
        if (!cpt)
                return -ENOMEM;

        pci_set_drvdata(pdev, cpt);
        cpt->pdev = pdev;
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                pci_set_drvdata(pdev, NULL);
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto cpt_err_disable_device;
        }

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get usable DMA configuration\n");
                goto cpt_err_release_regions;
        }

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
                goto cpt_err_release_regions;
        }

        /* MAP PF's configuration registers */
        cpt->reg_base = pcim_iomap(pdev, 0, 0);
        if (!cpt->reg_base) {
                dev_err(dev, "Cannot map config register space, aborting\n");
                err = -ENOMEM;
                goto cpt_err_release_regions;
        }

        /* CPT device HW initialization */
        err = cpt_device_init(cpt);
        if (err)
                goto cpt_err_release_regions;

        /* Register interrupts */
        err = cpt_register_interrupts(cpt);
        if (err)
                goto cpt_err_release_regions;

        err = cpt_ucode_load(cpt);
        if (err)
                goto cpt_err_unregister_interrupts;

        /* Configure SRIOV */
        err = cpt_sriov_init(cpt, num_vfs);
        if (err)
                goto cpt_err_unregister_interrupts;

        return 0;

cpt_err_unregister_interrupts:
        cpt_unregister_interrupts(cpt);
cpt_err_release_regions:
        pci_release_regions(pdev);
cpt_err_disable_device:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

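/* Undo probe: quiesce the cores, free microcode DMA buffers and tear down SR-IOV */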
static void cpt_remove(struct pci_dev *pdev)
{
        struct cpt_device *cpt = pci_get_drvdata(pdev);

        /* Disengage SE and AE cores from all groups */
        cpt_disable_all_cores(cpt);
        /* Unload microcodes */
        cpt_unload_microcode(cpt);
        cpt_unregister_interrupts(cpt);
        pci_disable_sriov(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

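/* Shutdown: release IRQs and PCI resources; cores and microcode are left as-is */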
static void cpt_shutdown(struct pci_dev *pdev)
{
        struct cpt_device *cpt = pci_get_drvdata(pdev);

        if (!cpt)
                return;

        dev_info(&pdev->dev, "Shutdown device %x:%x.\n",
                 (u32)pdev->vendor, (u32)pdev->device);

        cpt_unregister_interrupts(cpt);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id cpt_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CPT_81XX_PCI_PF_DEVICE_ID) },
        { 0, }  /* end of table */
};

static struct pci_driver cpt_pci_driver = {
        .name = DRV_NAME,
        .id_table = cpt_id_table,
        .probe = cpt_probe,
        .remove = cpt_remove,
        .shutdown = cpt_shutdown,
};

module_pci_driver(cpt_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cpt_id_table);