linux/drivers/crypto/cavium/cpt/cptvf_main.c
/*
 * Copyright (C) 2016 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "cptvf.h"

#define DRV_NAME        "thunder-cptvf"
#define DRV_VERSION     "1.0"

struct cptvf_wqe {
        struct tasklet_struct twork;
        void *cptvf;
        u32 qno;
};

struct cptvf_wqe_info {
        struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
};

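/*
 * Note: one tasklet ("worker") is created per virtual queue so that the
 * DONE interrupt handler stays short and completion post-processing runs
 * in bottom-half context. The driver currently operates a single queue
 * per VF (queue 0 is hard coded in several places below).
 */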
static void vq_work_handler(unsigned long data)
{
        struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
        struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];

        vq_post_process(cwqe->cptvf, cwqe->qno);
}

static int init_worker_threads(struct cpt_vf *cptvf)
{
        struct pci_dev *pdev = cptvf->pdev;
        struct cptvf_wqe_info *cwqe_info;
        int i;

        cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
        if (!cwqe_info)
                return -ENOMEM;

        if (cptvf->nr_queues) {
                dev_info(&pdev->dev, "Creating VQ worker threads (%d)\n",
                         cptvf->nr_queues);
        }

        for (i = 0; i < cptvf->nr_queues; i++) {
                tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
                             (u64)cwqe_info);
                cwqe_info->vq_wqe[i].qno = i;
                cwqe_info->vq_wqe[i].cptvf = cptvf;
        }

        cptvf->wqe_info = cwqe_info;

        return 0;
}

static void cleanup_worker_threads(struct cpt_vf *cptvf)
{
        struct cptvf_wqe_info *cwqe_info;
        struct pci_dev *pdev = cptvf->pdev;
        int i;

        cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
        if (!cwqe_info)
                return;

        if (cptvf->nr_queues) {
                dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
                         cptvf->nr_queues);
        }

        for (i = 0; i < cptvf->nr_queues; i++)
                tasklet_kill(&cwqe_info->vq_wqe[i].twork);

        kzfree(cwqe_info);
        cptvf->wqe_info = NULL;
}

static void free_pending_queues(struct pending_qinfo *pqinfo)
{
        int i;
        struct pending_queue *queue;

        for_each_pending_queue(pqinfo, queue, i) {
                if (!queue->head)
                        continue;

                /* free single queue */
                kzfree((queue->head));

                queue->front = 0;
                queue->rear = 0;
        }

        pqinfo->qlen = 0;
        pqinfo->nr_queues = 0;
}

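/*
 * Each hardware queue has a software "pending" queue: a preallocated ring
 * of struct pending_entry records protected by a spinlock. front and rear
 * act as the consumer and producer indices and pending_count tracks the
 * number of outstanding requests. Allocating qlen entries up front keeps
 * allocations off the I/O path.
 */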
static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
                                u32 nr_queues)
{
        u32 i;
        size_t size;
        int ret;
        struct pending_queue *queue = NULL;

        pqinfo->nr_queues = nr_queues;
        pqinfo->qlen = qlen;

        size = (qlen * sizeof(struct pending_entry));

        for_each_pending_queue(pqinfo, queue, i) {
                queue->head = kzalloc((size), GFP_KERNEL);
                if (!queue->head) {
                        ret = -ENOMEM;
                        goto pending_qfail;
                }

                queue->front = 0;
                queue->rear = 0;
                atomic64_set((&queue->pending_count), (0));

                /* init queue spin lock */
                spin_lock_init(&queue->lock);
        }

        return 0;

pending_qfail:
        free_pending_queues(pqinfo);

        return ret;
}

static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
        struct pci_dev *pdev = cptvf->pdev;
        int ret;

        if (!nr_queues)
                return 0;

        ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
        if (ret) {
                dev_err(&pdev->dev, "failed to setup pending queues (%u)\n",
                        nr_queues);
                return ret;
        }

        return 0;
}

static void cleanup_pending_queues(struct cpt_vf *cptvf)
{
        struct pci_dev *pdev = cptvf->pdev;

        if (!cptvf->nr_queues)
                return;

        dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
                 cptvf->nr_queues);
        free_pending_queues(&cptvf->pqinfo);
}

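/*
 * Hardware command queues are built from DMA-coherent "chunks". Each chunk
 * holds qchunksize commands followed by an 8-byte next-chunk pointer, so
 * the engine can walk from one chunk to the next; the final chunk points
 * back to the first, making the queue circular. The chunks are also kept
 * on a per-queue hlist (chead) for software bookkeeping and teardown.
 * Roughly:
 *
 *   chunk0 [cmds ... | next] -> chunk1 [cmds ... | next] -> ... --+
 *      ^                                                          |
 *      +----------------------------------------------------------+
 */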
static void free_command_queues(struct cpt_vf *cptvf,
                                struct command_qinfo *cqinfo)
{
        int i;
        struct command_queue *queue = NULL;
        struct command_chunk *chunk = NULL;
        struct pci_dev *pdev = cptvf->pdev;
        struct hlist_node *node;

        /* clean up for each queue */
        for (i = 0; i < cptvf->nr_queues; i++) {
                queue = &cqinfo->queue[i];
                if (hlist_empty(&cqinfo->queue[i].chead))
                        continue;

                hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
                                          nextchunk) {
                        dma_free_coherent(&pdev->dev, chunk->size,
                                          chunk->head,
                                          chunk->dma_addr);
                        chunk->head = NULL;
                        chunk->dma_addr = 0;
                        hlist_del(&chunk->nextchunk);
                        kzfree(chunk);
                }

                queue->nchunks = 0;
                queue->idx = 0;
        }

        /* common cleanup */
        cqinfo->cmd_size = 0;
}

static int alloc_command_queues(struct cpt_vf *cptvf,
                                struct command_qinfo *cqinfo, size_t cmd_size,
                                u32 qlen)
{
        int i;
        size_t q_size;
        struct command_queue *queue = NULL;
        struct pci_dev *pdev = cptvf->pdev;

        /* common init */
        cqinfo->cmd_size = cmd_size;
        /* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
        cptvf->qsize = min(qlen, cqinfo->qchunksize) *
                        CPT_NEXT_CHUNK_PTR_SIZE + 1;
        /* Qsize in bytes to create space for alignment */
        q_size = qlen * cqinfo->cmd_size;

        /* per queue initialization */
        for (i = 0; i < cptvf->nr_queues; i++) {
                size_t c_size = 0;
                size_t rem_q_size = q_size;
                struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
                u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size;

                queue = &cqinfo->queue[i];
                INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
                do {
                        curr = kzalloc(sizeof(*curr), GFP_KERNEL);
                        if (!curr)
                                goto cmd_qfail;

                        c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
                                        rem_q_size;
                        curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev,
                                          c_size + CPT_NEXT_CHUNK_PTR_SIZE,
                                          &curr->dma_addr, GFP_KERNEL);
                        if (!curr->head) {
                                dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
                                        i, queue->nchunks);
                                kfree(curr);
                                goto cmd_qfail;
                        }

                        curr->size = c_size;
                        if (queue->nchunks == 0) {
                                hlist_add_head(&curr->nextchunk,
                                               &cqinfo->queue[i].chead);
                                first = curr;
                        } else {
                                hlist_add_behind(&curr->nextchunk,
                                                 &last->nextchunk);
                        }

                        queue->nchunks++;
                        rem_q_size -= c_size;
                        if (last)
                                *((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;

                        last = curr;
                } while (rem_q_size);

                /* Make the queue circular: tie the last chunk back to the head */
                curr = first;
                *((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
                queue->qhead = curr;
                spin_lock_init(&queue->lock);
        }
        return 0;

cmd_qfail:
        free_command_queues(cptvf, cqinfo);
        return -ENOMEM;
}

static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
{
        struct pci_dev *pdev = cptvf->pdev;
        int ret;

        /* setup AE command queues */
        ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
                                   qlen);
        if (ret) {
                dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n",
                        cptvf->nr_queues);
                return ret;
        }

        return ret;
}

static void cleanup_command_queues(struct cpt_vf *cptvf)
{
        struct pci_dev *pdev = cptvf->pdev;

        if (!cptvf->nr_queues)
                return;

        dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n",
                 cptvf->nr_queues);
        free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
{
        cleanup_worker_threads(cptvf);
        cleanup_pending_queues(cptvf);
        cleanup_command_queues(cptvf);
}

static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
        struct pci_dev *pdev = cptvf->pdev;
        int ret = 0;
        u32 max_dev_queues = 0;

        max_dev_queues = CPT_NUM_QS_PER_VF;
        /* possible cpus */
        nr_queues = min_t(u32, nr_queues, max_dev_queues);
        cptvf->nr_queues = nr_queues;

        ret = init_command_queues(cptvf, qlen);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
                        nr_queues);
                return ret;
        }

        ret = init_pending_queues(cptvf, qlen, nr_queues);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
                        nr_queues);
                goto setup_pqfail;
        }

        /* Create worker threads for BH processing */
        ret = init_worker_threads(cptvf);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup worker threads\n");
                goto init_work_fail;
        }

        return 0;

init_work_fail:
        cleanup_worker_threads(cptvf);
        cleanup_pending_queues(cptvf);

setup_pqfail:
        cleanup_command_queues(cptvf);

        return ret;
}

static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
{
        irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
        free_cpumask_var(cptvf->affinity_mask[vec]);
}

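/*
 * The register helpers below share one pattern: read a CPTX_VQX_* CSR into
 * its bitfield union via cpt_read_csr64(), update the field of interest,
 * and write the full 64-bit value back via cpt_write_csr64(). Index (0, 0)
 * addresses virtual queue 0, the only queue this driver currently uses.
 */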
static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
        union cptx_vqx_ctl vqx_ctl;

        vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
        vqx_ctl.s.ena = val;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
}

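/*
 * The doorbell counts 64-bit words rather than instructions; each CPT
 * instruction occupies 8 words, hence the "val * 8" below. For example,
 * queueing two instructions rings the doorbell with 16.
 */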
void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
{
        union cptx_vqx_doorbell vqx_dbell;

        vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
                                     CPTX_VQX_DOORBELL(0, 0));
        vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
                        vqx_dbell.u);
}

static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
{
        union cptx_vqx_inprog vqx_inprg;

        vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
        vqx_inprg.s.inflight = val;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
}

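/*
 * The two DONE_WAIT helpers below control interrupt coalescing: num_wait
 * is the number of completed instructions to accumulate and time_wait the
 * hold-off time before a DONE interrupt is raised. cptvf_device_init()
 * programs these with a count of 1 and CPT_TIMER_THOLD.
 */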
static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
{
        union cptx_vqx_done_wait vqx_dwait;

        vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
                                     CPTX_VQX_DONE_WAIT(0, 0));
        vqx_dwait.s.num_wait = val;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
                        vqx_dwait.u);
}

static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
{
        union cptx_vqx_done_wait vqx_dwait;

        vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
                                     CPTX_VQX_DONE_WAIT(0, 0));
        vqx_dwait.s.time_wait = time;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
                        vqx_dwait.u);
}

static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_ena_w1s vqx_misc_ena;

        vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_ENA_W1S(0, 0));
        /* Enable SWERR interrupts for the requested VF */
        vqx_misc_ena.s.swerr = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
                        vqx_misc_ena.u);
}

static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_ena_w1s vqx_misc_ena;

        vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_ENA_W1S(0, 0));
        /* Enable mbox(0) interrupts for the requested VF */
        vqx_misc_ena.s.mbox = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
                        vqx_misc_ena.u);
}

static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
{
        union cptx_vqx_done_ena_w1s vqx_done_ena;

        vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_DONE_ENA_W1S(0, 0));
        /* Set DONE interrupt for the requested VF */
        vqx_done_ena.s.done = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
                        vqx_done_ena.u);
}

static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.dovf = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.irde = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.nwrp = 1;
        cpt_write_csr64(cptvf->reg_base,
                        CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.mbox = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.swerr = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
        return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
}

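/*
 * MISC interrupt sources handled below (per the CPT_VF_INTR_* masks):
 *   MBOX - mailbox message from the PF
 *   DOVF - doorbell overflow
 *   IRDE - instruction NCB read error
 *   NWRP - NCB response write error
 *   SERR - software error
 * Only one cause is serviced per invocation; the mailbox case is the
 * expected (likely) one.
 */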
static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
{
        struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
        struct pci_dev *pdev = cptvf->pdev;
        u64 intr;

        intr = cptvf_read_vf_misc_intr_status(cptvf);
        /* Check for MISC interrupt types */
        if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
                dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
                cptvf_handle_mbox_intr(cptvf);
                cptvf_clear_mbox_intr(cptvf);
        } else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
                cptvf_clear_dovf_intr(cptvf);
                /* Clear doorbell count */
                cptvf_write_vq_doorbell(cptvf, 0);
                dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
                cptvf_clear_irde_intr(cptvf);
                dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
                cptvf_clear_nwrp_intr(cptvf);
                dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) {
                cptvf_clear_swerr_intr(cptvf);
                dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else {
                dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n",
                        cptvf->vfid);
        }

        return IRQ_HANDLED;
}

static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
                                                 int qno)
{
        struct cptvf_wqe_info *nwqe_info;

        if (unlikely(qno >= cptvf->nr_queues))
                return NULL;
        nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;

        return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
{
        union cptx_vqx_done vqx_done;

        vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
        return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
                                           u32 ackcnt)
{
        union cptx_vqx_done_ack vqx_dack_cnt;

        vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_DONE_ACK(0, 0));
        vqx_dack_cnt.s.done_ack = ackcnt;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
                        vqx_dack_cnt.u);
}

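/*
 * DONE interrupt flow: read the number of completed instructions from
 * CPTX_VQX_DONE, acknowledge them through CPTX_VQX_DONE_ACK, then schedule
 * the queue's tasklet so vq_post_process() finishes the requests outside
 * hard-IRQ context.
 */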
static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
{
        struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
        struct pci_dev *pdev = cptvf->pdev;
        /* Read the number of completions */
        u32 intr = cptvf_read_vq_done_count(cptvf);

        if (intr) {
                struct cptvf_wqe *wqe;

                /* Acknowledge the completions scheduled for processing */
                cptvf_write_vq_done_ack(cptvf, intr);
                wqe = get_cptvf_vq_wqe(cptvf, 0);
                if (unlikely(!wqe)) {
                        dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
                                cptvf->vfid);
                        return IRQ_NONE;
                }
                tasklet_hi_schedule(&wqe->twork);
        }

        return IRQ_HANDLED;
}

static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
{
        struct pci_dev *pdev = cptvf->pdev;
        int cpu;

        if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
                                GFP_KERNEL)) {
                dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d\n",
                        cptvf->vfid);
                return;
        }

        cpu = cptvf->vfid % num_online_cpus();
        cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
                        cptvf->affinity_mask[vec]);
        irq_set_affinity_hint(pci_irq_vector(pdev, vec),
                        cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
{
        union cptx_vqx_saddr vqx_saddr;

        vqx_saddr.u = val;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}

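/*
 * Bring up virtual queue 0 on this VF: disable the queue, clear the
 * doorbell and in-flight counters, point SADDR at the first command-queue
 * chunk, program completion coalescing, then re-enable the queue and mark
 * the device ready.
 */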
void cptvf_device_init(struct cpt_vf *cptvf)
{
        u64 base_addr = 0;

        /* Disable the VQ */
        cptvf_write_vq_ctl(cptvf, 0);
        /* Reset the doorbell */
        cptvf_write_vq_doorbell(cptvf, 0);
        /* Clear inflight */
        cptvf_write_vq_inprog(cptvf, 0);
        /* Write VQ SADDR */
        /* TODO: for now only one queue, so hard coded */
        base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
        cptvf_write_vq_saddr(cptvf, base_addr);
        /* Configure timerhold / coalescence */
        cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
        cptvf_write_vq_done_numwait(cptvf, 1);
        /* Enable the VQ */
        cptvf_write_vq_ctl(cptvf, 1);
        /* Flag the VF ready */
        cptvf->flags |= CPT_FLAG_DEVICE_READY;
}

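/*
 * Probe sequence: enable the PCI device and BARs, set 48-bit DMA masks,
 * map the VF register space, allocate MSI-X vectors, handshake with the
 * PF over the mailbox (READY, QLEN, group, priority, UP), initialize the
 * software queues and the hardware VQ, and finally register the crypto
 * algorithms. Each failure path unwinds only what has been set up so far.
 */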
static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct cpt_vf *cptvf;
        int    err;

        cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
        if (!cptvf)
                return -ENOMEM;

        pci_set_drvdata(pdev, cptvf);
        cptvf->pdev = pdev;
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                pci_set_drvdata(pdev, NULL);
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto cptvf_err_disable_device;
        }
        /* Mark as VF driver */
        cptvf->flags |= CPT_FLAG_VF_DRIVER;
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get usable DMA configuration\n");
                goto cptvf_err_release_regions;
        }

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
                goto cptvf_err_release_regions;
        }

        /* Map the VF's configuration registers */
        cptvf->reg_base = pcim_iomap(pdev, 0, 0);
        if (!cptvf->reg_base) {
                dev_err(dev, "Cannot map config register space, aborting\n");
                err = -ENOMEM;
                goto cptvf_err_release_regions;
        }

        cptvf->node = dev_to_node(&pdev->dev);
        err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
                        CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(dev, "Request for #%d msix vectors failed\n",
                        CPT_VF_MSIX_VECTORS);
                goto cptvf_err_release_regions;
        }

        err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
                          cptvf_misc_intr_handler, 0, "CPT VF misc intr",
                          cptvf);
        if (err) {
                dev_err(dev, "Request misc irq failed\n");
                goto cptvf_free_vectors;
        }

        /* Enable mailbox and software-error interrupts */
        cptvf_enable_mbox_interrupts(cptvf);
        cptvf_enable_swerr_interrupts(cptvf);

        /* Check ready with PF */
        /* Gets chip ID / device ID from PF if ready */
        err = cptvf_check_pf_ready(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to READY msg\n");
                goto cptvf_free_misc_irq;
        }

        /* CPT VF software resources initialization */
        cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
        err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
        if (err) {
                dev_err(dev, "cptvf_sw_init() failed\n");
                goto cptvf_free_misc_irq;
        }
        /* Convey VQ LEN to PF */
        err = cptvf_send_vq_size_msg(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to QLEN msg\n");
                goto cptvf_free_misc_irq;
        }

        /* CPT VF device initialization */
        cptvf_device_init(cptvf);
        /* Send msg to PF to assign current Q to required group */
        cptvf->vfgrp = 1;
        err = cptvf_send_vf_to_grp_msg(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to VF_GRP msg\n");
                goto cptvf_free_misc_irq;
        }

        cptvf->priority = 1;
        err = cptvf_send_vf_priority_msg(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to VF_PRIO msg\n");
                goto cptvf_free_misc_irq;
        }

        err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
                          cptvf_done_intr_handler, 0, "CPT VF done intr",
                          cptvf);
        if (err) {
                dev_err(dev, "Request done irq failed\n");
                goto cptvf_free_misc_irq;
        }

        /* Enable DONE interrupt */
        cptvf_enable_done_interrupts(cptvf);

        /* Set irq affinity masks */
        cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
        cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

        err = cptvf_send_vf_up(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to UP msg\n");
                goto cptvf_free_irq_affinity;
        }
        err = cvm_crypto_init(cptvf);
        if (err) {
                dev_err(dev, "Algorithm register failed\n");
                goto cptvf_free_irq_affinity;
        }
        return 0;

cptvf_free_irq_affinity:
        cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
        cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
cptvf_free_misc_irq:
        free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
cptvf_free_vectors:
        pci_free_irq_vectors(cptvf->pdev);
cptvf_err_release_regions:
        pci_release_regions(pdev);
cptvf_err_disable_device:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return err;
}

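/*
 * Teardown mirrors probe: tell the PF the VF is going down, then release
 * IRQ affinity hints, IRQs and MSI-X vectors, free the software queues,
 * release PCI resources and unregister the crypto algorithms.
 */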
static void cptvf_remove(struct pci_dev *pdev)
{
        struct cpt_vf *cptvf = pci_get_drvdata(pdev);

        if (!cptvf) {
                dev_err(&pdev->dev, "Invalid CPT-VF device\n");
                return;
        }

        /* Convey DOWN to PF */
        if (cptvf_send_vf_down(cptvf)) {
                dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
        } else {
                cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
                cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
                free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
                free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
                pci_free_irq_vectors(cptvf->pdev);
                cptvf_sw_cleanup(cptvf);
                pci_set_drvdata(pdev, NULL);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                cvm_crypto_exit();
        }
}

static void cptvf_shutdown(struct pci_dev *pdev)
{
        cptvf_remove(pdev);
}

/* Supported devices */
static const struct pci_device_id cptvf_id_table[] = {
        {PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0},
        { 0, }  /* end of table */
};

static struct pci_driver cptvf_pci_driver = {
        .name = DRV_NAME,
        .id_table = cptvf_id_table,
        .probe = cptvf_probe,
        .remove = cptvf_remove,
        .shutdown = cptvf_shutdown,
};

module_pci_driver(cptvf_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cptvf_id_table);