linux/drivers/crypto/cavium/nitrox/nitrox_mbx.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_dev.h"

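/* Map a ring number to the VF that owns it; _y is the queue count per VF. */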
#define RING_TO_VFNO(_x, _y)    ((_x) / (_y))

/**
 * enum mbx_msg_type - Mailbox message types
 */
enum mbx_msg_type {
        MBX_MSG_TYPE_NOP,
        MBX_MSG_TYPE_REQ,
        MBX_MSG_TYPE_ACK,
        MBX_MSG_TYPE_NACK,
};

/**
 * enum mbx_msg_opcode - Mailbox message opcodes
 */
enum mbx_msg_opcode {
        MSG_OP_VF_MODE = 1,
        MSG_OP_VF_UP,
        MSG_OP_VF_DOWN,
        MSG_OP_CHIPID_VFID,
};

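/**
 * struct pf2vf_work - deferred PF->VF mailbox response
 * @vfdev: VF device that raised the mailbox request
 * @ndev: NITROX PF device
 * @pf2vf_resp: work item that builds and sends the response
 */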
struct pf2vf_work {
        struct nitrox_vfdev *vfdev;
        struct nitrox_device *ndev;
        struct work_struct pf2vf_resp;
};

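/* Read the VF->PF mailbox data register for the given ring. */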
static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring)
{
        u64 reg_addr;

        reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring);
        return nitrox_read_csr(ndev, reg_addr);
}

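/* Write a PF->VF mailbox message to the data register for the given ring. */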
static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value,
                                    int ring)
{
        u64 reg_addr;

        reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring);
        nitrox_write_csr(ndev, reg_addr, value);
}

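/* Build the reply for a VF request and ACK it through the PF->VF mailbox. */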
static void pf2vf_send_response(struct nitrox_device *ndev,
                                struct nitrox_vfdev *vfdev)
{
        union mbox_msg msg;

        msg.value = vfdev->msg.value;

        switch (vfdev->msg.opcode) {
        case MSG_OP_VF_MODE:
                msg.data = ndev->mode;
                break;
        case MSG_OP_VF_UP:
                vfdev->nr_queues = vfdev->msg.data;
                atomic_set(&vfdev->state, __NDEV_READY);
                break;
        case MSG_OP_CHIPID_VFID:
                msg.id.chipid = ndev->idx;
                msg.id.vfid = vfdev->vfno;
                break;
        case MSG_OP_VF_DOWN:
                vfdev->nr_queues = 0;
                atomic_set(&vfdev->state, __NDEV_NOT_READY);
                break;
        default:
                msg.type = MBX_MSG_TYPE_NOP;
                break;
        }

        if (msg.type == MBX_MSG_TYPE_NOP)
                return;

        /* send ACK to VF */
        msg.type = MBX_MSG_TYPE_ACK;
        pf2vf_write_mbox(ndev, msg.value, vfdev->ring);

        vfdev->msg.value = 0;
        atomic64_inc(&vfdev->mbx_resp);
}

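/* Work handler: dispatch a single queued VF mailbox message. */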
static void pf2vf_resp_handler(struct work_struct *work)
{
        struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work,
                                                     pf2vf_resp);
        struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev;
        struct nitrox_device *ndev = pf2vf_resp->ndev;

        switch (vfdev->msg.type) {
        case MBX_MSG_TYPE_REQ:
                /* process the request from VF */
                pf2vf_send_response(ndev, vfdev);
                break;
        case MBX_MSG_TYPE_ACK:
        case MBX_MSG_TYPE_NACK:
                break;
        }

        kfree(pf2vf_resp);
}

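/**
 * nitrox_pf2vf_mbox_handler - service pending VF->PF mailbox interrupts
 * @ndev: NITROX PF device
 *
 * Scans NPS_PKT_MBOX_INT_LO/HI for rings with a pending message, latches
 * the VF request, queues a work item to send the response and clears the
 * corresponding interrupt bit.
 */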
void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
{
        struct nitrox_vfdev *vfdev;
        struct pf2vf_work *pfwork;
        u64 value, reg_addr;
        u32 i;
        int vfno;

        /* loop for VF(0..63) */
        reg_addr = NPS_PKT_MBOX_INT_LO;
        value = nitrox_read_csr(ndev, reg_addr);
        for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
                /* get the vfno from ring */
                vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues);
                vfdev = ndev->iov.vfdev + vfno;
                vfdev->ring = i;
                /* fill the vf mailbox data */
                vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
                pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
                if (!pfwork)
                        continue;

                pfwork->vfdev = vfdev;
                pfwork->ndev = ndev;
                INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
                queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
                /* clear the corresponding vf bit */
                nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
        }

        /* loop for VF(64..127) */
        reg_addr = NPS_PKT_MBOX_INT_HI;
        value = nitrox_read_csr(ndev, reg_addr);
        for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
                /* get the vfno from ring */
                vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues);
                vfdev = ndev->iov.vfdev + vfno;
                vfdev->ring = (i + 64);
                /* fill the vf mailbox data */
                vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);

                pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
                if (!pfwork)
                        continue;

                pfwork->vfdev = vfdev;
                pfwork->ndev = ndev;
                INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
                queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
                /* clear the corresponding vf bit */
                nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
        }
}

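/**
 * nitrox_mbox_init - set up PF->VF mailbox support
 * @ndev: NITROX PF device
 *
 * Allocates per-VF state and the response workqueue, then enables the
 * PF->VF mailbox interrupts.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */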
int nitrox_mbox_init(struct nitrox_device *ndev)
{
        struct nitrox_vfdev *vfdev;
        int i;

        ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs,
                                  sizeof(struct nitrox_vfdev), GFP_KERNEL);
        if (!ndev->iov.vfdev)
                return -ENOMEM;

        for (i = 0; i < ndev->iov.num_vfs; i++) {
                vfdev = ndev->iov.vfdev + i;
                vfdev->vfno = i;
        }

        /* allocate pf2vf response workqueue */
        ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
        if (!ndev->iov.pf2vf_wq) {
                kfree(ndev->iov.vfdev);
                ndev->iov.vfdev = NULL;
                return -ENOMEM;
        }
        /* enable pf2vf mailbox interrupts */
        enable_pf2vf_mbox_interrupts(ndev);

        return 0;
}

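/**
 * nitrox_mbox_cleanup - tear down PF->VF mailbox support
 * @ndev: NITROX PF device
 *
 * Disables mailbox interrupts, destroys the response workqueue and frees
 * the per-VF state.
 */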
void nitrox_mbox_cleanup(struct nitrox_device *ndev)
{
        /* disable pf2vf mailbox interrupts */
        disable_pf2vf_mbox_interrupts(ndev);
        /* destroy workqueue */
        if (ndev->iov.pf2vf_wq)
                destroy_workqueue(ndev->iov.pf2vf_wq);

        kfree(ndev->iov.vfdev);
        ndev->iov.pf2vf_wq = NULL;
        ndev->iov.vfdev = NULL;
}