linux/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "otx_cpt_common.h"
#include "otx_cptpf.h"

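/*
 * PF <-> VF mailbox protocol: each VF shares a pair of 64-bit mailbox
 * registers with the PF. MBOX(0) carries the message opcode and MBOX(1)
 * carries the payload; the write to MBOX(0) is what raises the mailbox
 * interrupt on the receiving side.
 */

/* Map a mailbox message opcode to a human-readable name for debug logging */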
static char *get_mbox_opcode_str(int msg_opcode)
{
        char *str = "Unknown";

        switch (msg_opcode) {
        case OTX_CPT_MSG_VF_UP:
                str = "UP";
                break;

        case OTX_CPT_MSG_VF_DOWN:
                str = "DOWN";
                break;

        case OTX_CPT_MSG_READY:
                str = "READY";
                break;

        case OTX_CPT_MSG_QLEN:
                str = "QLEN";
                break;

        case OTX_CPT_MSG_QBIND_GRP:
                str = "QBIND_GRP";
                break;

        case OTX_CPT_MSG_VQ_PRIORITY:
                str = "VQ_PRIORITY";
                break;

        case OTX_CPT_MSG_PF_TYPE:
                str = "PF_TYPE";
                break;

        case OTX_CPT_MSG_ACK:
                str = "ACK";
                break;

        case OTX_CPT_MSG_NACK:
                str = "NACK";
                break;
        }

        return str;
}

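/* Dump the decoded opcode and raw contents of a mailbox message at debug level */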
static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
{
        char raw_data_str[OTX_CPT_MAX_MBOX_DATA_STR_SIZE];

        hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
                           raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
        if (vf_id >= 0)
                pr_debug("MBOX opcode %s received from VF%d raw_data %s\n",
                         get_mbox_opcode_str(mbox_msg->msg), vf_id,
                         raw_data_str);
        else
                pr_debug("MBOX opcode %s received from PF raw_data %s\n",
                         get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
}

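/*
 * Send a mailbox message to a VF: the payload is written to MBOX(1)
 * first, then the opcode to MBOX(0), since the MBOX(0) write triggers
 * the mailbox interrupt on the VF side.
 */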
static void otx_cpt_send_msg_to_vf(struct otx_cpt_device *cpt, int vf,
                                   struct otx_cpt_mbox *mbx)
{
        /* Writing mbox(0) causes interrupt */
        writeq(mbx->data, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));
        writeq(mbx->msg, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));
}

/*
 * ACKs the VF's mailbox message
 * @vf: VF to which the ACK is sent
 */
static void otx_cpt_mbox_send_ack(struct otx_cpt_device *cpt, int vf,
                                  struct otx_cpt_mbox *mbx)
{
        mbx->data = 0ull;
        mbx->msg = OTX_CPT_MSG_ACK;
        otx_cpt_send_msg_to_vf(cpt, vf, mbx);
}

/*
 * NACKs the VF's mailbox message, indicating that the PF was unable to
 * complete the requested action
 */
static void otx_cptpf_mbox_send_nack(struct otx_cpt_device *cpt, int vf,
                                     struct otx_cpt_mbox *mbx)
{
        mbx->data = 0ull;
        mbx->msg = OTX_CPT_MSG_NACK;
        otx_cpt_send_msg_to_vf(cpt, vf, mbx);
}

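/* Acknowledge the pending mailbox interrupt bit for a VF */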
static void otx_cpt_clear_mbox_intr(struct otx_cpt_device *cpt, u32 vf)
{
        /* W1C for the VF */
        writeq(1ull << vf, cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0));
}

/*
 * Configure QLEN/Chunk sizes for VF
 */
static void otx_cpt_cfg_qlen_for_vf(struct otx_cpt_device *cpt, int vf,
                                    u32 size)
{
        union otx_cptx_pf_qx_ctl pf_qx_ctl;

        pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
        pf_qx_ctl.s.size = size;
        pf_qx_ctl.s.cont_err = true;
        writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
}

/*
 * Configure VQ priority
 */
static void otx_cpt_cfg_vq_priority(struct otx_cpt_device *cpt, int vf, u32 pri)
{
        union otx_cptx_pf_qx_ctl pf_qx_ctl;

        pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
        pf_qx_ctl.s.pri = pri;
        writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
}

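/*
 * Bind VF queue @q to engine group @grp and return the VF type
 * (OTX_CPT_SE_TYPES or OTX_CPT_AE_TYPES) supported by the group's
 * microcode, or BAD_OTX_CPTVF_TYPE if the engine type is unrecognized.
 */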
static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
{
        struct device *dev = &cpt->pdev->dev;
        struct otx_cpt_eng_grp_info *eng_grp;
        union otx_cptx_pf_qx_ctl pf_qx_ctl;
        struct otx_cpt_ucode *ucode;

        if (q >= cpt->max_vfs) {
                dev_err(dev, "Requested queue %d is greater than maximum available %d\n",
                        q, cpt->max_vfs);
                return -EINVAL;
        }

        if (grp >= OTX_CPT_MAX_ENGINE_GROUPS) {
                dev_err(dev, "Requested group %d is greater than maximum available %d\n",
                        grp, OTX_CPT_MAX_ENGINE_GROUPS);
                return -EINVAL;
        }

        eng_grp = &cpt->eng_grps.grp[grp];
        if (!eng_grp->is_enabled) {
                dev_err(dev, "Requested engine group %d is disabled\n", grp);
                return -EINVAL;
        }

        pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(q));
        pf_qx_ctl.s.grp = grp;
        writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(q));

        if (eng_grp->mirror.is_ena)
                ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
        else
                ucode = &eng_grp->ucode[0];

        if (otx_cpt_uc_supports_eng_type(ucode, OTX_CPT_SE_TYPES))
                return OTX_CPT_SE_TYPES;
        else if (otx_cpt_uc_supports_eng_type(ucode, OTX_CPT_AE_TYPES))
                return OTX_CPT_AE_TYPES;
        else
                return BAD_OTX_CPTVF_TYPE;
}

/* Interrupt handler to handle mailbox messages from VFs */
static void otx_cpt_handle_mbox_intr(struct otx_cpt_device *cpt, int vf)
{
        int vftype = 0;
        struct otx_cpt_mbox mbx = {};
        struct device *dev = &cpt->pdev->dev;
        /*
         * MBOX[0] contains msg
         * MBOX[1] contains data
         */
        mbx.msg  = readq(cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));
        mbx.data = readq(cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));

        dump_mbox_msg(&mbx, vf);

        switch (mbx.msg) {
        case OTX_CPT_MSG_VF_UP:
                mbx.msg  = OTX_CPT_MSG_VF_UP;
                mbx.data = cpt->vfs_enabled;
                otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
                break;
        case OTX_CPT_MSG_READY:
                mbx.msg  = OTX_CPT_MSG_READY;
                mbx.data = vf;
                otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
                break;
        case OTX_CPT_MSG_VF_DOWN:
                /* First msg in VF teardown sequence */
                otx_cpt_mbox_send_ack(cpt, vf, &mbx);
                break;
        case OTX_CPT_MSG_QLEN:
                otx_cpt_cfg_qlen_for_vf(cpt, vf, mbx.data);
                otx_cpt_mbox_send_ack(cpt, vf, &mbx);
                break;
        case OTX_CPT_MSG_QBIND_GRP:
                vftype = otx_cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
                if ((vftype != OTX_CPT_AE_TYPES) &&
                    (vftype != OTX_CPT_SE_TYPES)) {
                        dev_err(dev, "VF%d binding to eng group %llu failed\n",
                                vf, mbx.data);
                        otx_cptpf_mbox_send_nack(cpt, vf, &mbx);
                } else {
                        mbx.msg = OTX_CPT_MSG_QBIND_GRP;
                        mbx.data = vftype;
                        otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
                }
                break;
        case OTX_CPT_MSG_PF_TYPE:
                mbx.msg = OTX_CPT_MSG_PF_TYPE;
                mbx.data = cpt->pf_type;
                otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
                break;
        case OTX_CPT_MSG_VQ_PRIORITY:
                otx_cpt_cfg_vq_priority(cpt, vf, mbx.data);
                otx_cpt_mbox_send_ack(cpt, vf, &mbx);
                break;
        default:
                dev_err(dev, "Invalid msg from VF%d, msg 0x%llx\n", vf, mbx.msg);
                break;
        }
}

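/*
 * PF mailbox interrupt handler: read the pending-interrupt bitmap for
 * mailbox @mbx, service each VF whose bit is set and then clear its
 * interrupt.
 */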
void otx_cpt_mbox_intr_handler(struct otx_cpt_device *cpt, int mbx)
{
        u64 intr;
        u8  vf;

        intr = readq(cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0));
        pr_debug("PF interrupt mbox%d mask 0x%llx\n", mbx, intr);
        for (vf = 0; vf < cpt->max_vfs; vf++) {
                if (intr & (1ULL << vf)) {
                        otx_cpt_handle_mbox_intr(cpt, vf);
                        otx_cpt_clear_mbox_intr(cpt, vf);
                }
        }
}