dpdk/drivers/crypto/virtio/virtio_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <rte_cryptodev_pmd.h>

#include "virtqueue.h"
#include "virtio_cryptodev.h"
#include "virtio_crypto_algs.h"

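/*
 * Return the descriptor chain starting at desc_idx to the free list:
 * credit dxp->ndescs back to vq_free_cnt, walk to the end of the chain
 * (unless it is an indirect descriptor) and append the freed chain to
 * the tail of the existing free list.
 */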
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
        struct vring_desc *dp, *dp_tail;
        struct vq_desc_extra *dxp;
        uint16_t desc_idx_last = desc_idx;

        dp = &vq->vq_ring.desc[desc_idx];
        dxp = &vq->vq_descx[desc_idx];
        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
        if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
                while (dp->flags & VRING_DESC_F_NEXT) {
                        desc_idx_last = dp->next;
                        dp = &vq->vq_ring.desc[dp->next];
                }
        }
        dxp->ndescs = 0;

        /*
         * We must append the existing free chain, if any, to the end of
         * newly freed chain. If the virtqueue was completely used, then
         * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
         */
        if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
                vq->vq_desc_head_idx = desc_idx;
        } else {
                dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
                dp_tail->next = desc_idx;
        }

        vq->vq_desc_tail_idx = desc_idx_last;
        dp->next = VQ_RING_DESC_CHAIN_END;
}

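/*
 * Dequeue up to num completed crypto ops from the used ring: translate
 * the virtio-crypto inhdr status into an rte_crypto_op status, return
 * the op cookie to the mempool and free the descriptor chain.
 */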
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq,
                struct rte_crypto_op **rx_pkts, uint16_t num)
{
        struct vring_used_elem *uep;
        struct rte_crypto_op *cop;
        uint16_t used_idx, desc_idx;
        uint16_t i;
        struct virtio_crypto_inhdr *inhdr;
        struct virtio_crypto_op_cookie *op_cookie;

        /* Caller does the check */
        for (i = 0; i < num; i++) {
                used_idx = (uint16_t)(vq->vq_used_cons_idx
                                & (vq->vq_nentries - 1));
                uep = &vq->vq_ring.used->ring[used_idx];
                desc_idx = (uint16_t)uep->id;
                cop = (struct rte_crypto_op *)
                                vq->vq_descx[desc_idx].crypto_op;
                if (unlikely(cop == NULL)) {
                        VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
                                        "mbuf cookie at %u",
                                        vq->vq_used_cons_idx);
                        break;
                }

                op_cookie = (struct virtio_crypto_op_cookie *)
                                                vq->vq_descx[desc_idx].cookie;
                inhdr = &(op_cookie->inhdr);
                switch (inhdr->status) {
                case VIRTIO_CRYPTO_OK:
                        cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                        break;
                case VIRTIO_CRYPTO_ERR:
                        cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                        vq->packets_received_failed++;
                        break;
                case VIRTIO_CRYPTO_BADMSG:
                        cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        vq->packets_received_failed++;
                        break;
                case VIRTIO_CRYPTO_NOTSUPP:
                        cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        vq->packets_received_failed++;
                        break;
                case VIRTIO_CRYPTO_INVSESS:
                        cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
                        vq->packets_received_failed++;
                        break;
                default:
                        break;
                }

                vq->packets_received_total++;

                rx_pkts[i] = cop;
                rte_mempool_put(vq->mpool, op_cookie);

                vq->vq_used_cons_idx++;
                vq_ring_free_chain(vq, desc_idx);
                vq->vq_descx[desc_idx].crypto_op = NULL;
        }

        return i;
}

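/*
 * Fill in the virtio_crypto_op_data_req header for a symmetric op,
 * copying lengths and offsets from the rte_crypto_op and from the
 * session's create-session control request (cipher-only or algorithm
 * chaining).
 */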
static int
virtqueue_crypto_sym_pkt_header_arrange(
                struct rte_crypto_op *cop,
                struct virtio_crypto_op_data_req *data,
                struct virtio_crypto_session *session)
{
        struct rte_crypto_sym_op *sym_op = cop->sym;
        struct virtio_crypto_op_data_req *req_data = data;
        struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
        struct virtio_crypto_sym_create_session_req *sym_sess_req =
                &ctrl->u.sym_create_session;
        struct virtio_crypto_alg_chain_session_para *chain_para =
                &sym_sess_req->u.chain.para;
        struct virtio_crypto_cipher_session_para *cipher_para;

        req_data->header.session_id = session->session_id;

        switch (sym_sess_req->op_type) {
        case VIRTIO_CRYPTO_SYM_OP_CIPHER:
                req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;

                cipher_para = &sym_sess_req->u.cipher.para;
                if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
                        req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
                else
                        req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;

                req_data->u.sym_req.u.cipher.para.iv_len
                        = session->iv.length;

                req_data->u.sym_req.u.cipher.para.src_data_len =
                        (sym_op->cipher.data.length +
                                sym_op->cipher.data.offset);
                req_data->u.sym_req.u.cipher.para.dst_data_len =
                        req_data->u.sym_req.u.cipher.para.src_data_len;
                break;
        case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
                req_data->u.sym_req.op_type =
                        VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;

                cipher_para = &chain_para->cipher_param;
                if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
                        req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
                else
                        req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;

                req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;
                req_data->u.sym_req.u.chain.para.aad_len = session->aad.length;

                req_data->u.sym_req.u.chain.para.src_data_len =
                        (sym_op->cipher.data.length +
                                sym_op->cipher.data.offset);
                req_data->u.sym_req.u.chain.para.dst_data_len =
                        req_data->u.sym_req.u.chain.para.src_data_len;
                req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
                        sym_op->cipher.data.offset;
                req_data->u.sym_req.u.chain.para.len_to_cipher =
                        sym_op->cipher.data.length;
                req_data->u.sym_req.u.chain.para.hash_start_src_offset =
                        sym_op->auth.data.offset;
                req_data->u.sym_req.u.chain.para.len_to_hash =
                        sym_op->auth.data.length;
                req_data->u.sym_req.u.chain.para.aad_len =
                        chain_para->aad_len;

                if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
                        req_data->u.sym_req.u.chain.para.hash_result_len =
                                chain_para->u.hash_param.hash_result_len;
                if (chain_para->hash_mode ==
                        VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
                        req_data->u.sym_req.u.chain.para.hash_result_len =
                                chain_para->u.mac_param.hash_result_len;
                break;
        default:
                return -1;
        }

        return 0;
}

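/*
 * Enqueue one symmetric crypto op using a single INDIRECT descriptor.
 * The per-op cookie from the mempool holds, back to back, the
 * virtio_crypto_op_data_req, the virtio_crypto_inhdr (status written
 * back by the device), the indirect descriptor table itself and, when
 * needed, a copy of the IV; only one ring descriptor is consumed per op.
 */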
static int
virtqueue_crypto_sym_enqueue_xmit(
                struct virtqueue *txvq,
                struct rte_crypto_op *cop)
{
        uint16_t idx = 0;
        uint16_t num_entry;
        uint16_t needed = 1;
        uint16_t head_idx;
        struct vq_desc_extra *dxp;
        struct vring_desc *start_dp;
        struct vring_desc *desc;
        uint64_t indirect_op_data_req_phys_addr;
        uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
        uint32_t indirect_vring_addr_offset = req_data_len +
                sizeof(struct virtio_crypto_inhdr);
        uint32_t indirect_iv_addr_offset =
                        offsetof(struct virtio_crypto_op_cookie, iv);
        struct rte_crypto_sym_op *sym_op = cop->sym;
        struct virtio_crypto_session *session =
                (struct virtio_crypto_session *)get_sym_session_private_data(
                cop->sym->session, cryptodev_virtio_driver_id);
        struct virtio_crypto_op_data_req *op_data_req;
        uint32_t hash_result_len = 0;
        struct virtio_crypto_op_cookie *crypto_op_cookie;
        struct virtio_crypto_alg_chain_session_para *para;

        if (unlikely(sym_op->m_src->nb_segs != 1))
                return -EMSGSIZE;
        if (unlikely(txvq->vq_free_cnt == 0))
                return -ENOSPC;
        if (unlikely(txvq->vq_free_cnt < needed))
                return -EMSGSIZE;
        head_idx = txvq->vq_desc_head_idx;
        if (unlikely(head_idx >= txvq->vq_nentries))
                return -EFAULT;
        if (unlikely(session == NULL))
                return -EFAULT;

        dxp = &txvq->vq_descx[head_idx];

        if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
                VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie");
                return -EFAULT;
        }
        crypto_op_cookie = dxp->cookie;
        indirect_op_data_req_phys_addr =
                rte_mempool_virt2iova(crypto_op_cookie);
        op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;

        if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req,
                        session)) {
                /* return the cookie to the mempool to avoid leaking it */
                rte_mempool_put(txvq->mpool, crypto_op_cookie);
                return -EFAULT;
        }

        /* status is initialized to VIRTIO_CRYPTO_ERR */
        ((struct virtio_crypto_inhdr *)
                ((uint8_t *)op_data_req + req_data_len))->status =
                VIRTIO_CRYPTO_ERR;

        /* point to indirect vring entry */
        desc = (struct vring_desc *)
                ((uint8_t *)op_data_req + indirect_vring_addr_offset);
        for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
                desc[idx].next = idx + 1;
        desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;

        idx = 0;

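        /*
         * Build the indirect chain: the data req, then the optional IV
         * and AAD and the src data (all device-readable), followed by
         * the device-writable dst data, optional digest and the final
         * status byte (inhdr).
         */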
        /* indirect vring: first part, virtio_crypto_op_data_req */
        desc[idx].addr = indirect_op_data_req_phys_addr;
        desc[idx].len = req_data_len;
        desc[idx++].flags = VRING_DESC_F_NEXT;

        /* indirect vring: iv of cipher */
        if (session->iv.length) {
                if (cop->phys_addr)
                        desc[idx].addr = cop->phys_addr + session->iv.offset;
                else {
                        rte_memcpy(crypto_op_cookie->iv,
                                        rte_crypto_op_ctod_offset(cop,
                                        uint8_t *, session->iv.offset),
                                        session->iv.length);
                        desc[idx].addr = indirect_op_data_req_phys_addr +
                                indirect_iv_addr_offset;
                }

                desc[idx].len = session->iv.length;
                desc[idx++].flags = VRING_DESC_F_NEXT;
        }

        /* indirect vring: additional auth data */
        if (session->aad.length) {
                desc[idx].addr = session->aad.phys_addr;
                desc[idx].len = session->aad.length;
                desc[idx++].flags = VRING_DESC_F_NEXT;
        }

        /* indirect vring: src data */
        desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
        desc[idx].len = (sym_op->cipher.data.offset
                + sym_op->cipher.data.length);
        desc[idx++].flags = VRING_DESC_F_NEXT;

        /* indirect vring: dst data */
        if (sym_op->m_dst) {
                desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_dst, 0);
                desc[idx].len = (sym_op->cipher.data.offset
                        + sym_op->cipher.data.length);
        } else {
                desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
                desc[idx].len = (sym_op->cipher.data.offset
                        + sym_op->cipher.data.length);
        }
        desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;

        /* indirect vring: digest result */
        para = &(session->ctrl.u.sym_create_session.u.chain.para);
        if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
                hash_result_len = para->u.hash_param.hash_result_len;
        if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
                hash_result_len = para->u.mac_param.hash_result_len;
        if (hash_result_len > 0) {
                desc[idx].addr = sym_op->auth.digest.phys_addr;
                desc[idx].len = hash_result_len;
                desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
        }

        /* indirect vring: last part, status returned */
        desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
        desc[idx].len = sizeof(struct virtio_crypto_inhdr);
        desc[idx++].flags = VRING_DESC_F_WRITE;

        num_entry = idx;

        /* save the info to use when receiving completions */
        dxp->crypto_op = (void *)cop;
        dxp->ndescs = needed;

        /* use a single buffer */
        start_dp = txvq->vq_ring.desc;
        start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
                indirect_vring_addr_offset;
        start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
        start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;

        idx = start_dp[head_idx].next;
        txvq->vq_desc_head_idx = idx;
        if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
                txvq->vq_desc_tail_idx = idx;
        txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
        vq_update_avail_ring(txvq, head_idx);

        return 0;
}

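/* Dispatch one crypto op to the queue; only symmetric ops are supported. */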
static int
virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
                struct rte_crypto_op *cop)
{
        int ret;

        switch (cop->type) {
        case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
                ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
                break;
        default:
                VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
                                cop->type);
                ret = -EFAULT;
                break;
        }

        return ret;
}

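/*
 * Initialize a virtqueue's vring: lay out the descriptor ring in the
 * queue's ring memory, chain all descriptors into the free list and
 * hand the queue's physical address to the device.
 */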
static int
virtio_crypto_vring_start(struct virtqueue *vq)
{
        struct virtio_crypto_hw *hw = vq->hw;
        int i, size = vq->vq_nentries;
        struct vring *vr = &vq->vq_ring;
        uint8_t *ring_mem = vq->vq_ring_virt_mem;

        PMD_INIT_FUNC_TRACE();

        vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
        vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
        vq->vq_free_cnt = vq->vq_nentries;

        /* Chain all the descriptors in the ring with an END */
        for (i = 0; i < size - 1; i++)
                vr->desc[i].next = (uint16_t)(i + 1);
        vr->desc[i].next = VQ_RING_DESC_CHAIN_END;

        /*
         * Disable interrupts from the device (host) to the guest
         */
        virtqueue_disable_intr(vq);

        /*
         * Set the guest physical address of the virtqueue in the
         * VIRTIO_PCI_QUEUE_PFN config register of the device to share
         * it with the backend
         */
        if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
                VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
                return -EINVAL;
        }

        return 0;
}

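/* Start the control virtqueue, if the device provides one. */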
void
virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
{
        struct virtio_crypto_hw *hw = dev->data->dev_private;

        if (hw->cvq) {
                virtio_crypto_vring_start(hw->cvq);
                VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
        }
}

void
virtio_crypto_dataq_start(struct rte_cryptodev *dev)
{
        /*
         * Start data vrings
         * -    Setup vring structure for data queues
         */
        uint16_t i;
        struct virtio_crypto_hw *hw = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        /* Start data vring. */
        for (i = 0; i < hw->max_dataqueues; i++) {
                virtio_crypto_vring_start(dev->data->queue_pairs[i]);
                VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
        }
}

/* vring size of data queue is 1024 */
#define VIRTIO_MBUF_BURST_SZ 1024

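/*
 * Dequeue burst: the same data queue is used for enqueue and dequeue,
 * hence the tx_queue parameter name. Reads at most nb_pkts completed
 * ops from the used ring.
 */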
uint16_t
virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
                uint16_t nb_pkts)
{
        struct virtqueue *txvq = tx_queue;
        uint16_t nb_used, num, nb_rx;

        nb_used = VIRTQUEUE_NUSED(txvq);

        virtio_rmb();

        num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
        num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
                ? num : VIRTIO_MBUF_BURST_SZ);

        if (num == 0)
                return 0;

        nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
        VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num);

        return nb_rx;
}

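/*
 * Enqueue burst: place each op on the data queue, then publish the
 * updated avail index and kick the backend once for the whole burst.
 */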
uint16_t
virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
                uint16_t nb_pkts)
{
        struct virtqueue *txvq;
        uint16_t nb_tx;
        int error;

        if (unlikely(nb_pkts < 1))
                return nb_pkts;
        if (unlikely(tx_queue == NULL)) {
                VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
                return 0;
        }
        txvq = tx_queue;

        VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);

        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
                /* nb_segs is always 1 in the virtio crypto case */
                int need = txm->nb_segs - txvq->vq_free_cnt;

                /*
                 * A positive value indicates there is not enough space
                 * in the vring descriptors
                 */
                if (unlikely(need > 0)) {
                        /*
                         * try again because the receive process may have
                         * freed some space
                         */
                        need = txm->nb_segs - txvq->vq_free_cnt;
                        if (unlikely(need > 0)) {
                                VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
                                        "descriptors to transmit");
                                break;
                        }
                }

                txvq->packets_sent_total++;

                /* Enqueue Packet buffers */
                error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
                if (unlikely(error)) {
                        /* the enqueue path returns negative errnos */
                        if (error == -ENOSPC)
                                VIRTIO_CRYPTO_TX_LOG_ERR(
                                        "virtqueue_enqueue Free count = 0");
                        else if (error == -EMSGSIZE)
                                VIRTIO_CRYPTO_TX_LOG_ERR(
                                        "virtqueue_enqueue Free count < 1");
                        else
                                VIRTIO_CRYPTO_TX_LOG_ERR(
                                        "virtqueue_enqueue error: %d", error);
                        txvq->packets_sent_failed++;
                        break;
                }
        }

        if (likely(nb_tx)) {
                vq_update_avail_idx(txvq);

                if (unlikely(virtqueue_kick_prepare(txvq))) {
                        virtqueue_notify(txvq);
                        VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");
                }
        }

        return nb_tx;
}
