linux/drivers/infiniband/hw/hfi1/vnic_sdma.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2017 - 2018 Intel Corporation.
 */

/*
 * This file contains HFI1 support for VNIC SDMA functionality
 */

#include "sdma.h"
#include "vnic.h"

#define HFI1_VNIC_SDMA_Q_ACTIVE   BIT(0)
#define HFI1_VNIC_SDMA_Q_DEFERRED BIT(1)

#define HFI1_VNIC_TXREQ_NAME_LEN   32
#define HFI1_VNIC_SDMA_DESC_WTRMRK 64

/*
 * struct vnic_txreq - VNIC transmit descriptor
 * @txreq: sdma transmit request
 * @sdma: vnic sdma pointer
 * @skb: skb to send
 * @pad: pad buffer
 * @plen: pad length
 * @pbc_val: pbc value
 */
struct vnic_txreq {
        struct sdma_txreq       txreq;
        struct hfi1_vnic_sdma   *sdma;

        struct sk_buff         *skb;
        unsigned char           pad[HFI1_VNIC_MAX_PAD];
        u16                     plen;
        __le64                  pbc_val;
};

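/*
 * vnic_sdma_complete - SDMA transmit request completion callback
 *
 * Cleans up the descriptor resources, frees the skb and returns the
 * vnic_txreq to the per-device txreq cache.
 */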
static void vnic_sdma_complete(struct sdma_txreq *txreq,
                               int status)
{
        struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
        struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;

        sdma_txclean(vnic_sdma->dd, txreq);
        dev_kfree_skb_any(tx->skb);
        kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
}

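/*
 * build_vnic_ulp_payload - add the packet payload to the SDMA request
 *
 * Maps the skb linear data, each page fragment and, if present, the
 * trailing pad buffer into the SDMA tx request.
 */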
static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
                                           struct vnic_txreq *tx)
{
        int i, ret = 0;

        ret = sdma_txadd_kvaddr(
                sde->dd,
                &tx->txreq,
                tx->skb->data,
                skb_headlen(tx->skb));
        if (unlikely(ret))
                goto bail_txadd;

        for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];

                /* combine physically contiguous fragments later? */
                ret = sdma_txadd_page(sde->dd,
                                      &tx->txreq,
                                      skb_frag_page(frag),
                                      skb_frag_off(frag),
                                      skb_frag_size(frag));
                if (unlikely(ret))
                        goto bail_txadd;
        }

        if (tx->plen)
                ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
                                        tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
                                        tx->plen);

bail_txadd:
        return ret;
}

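/*
 * build_vnic_tx_desc - build the SDMA descriptor list for a vnic packet
 *
 * Initializes the SDMA tx request, adds the PBC header and then adds
 * the packet payload via build_vnic_ulp_payload().
 */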
static int build_vnic_tx_desc(struct sdma_engine *sde,
                              struct vnic_txreq *tx,
                              u64 pbc)
{
        int ret = 0;
        u16 hdrbytes = 2 << 2;  /* PBC */

        ret = sdma_txinit_ahg(
                &tx->txreq,
                0,
                hdrbytes + tx->skb->len + tx->plen,
                0,
                0,
                NULL,
                0,
                vnic_sdma_complete);
        if (unlikely(ret))
                goto bail_txadd;

        /* add pbc */
        tx->pbc_val = cpu_to_le64(pbc);
        ret = sdma_txadd_kvaddr(
                sde->dd,
                &tx->txreq,
                &tx->pbc_val,
                hdrbytes);
        if (unlikely(ret))
                goto bail_txadd;

        /* add the ulp payload */
        ret = build_vnic_ulp_payload(sde, tx);
bail_txadd:
        return ret;
}

/* set up the last plen bytes of pad */
static inline void hfi1_vnic_update_pad(unsigned char *pad, u8 plen)
{
        pad[HFI1_VNIC_MAX_PAD - 1] = plen - OPA_VNIC_ICRC_TAIL_LEN;
}

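/*
 * hfi1_vnic_send_dma - send a vnic skb on the Tx queue's SDMA engine
 *
 * Allocates a vnic_txreq, builds the descriptor list (PBC, skb data and
 * pad) and queues it on the SDMA engine. On -EBUSY the skb is not freed
 * here so the caller can stop the queue and retry later; on success or
 * any other error the skb is consumed.
 */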
int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
                       struct hfi1_vnic_vport_info *vinfo,
                       struct sk_buff *skb, u64 pbc, u8 plen)
{
        struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
        struct sdma_engine *sde = vnic_sdma->sde;
        struct vnic_txreq *tx;
        int ret = -ECOMM;

        if (unlikely(READ_ONCE(vnic_sdma->state) != HFI1_VNIC_SDMA_Q_ACTIVE))
                goto tx_err;

        if (unlikely(!sde || !sdma_running(sde)))
                goto tx_err;

        tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
        if (unlikely(!tx)) {
                ret = -ENOMEM;
                goto tx_err;
        }

        tx->sdma = vnic_sdma;
        tx->skb = skb;
        hfi1_vnic_update_pad(tx->pad, plen);
        tx->plen = plen;
        ret = build_vnic_tx_desc(sde, tx, pbc);
        if (unlikely(ret))
                goto free_desc;

        ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
                              &tx->txreq, vnic_sdma->pkts_sent);
        /* When -ECOMM, sdma callback will be called with ABORT status */
        if (unlikely(ret && ret != -ECOMM))
                goto free_desc;

        if (!ret) {
                vnic_sdma->pkts_sent = true;
                iowait_starve_clear(vnic_sdma->pkts_sent, &vnic_sdma->wait);
        }
        return ret;

free_desc:
        sdma_txclean(dd, &tx->txreq);
        kmem_cache_free(dd->vnic.txreq_cache, tx);
tx_err:
        if (ret != -EBUSY)
                dev_kfree_skb_any(skb);
        else
                vnic_sdma->pkts_sent = false;
        return ret;
}

/*
 * hfi1_vnic_sdma_sleep - vnic sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not enough
 * sdma descriptors available to send the packet. It adds the Tx queue's wait
 * structure to the sdma engine's dmawait list so the queue is woken up once
 * descriptors become available.
 */
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
                                struct iowait_work *wait,
                                struct sdma_txreq *txreq,
                                uint seq,
                                bool pkts_sent)
{
        struct hfi1_vnic_sdma *vnic_sdma =
                container_of(wait->iow, struct hfi1_vnic_sdma, wait);

        write_seqlock(&sde->waitlock);
        if (sdma_progress(sde, seq, txreq)) {
                write_sequnlock(&sde->waitlock);
                return -EAGAIN;
        }

        vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
        if (list_empty(&vnic_sdma->wait.list)) {
                iowait_get_priority(wait->iow);
                iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
        }
        write_sequnlock(&sde->waitlock);
        return -EBUSY;
}

/*
 * hfi1_vnic_sdma_wakeup - vnic sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and the
 * Tx queue's wait structure was previously added to the sdma engine's
 * dmawait list. It notifies the upper driver about the Tx queue wakeup.
 */
static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason)
{
        struct hfi1_vnic_sdma *vnic_sdma =
                container_of(wait, struct hfi1_vnic_sdma, wait);
        struct hfi1_vnic_vport_info *vinfo = vnic_sdma->vinfo;

        vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;
        if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
                netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);
}

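/*
 * hfi1_vnic_sdma_write_avail - report whether a Tx queue can accept packets
 *
 * Returns true if the queue's SDMA state is active (i.e. not deferred
 * waiting for descriptors).
 */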
inline bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
                                       u8 q_idx)
{
        struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];

        return (READ_ONCE(vnic_sdma->state) == HFI1_VNIC_SDMA_Q_ACTIVE);
}

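/*
 * hfi1_vnic_sdma_init - initialize the SDMA state of each Tx queue
 *
 * Sets up the iowait structure, SDMA engine assignment and a free
 * descriptor watermark for every Tx queue of the vport.
 */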
void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
{
        int i;

        for (i = 0; i < vinfo->num_tx_q; i++) {
                struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];

                iowait_init(&vnic_sdma->wait, 0, NULL, NULL,
                            hfi1_vnic_sdma_sleep,
                            hfi1_vnic_sdma_wakeup, NULL, NULL);
                vnic_sdma->sde = &vinfo->dd->per_sdma[i];
                vnic_sdma->dd = vinfo->dd;
                vnic_sdma->vinfo = vinfo;
                vnic_sdma->q_idx = i;
                vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;

                /* Add a free descriptor watermark for wakeups */
                if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
                        struct iowait_work *work;

                        INIT_LIST_HEAD(&vnic_sdma->stx.list);
                        vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
                        work = iowait_get_ib_work(&vnic_sdma->wait);
                        list_add_tail(&vnic_sdma->stx.list, &work->tx_head);
                }
        }
}

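/*
 * hfi1_vnic_txreq_init - create the per-device vnic_txreq slab cache
 */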
int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
{
        char buf[HFI1_VNIC_TXREQ_NAME_LEN];

        snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
        dd->vnic.txreq_cache = kmem_cache_create(buf,
                                                 sizeof(struct vnic_txreq),
                                                 0, SLAB_HWCACHE_ALIGN,
                                                 NULL);
        if (!dd->vnic.txreq_cache)
                return -ENOMEM;
        return 0;
}

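/*
 * hfi1_vnic_txreq_deinit - destroy the per-device vnic_txreq slab cache
 */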
void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd)
{
        kmem_cache_destroy(dd->vnic.txreq_cache);
        dd->vnic.txreq_cache = NULL;
}