linux/drivers/vdpa/ifcvf/ifcvf_base.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include "ifcvf_base.h"

static inline u8 ifc_ioread8(u8 __iomem *addr)
{
        return ioread8(addr);
}

static inline u16 ifc_ioread16(__le16 __iomem *addr)
{
        return ioread16(addr);
}

static inline u32 ifc_ioread32(__le32 __iomem *addr)
{
        return ioread32(addr);
}

static inline void ifc_iowrite8(u8 value, u8 __iomem *addr)
{
        iowrite8(value, addr);
}

static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr)
{
        iowrite16(value, addr);
}

static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr)
{
        iowrite32(value, addr);
}

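/* Write a 64-bit value as two 32-bit halves, low dword first */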
static void ifc_iowrite64_twopart(u64 val,
                                  __le32 __iomem *lo, __le32 __iomem *hi)
{
        ifc_iowrite32((u32)val, lo);
        ifc_iowrite32(val >> 32, hi);
}

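/* Fetch the adapter that embeds this ifcvf_hw */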
struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
{
        return container_of(hw, struct ifcvf_adapter, vf);
}

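/*
 * Translate a virtio PCI capability into a mapped address:
 * validate the BAR index and bounds, then return base[bar] + offset.
 */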
static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
                                  struct virtio_pci_cap *cap)
{
        struct ifcvf_adapter *ifcvf;
        struct pci_dev *pdev;
        u32 length, offset;
        u8 bar;

        length = le32_to_cpu(cap->length);
        offset = le32_to_cpu(cap->offset);
        bar = cap->bar;

        ifcvf = vf_to_adapter(hw);
        pdev = ifcvf->pdev;

        if (bar >= IFCVF_PCI_MAX_RESOURCE) {
                IFCVF_DBG(pdev,
                          "Invalid bar number %u to get capabilities\n", bar);
                return NULL;
        }

        if (offset + length > pci_resource_len(pdev, bar)) {
                IFCVF_DBG(pdev,
                          "offset(%u) + len(%u) overflows bar%u's capability\n",
                          offset, length, bar);
                return NULL;
        }

        return hw->base[bar] + offset;
}

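/* Read a capability structure from PCI config space, one dword at a time */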
static int ifcvf_read_config_range(struct pci_dev *dev,
                                   uint32_t *val, int size, int where)
{
        int ret, i;

        for (i = 0; i < size; i += 4) {
                ret = pci_read_config_dword(dev, where + i, val + i / 4);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

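/*
 * Walk the PCI capability list and map the four virtio vendor
 * capabilities (common, notify, ISR and device config), then
 * precompute the notify doorbell address for every virtqueue.
 */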
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
        struct virtio_pci_cap cap;
        u16 notify_off;
        int ret;
        u8 pos;
        u32 i;

        ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
                return -EIO;
        }

        while (pos) {
                ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
                                              sizeof(cap), pos);
                if (ret < 0) {
                        IFCVF_ERR(pdev,
                                  "Failed to get PCI capability at %x\n", pos);
                        break;
                }

                if (cap.cap_vndr != PCI_CAP_ID_VNDR)
                        goto next;

                switch (cap.cfg_type) {
                case VIRTIO_PCI_CAP_COMMON_CFG:
                        hw->common_cfg = get_cap_addr(hw, &cap);
                        IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
                                  hw->common_cfg);
                        break;
                case VIRTIO_PCI_CAP_NOTIFY_CFG:
                        pci_read_config_dword(pdev, pos + sizeof(cap),
                                              &hw->notify_off_multiplier);
                        hw->notify_bar = cap.bar;
                        hw->notify_base = get_cap_addr(hw, &cap);
                        IFCVF_DBG(pdev, "hw->notify_base = %p\n",
                                  hw->notify_base);
                        break;
                case VIRTIO_PCI_CAP_ISR_CFG:
                        hw->isr = get_cap_addr(hw, &cap);
                        IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
                        break;
                case VIRTIO_PCI_CAP_DEVICE_CFG:
                        hw->net_cfg = get_cap_addr(hw, &cap);
                        IFCVF_DBG(pdev, "hw->net_cfg = %p\n", hw->net_cfg);
                        break;
                }

next:
                pos = cap.cap_next;
        }

        if (hw->common_cfg == NULL || hw->notify_base == NULL ||
            hw->isr == NULL || hw->net_cfg == NULL) {
                IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
                return -EIO;
        }

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
                ifc_iowrite16(i, &hw->common_cfg->queue_select);
                notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
                hw->vring[i].notify_addr = hw->notify_base +
                        notify_off * hw->notify_off_multiplier;
        }

        hw->lm_cfg = hw->base[IFCVF_LM_BAR];

        IFCVF_DBG(pdev,
                  "PCI capability mapping: common cfg: %p, notify base: %p, isr cfg: %p, device cfg: %p, multiplier: %u\n",
                  hw->common_cfg, hw->notify_base, hw->isr,
                  hw->net_cfg, hw->notify_off_multiplier);

        return 0;
}

u8 ifcvf_get_status(struct ifcvf_hw *hw)
{
        return ifc_ioread8(&hw->common_cfg->device_status);
}

void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
        ifc_iowrite8(status, &hw->common_cfg->device_status);
}

void ifcvf_reset(struct ifcvf_hw *hw)
{
        hw->config_cb.callback = NULL;
        hw->config_cb.private = NULL;

        ifcvf_set_status(hw, 0);
        /* flush set_status, make sure VF is stopped, reset */
        ifcvf_get_status(hw);
}

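/* OR new bits into device_status; read back so the write is posted */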
static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
{
        if (status != 0)
                status |= ifcvf_get_status(hw);

        ifcvf_set_status(hw, status);
        ifcvf_get_status(hw);
}

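/*
 * The device feature word is 64 bits wide but exposed through a
 * 32-bit window; select the low and high dwords in turn.
 */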
u64 ifcvf_get_features(struct ifcvf_hw *hw)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
        u32 features_lo, features_hi;

        ifc_iowrite32(0, &cfg->device_feature_select);
        features_lo = ifc_ioread32(&cfg->device_feature);

        ifc_iowrite32(1, &cfg->device_feature_select);
        features_hi = ifc_ioread32(&cfg->device_feature);

        return ((u64)features_hi << 32) | features_lo;
}

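/*
 * Read the device config space byte by byte, retrying until the
 * generation counter is stable so the snapshot is consistent.
 */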
void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
                           void *dst, int length)
{
        u8 old_gen, new_gen, *p;
        int i;

        WARN_ON(offset + length > sizeof(struct virtio_net_config));
        do {
                old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
                p = dst;
                for (i = 0; i < length; i++)
                        *p++ = ifc_ioread8(hw->net_cfg + offset + i);

                new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
        } while (old_gen != new_gen);
}

void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
                            const void *src, int length)
{
        const u8 *p;
        int i;

        p = src;
        WARN_ON(offset + length > sizeof(struct virtio_net_config));
        for (i = 0; i < length; i++)
                ifc_iowrite8(*p++, hw->net_cfg + offset + i);
}

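/* Write the driver-acked feature bits through the 32-bit guest_feature window */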
static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

        ifc_iowrite32(0, &cfg->guest_feature_select);
        ifc_iowrite32((u32)features, &cfg->guest_feature);

        ifc_iowrite32(1, &cfg->guest_feature_select);
        ifc_iowrite32(features >> 32, &cfg->guest_feature);
}

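/*
 * Negotiate features: write the requested bits, set FEATURES_OK and
 * read the status back to verify that the device accepted them.
 */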
static int ifcvf_config_features(struct ifcvf_hw *hw)
{
        struct ifcvf_adapter *ifcvf;

        ifcvf = vf_to_adapter(hw);
        ifcvf_set_features(hw, hw->req_features);
        ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);

        if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
                IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
                return -EIO;
        }

        return 0;
}

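/*
 * The last available index of each queue lives in the LM
 * (live migration) BAR, grouped per queue pair.
 */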
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
        struct ifcvf_lm_cfg __iomem *ifcvf_lm;
        void __iomem *avail_idx_addr;
        u16 last_avail_idx;
        u32 q_pair_id;

        ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
        q_pair_id = qid / 2;
        avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
        last_avail_idx = ifc_ioread16(avail_idx_addr);

        return last_avail_idx;
}

int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
        struct ifcvf_lm_cfg __iomem *ifcvf_lm;
        void __iomem *avail_idx_addr;
        u32 q_pair_id;

        ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
        q_pair_id = qid / 2;
        avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
        hw->vring[qid].last_avail_idx = num;
        ifc_iowrite16(num, avail_idx_addr);

        return 0;
}

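/*
 * Program every ready virtqueue: ring addresses, size, MSI-X vector
 * and saved avail index, then enable it. Reading the vector back
 * detects allocation failure, which the device reports as NO_VECTOR.
 */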
static int ifcvf_hw_enable(struct ifcvf_hw *hw)
{
        struct virtio_pci_common_cfg __iomem *cfg;
        struct ifcvf_adapter *ifcvf;
        u32 i;

        ifcvf = vf_to_adapter(hw);
        cfg = hw->common_cfg;
        ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);

        if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
                IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n");
                return -EINVAL;
        }

        for (i = 0; i < hw->nr_vring; i++) {
                if (!hw->vring[i].ready)
                        break;

                ifc_iowrite16(i, &cfg->queue_select);
                ifc_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
                                      &cfg->queue_desc_hi);
                ifc_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
                                      &cfg->queue_avail_hi);
                ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
                                      &cfg->queue_used_hi);
                ifc_iowrite16(hw->vring[i].size, &cfg->queue_size);
                ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector);

                if (ifc_ioread16(&cfg->queue_msix_vector) ==
                    VIRTIO_MSI_NO_VECTOR) {
                        IFCVF_ERR(ifcvf->pdev,
                                  "No msix vector for queue %u\n", i);
                        return -EINVAL;
                }

                ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
                ifc_iowrite16(1, &cfg->queue_enable);
        }

        return 0;
}

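/* Detach the config and per-queue MSI-X vectors; the final read flushes the writes */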
static void ifcvf_hw_disable(struct ifcvf_hw *hw)
{
        struct virtio_pci_common_cfg __iomem *cfg;
        u32 i;

        cfg = hw->common_cfg;
        ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->msix_config);

        for (i = 0; i < hw->nr_vring; i++) {
                ifc_iowrite16(i, &cfg->queue_select);
                ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->queue_msix_vector);
        }

        ifc_ioread16(&cfg->queue_msix_vector);
}

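/*
 * Bring the device up following the virtio initialization sequence:
 * reset, ACKNOWLEDGE, DRIVER, feature negotiation, queue setup,
 * and finally DRIVER_OK.
 */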
int ifcvf_start_hw(struct ifcvf_hw *hw)
{
        ifcvf_reset(hw);
        ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
        ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);

        if (ifcvf_config_features(hw) < 0)
                return -EINVAL;

        if (ifcvf_hw_enable(hw) < 0)
                return -EINVAL;

        ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);

        return 0;
}

void ifcvf_stop_hw(struct ifcvf_hw *hw)
{
        ifcvf_hw_disable(hw);
        ifcvf_reset(hw);
}

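/* Kick a virtqueue by writing its index to the queue's notify doorbell */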
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
        ifc_iowrite16(qid, hw->vring[qid].notify_addr);
}