linux/drivers/vdpa/ifcvf/ifcvf_base.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include "ifcvf_base.h"

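/* Thin wrappers around the generic MMIO accessors for the VF's ioremapped BARs. */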
static inline u8 ifc_ioread8(u8 __iomem *addr)
{
        return ioread8(addr);
}

static inline u16 ifc_ioread16(__le16 __iomem *addr)
{
        return ioread16(addr);
}

static inline u32 ifc_ioread32(__le32 __iomem *addr)
{
        return ioread32(addr);
}

static inline void ifc_iowrite8(u8 value, u8 __iomem *addr)
{
        iowrite8(value, addr);
}

static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr)
{
        iowrite16(value, addr);
}

static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr)
{
        iowrite32(value, addr);
}

static void ifc_iowrite64_twopart(u64 val,
                                  __le32 __iomem *lo, __le32 __iomem *hi)
{
        ifc_iowrite32((u32)val, lo);
        ifc_iowrite32(val >> 32, hi);
}

struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
{
        return container_of(hw, struct ifcvf_adapter, vf);
}

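/*
 * Translate a virtio PCI capability into the corresponding ioremapped
 * address inside one of the VF's BARs, validating the BAR number and the
 * offset/length range against the actual resource size first.
 */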
static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
                                  struct virtio_pci_cap *cap)
{
        struct ifcvf_adapter *ifcvf;
        struct pci_dev *pdev;
        u32 length, offset;
        u8 bar;

        length = le32_to_cpu(cap->length);
        offset = le32_to_cpu(cap->offset);
        bar = cap->bar;

        ifcvf = vf_to_adapter(hw);
        pdev = ifcvf->pdev;

        if (bar >= IFCVF_PCI_MAX_RESOURCE) {
                IFCVF_DBG(pdev,
                          "Invalid bar number %u to get capabilities\n", bar);
                return NULL;
        }

        if (offset + length > pci_resource_len(pdev, bar)) {
                IFCVF_DBG(pdev,
                          "offset(%u) + len(%u) overflows bar%u's capability\n",
                          offset, length, bar);
                return NULL;
        }

        return hw->base[bar] + offset;
}

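/*
 * Read a capability structure from PCI config space one dword at a time,
 * since pci_read_config_dword() is limited to 32-bit accesses.
 */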
static int ifcvf_read_config_range(struct pci_dev *dev,
                                   uint32_t *val, int size, int where)
{
        int ret, i;

        for (i = 0; i < size; i += 4) {
                ret = pci_read_config_dword(dev, where + i, val + i / 4);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

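/*
 * Walk the PCI capability list and map the vendor-specific virtio
 * capabilities (common, notify, ISR and device config) into the hw
 * structure, then cache the per-queue notify addresses and the LM BAR.
 * Returns 0 on success, -EIO if any mandatory capability is missing.
 */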
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
        struct virtio_pci_cap cap;
        u16 notify_off;
        int ret;
        u8 pos;
        u32 i;

        ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
                return -EIO;
        }

        while (pos) {
                ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
                                              sizeof(cap), pos);
                if (ret < 0) {
                        IFCVF_ERR(pdev,
                                  "Failed to get PCI capability at %x\n", pos);
                        break;
                }

                if (cap.cap_vndr != PCI_CAP_ID_VNDR)
                        goto next;

                switch (cap.cfg_type) {
                case VIRTIO_PCI_CAP_COMMON_CFG:
                        hw->common_cfg = get_cap_addr(hw, &cap);
                        IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
                                  hw->common_cfg);
                        break;
                case VIRTIO_PCI_CAP_NOTIFY_CFG:
                        pci_read_config_dword(pdev, pos + sizeof(cap),
                                              &hw->notify_off_multiplier);
                        hw->notify_bar = cap.bar;
                        hw->notify_base = get_cap_addr(hw, &cap);
                        hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
                                        le32_to_cpu(cap.offset);
                        IFCVF_DBG(pdev, "hw->notify_base = %p\n",
                                  hw->notify_base);
                        break;
                case VIRTIO_PCI_CAP_ISR_CFG:
                        hw->isr = get_cap_addr(hw, &cap);
                        IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
                        break;
                case VIRTIO_PCI_CAP_DEVICE_CFG:
                        hw->net_cfg = get_cap_addr(hw, &cap);
                        IFCVF_DBG(pdev, "hw->net_cfg = %p\n", hw->net_cfg);
                        break;
                }

next:
                pos = cap.cap_next;
        }

        if (hw->common_cfg == NULL || hw->notify_base == NULL ||
            hw->isr == NULL || hw->net_cfg == NULL) {
                IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
                return -EIO;
        }

        hw->nr_vring = ifc_ioread16(&hw->common_cfg->num_queues);

        for (i = 0; i < hw->nr_vring; i++) {
                ifc_iowrite16(i, &hw->common_cfg->queue_select);
                notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
                hw->vring[i].notify_addr = hw->notify_base +
                        notify_off * hw->notify_off_multiplier;
                hw->vring[i].notify_pa = hw->notify_base_pa +
                        notify_off * hw->notify_off_multiplier;
        }

        hw->lm_cfg = hw->base[IFCVF_LM_BAR];

        IFCVF_DBG(pdev,
                  "PCI capability mapping: common cfg: %p, notify base: %p, isr cfg: %p, device cfg: %p, multiplier: %u\n",
                  hw->common_cfg, hw->notify_base, hw->isr,
                  hw->net_cfg, hw->notify_off_multiplier);

        return 0;
}

u8 ifcvf_get_status(struct ifcvf_hw *hw)
{
        return ifc_ioread8(&hw->common_cfg->device_status);
}

void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
        ifc_iowrite8(status, &hw->common_cfg->device_status);
}

void ifcvf_reset(struct ifcvf_hw *hw)
{
        hw->config_cb.callback = NULL;
        hw->config_cb.private = NULL;

        ifcvf_set_status(hw, 0);
        /* flush set_status, make sure VF is stopped, reset */
        ifcvf_get_status(hw);
}

static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
{
        if (status != 0)
                status |= ifcvf_get_status(hw);

        ifcvf_set_status(hw, status);
        ifcvf_get_status(hw);
}

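/*
 * Device features are exposed as two 32-bit windows selected through
 * device_feature_select; read both halves and combine them into a u64.
 */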
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
        u32 features_lo, features_hi;
        u64 features;

        ifc_iowrite32(0, &cfg->device_feature_select);
        features_lo = ifc_ioread32(&cfg->device_feature);

        ifc_iowrite32(1, &cfg->device_feature_select);
        features_hi = ifc_ioread32(&cfg->device_feature);

        features = ((u64)features_hi << 32) | features_lo;

        return features;
}

u64 ifcvf_get_features(struct ifcvf_hw *hw)
{
        return hw->hw_features;
}

int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
{
        struct ifcvf_adapter *ifcvf = vf_to_adapter(hw);

        if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
                IFCVF_ERR(ifcvf->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
                return -EINVAL;
        }

        return 0;
}

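/*
 * Read the virtio-net device config byte by byte, retrying until the
 * config_generation counter is unchanged across the read so that a
 * concurrent device-side update cannot hand back a torn snapshot.
 */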
void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
                           void *dst, int length)
{
        u8 old_gen, new_gen, *p;
        int i;

        WARN_ON(offset + length > sizeof(struct virtio_net_config));
        do {
                old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
                p = dst;
                for (i = 0; i < length; i++)
                        *p++ = ifc_ioread8(hw->net_cfg + offset + i);

                new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
        } while (old_gen != new_gen);
}

void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
                            const void *src, int length)
{
        const u8 *p;
        int i;

        p = src;
        WARN_ON(offset + length > sizeof(struct virtio_net_config));
        for (i = 0; i < length; i++)
                ifc_iowrite8(*p++, hw->net_cfg + offset + i);
}

static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

        ifc_iowrite32(0, &cfg->guest_feature_select);
        ifc_iowrite32((u32)features, &cfg->guest_feature);

        ifc_iowrite32(1, &cfg->guest_feature_select);
        ifc_iowrite32(features >> 32, &cfg->guest_feature);
}

static int ifcvf_config_features(struct ifcvf_hw *hw)
{
        struct ifcvf_adapter *ifcvf;

        ifcvf = vf_to_adapter(hw);
        ifcvf_set_features(hw, hw->req_features);
        ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);

        if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
                IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
                return -EIO;
        }

        return 0;
}

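/*
 * The last_avail_idx of each queue is mirrored in the LM (live migration)
 * config region, indexed by queue pair and by the queue's position within
 * the pair.
 */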
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
        struct ifcvf_lm_cfg __iomem *ifcvf_lm;
        void __iomem *avail_idx_addr;
        u16 last_avail_idx;
        u32 q_pair_id;

        ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
        q_pair_id = qid / 2;
        avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
        last_avail_idx = ifc_ioread16(avail_idx_addr);

        return last_avail_idx;
}

int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
        struct ifcvf_lm_cfg __iomem *ifcvf_lm;
        void __iomem *avail_idx_addr;
        u32 q_pair_id;

        ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
        q_pair_id = qid / 2;
        avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
        hw->vring[qid].last_avail_idx = num;
        ifc_iowrite16(num, avail_idx_addr);

        return 0;
}

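/*
 * Program the common config for each ready vring (stopping at the first
 * one that is not ready): descriptor, avail and used ring addresses, ring
 * size, MSI-X vector and the saved last_avail_idx, then enable the queue.
 * Fails if the device refuses the MSI-X vector for the device config or
 * for any queue.
 */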
static int ifcvf_hw_enable(struct ifcvf_hw *hw)
{
        struct virtio_pci_common_cfg __iomem *cfg;
        struct ifcvf_adapter *ifcvf;
        u32 i;

        ifcvf = vf_to_adapter(hw);
        cfg = hw->common_cfg;
        ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);

        if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
                IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n");
                return -EINVAL;
        }

        for (i = 0; i < hw->nr_vring; i++) {
                if (!hw->vring[i].ready)
                        break;

                ifc_iowrite16(i, &cfg->queue_select);
                ifc_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
                                      &cfg->queue_desc_hi);
                ifc_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
                                      &cfg->queue_avail_hi);
                ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
                                      &cfg->queue_used_hi);
                ifc_iowrite16(hw->vring[i].size, &cfg->queue_size);
                ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector);

                if (ifc_ioread16(&cfg->queue_msix_vector) ==
                    VIRTIO_MSI_NO_VECTOR) {
                        IFCVF_ERR(ifcvf->pdev,
                                  "No msix vector for queue %u\n", i);
                        return -EINVAL;
                }

                ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
                ifc_iowrite16(1, &cfg->queue_enable);
        }

        return 0;
}

static void ifcvf_hw_disable(struct ifcvf_hw *hw)
{
        struct virtio_pci_common_cfg __iomem *cfg;
        u32 i;

        cfg = hw->common_cfg;
        ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->msix_config);

        for (i = 0; i < hw->nr_vring; i++) {
                ifc_iowrite16(i, &cfg->queue_select);
                ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->queue_msix_vector);
        }

        ifc_ioread16(&cfg->queue_msix_vector);
}

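/*
 * Bring the device up following the virtio initialization sequence:
 * reset, ACKNOWLEDGE, DRIVER, feature negotiation (FEATURES_OK), queue
 * setup, and finally DRIVER_OK.
 */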
int ifcvf_start_hw(struct ifcvf_hw *hw)
{
        ifcvf_reset(hw);
        ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
        ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);

        if (ifcvf_config_features(hw) < 0)
                return -EINVAL;

        if (ifcvf_hw_enable(hw) < 0)
                return -EINVAL;

        ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);

        return 0;
}

void ifcvf_stop_hw(struct ifcvf_hw *hw)
{
        ifcvf_hw_disable(hw);
        ifcvf_reset(hw);
}

void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
        ifc_iowrite16(qid, hw->vring[qid].notify_addr);
}