linux/drivers/net/ethernet/google/gve/gve_adminq.c
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_MAX_ADMINQ_RELEASE_CHECK    500
#define GVE_ADMINQ_SLEEP_LEN            20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK      100

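/* The admin queue is a single DMA-coherent page used as a ring of
 * gve_adminq_command entries, so the number of entries per page must be a
 * power of two for the mask-based indexing used below to wrap correctly.
 * As an illustration (assuming 4096-byte pages and the 64-byte command
 * size from gve_adminq.h): the ring holds 64 commands, adminq_mask is 63,
 * and command N lands in slot (N & 63).
 */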
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
        priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
                                          &priv->adminq_bus_addr, GFP_KERNEL);
        if (unlikely(!priv->adminq))
                return -ENOMEM;

        priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
        priv->adminq_prod_cnt = 0;

        /* Setup Admin queue with the device */
        iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
                    &priv->reg_bar0->adminq_pfn);

        gve_set_admin_queue_ok(priv);
        return 0;
}

void gve_adminq_release(struct gve_priv *priv)
{
        int i = 0;

        /* Tell the device the adminq is leaving */
        iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
        while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
                /* If this is reached the device is unrecoverable and still
                 * holding memory. Continue looping to avoid memory corruption,
                 * but WARN so it is visible what is going on.
                 */
                if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
                        WARN(1, "Unrecoverable platform error!");
                i++;
                msleep(GVE_ADMINQ_SLEEP_LEN);
        }
        gve_clear_device_rings_ok(priv);
        gve_clear_device_resources_ok(priv);
        gve_clear_admin_queue_ok(priv);
}

void gve_adminq_free(struct device *dev, struct gve_priv *priv)
{
        if (!gve_get_admin_queue_ok(priv))
                return;
        gve_adminq_release(priv);
        dma_free_coherent(dev, PAGE_SIZE, priv->adminq, priv->adminq_bus_addr);
        gve_clear_admin_queue_ok(priv);
}

static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{
        iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
}

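/* Poll the device's event counter until it reaches prod_cnt. With the
 * constants above this gives a command roughly
 * GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK * GVE_ADMINQ_SLEEP_LEN ms, i.e. about
 * 100 * 20 ms = 2 seconds, to complete before it is treated as timed out.
 */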
static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{
        int i;

        for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
                if (ioread32be(&priv->reg_bar0->adminq_event_counter)
                    == prod_cnt)
                        return true;
                msleep(GVE_ADMINQ_SLEEP_LEN);
        }

        return false;
}

static int gve_adminq_parse_err(struct device *dev, u32 status)
{
        if (status != GVE_ADMINQ_COMMAND_PASSED &&
            status != GVE_ADMINQ_COMMAND_UNSET)
                dev_err(dev, "AQ command failed with status %d\n", status);

        switch (status) {
        case GVE_ADMINQ_COMMAND_PASSED:
                return 0;
        case GVE_ADMINQ_COMMAND_UNSET:
                dev_err(dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
                return -EINVAL;
        case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
        case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
        case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
        case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
        case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
                return -EAGAIN;
        case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
        case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
        case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
        case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
        case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
        case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
                return -EINVAL;
        case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
                return -ETIME;
        case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
        case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
                return -EACCES;
        case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
                return -ENOMEM;
        case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
                return -ENOTSUPP;
        default:
                dev_err(dev, "parse_aq_err: unknown status code %d\n", status);
                return -EINVAL;
        }
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
int gve_adminq_execute_cmd(struct gve_priv *priv,
                           union gve_adminq_command *cmd_orig)
{
        union gve_adminq_command *cmd;
        u32 status = 0;
        u32 prod_cnt;

        cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
        priv->adminq_prod_cnt++;
        prod_cnt = priv->adminq_prod_cnt;

        memcpy(cmd, cmd_orig, sizeof(*cmd_orig));

        gve_adminq_kick_cmd(priv, prod_cnt);
        if (!gve_adminq_wait_for_cmd(priv, prod_cnt)) {
                dev_err(&priv->pdev->dev, "AQ command timed out, need to reset AQ\n");
                return -ENOTRECOVERABLE;
        }

        memcpy(cmd_orig, cmd, sizeof(*cmd));
        status = be32_to_cpu(READ_ONCE(cmd->status));
        return gve_adminq_parse_err(&priv->pdev->dev, status);
}
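
/* Illustrative usage - the wrappers below all follow this same pattern:
 * build the command on the stack, then hand it to gve_adminq_execute_cmd(),
 * which copies it into the ring and, on completion, copies the finished
 * command (including its status field) back into the caller's buffer:
 *
 *	union gve_adminq_command cmd;
 *	int err;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);
 *	err = gve_adminq_execute_cmd(priv, &cmd);
 *
 * Callers must serialize their calls, per the comment above.
 */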

/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
 * the management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
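/* Illustrative example of the arrangement described above (assuming
 * num_ntfy_blks + 1 vectors are allocated): with a base index of 0 and
 * num_ntfy_blks == N, the notify blocks use MSI-X vectors 0 through N - 1
 * and the management vector is the last one, index N.
 */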
#define GVE_NTFY_BLK_BASE_MSIX_IDX      0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
                                          dma_addr_t counter_array_bus_addr,
                                          u32 num_counters,
                                          dma_addr_t db_array_bus_addr,
                                          u32 num_ntfy_blks)
{
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
        cmd.configure_device_resources =
                (struct gve_adminq_configure_device_resources) {
                .counter_array = cpu_to_be64(counter_array_bus_addr),
                .num_counters = cpu_to_be32(num_counters),
                .irq_db_addr = cpu_to_be64(db_array_bus_addr),
                .num_irq_dbs = cpu_to_be32(num_ntfy_blks),
                .irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
                .ntfy_blk_msix_base_idx =
                                        cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
        };

        return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);

        return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
        struct gve_tx_ring *tx = &priv->tx[queue_index];
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
        cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
                .queue_id = cpu_to_be32(queue_index),
                .reserved = 0,
                .queue_resources_addr = cpu_to_be64(tx->q_resources_bus),
                .tx_ring_addr = cpu_to_be64(tx->bus),
                .queue_page_list_id = cpu_to_be32(tx->tx_fifo.qpl->id),
                .ntfy_id = cpu_to_be32(tx->ntfy_id),
        };

        return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
        struct gve_rx_ring *rx = &priv->rx[queue_index];
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
        cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
                .queue_id = cpu_to_be32(queue_index),
                .index = cpu_to_be32(queue_index),
                .reserved = 0,
                .ntfy_id = cpu_to_be32(rx->ntfy_id),
                .queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
                .rx_desc_ring_addr = cpu_to_be64(rx->desc.bus),
                .rx_data_ring_addr = cpu_to_be64(rx->data.data_bus),
                .queue_page_list_id = cpu_to_be32(rx->data.qpl->id),
        };

        return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
        cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
                .queue_id = cpu_to_be32(queue_index),
        };

        return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
        cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
                .queue_id = cpu_to_be32(queue_index),
        };

        return gve_adminq_execute_cmd(priv, &cmd);
}

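/* Ask the device to describe itself. The descriptor is DMA'd into a page
 * allocated here; the advertised Tx/Rx ring sizes and MTU are
 * sanity-checked, and the device's limits (descriptor counts, pages per
 * QPL, event counters, MAC, default queue count) are cached in priv.
 */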
int gve_adminq_describe_device(struct gve_priv *priv)
{
        struct gve_device_descriptor *descriptor;
        union gve_adminq_command cmd;
        dma_addr_t descriptor_bus;
        int err = 0;
        u8 *mac;
        u16 mtu;

        memset(&cmd, 0, sizeof(cmd));
        descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
                                        &descriptor_bus, GFP_KERNEL);
        if (!descriptor)
                return -ENOMEM;
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
        cmd.describe_device.device_descriptor_addr =
                                                cpu_to_be64(descriptor_bus);
        cmd.describe_device.device_descriptor_version =
                        cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
        cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);

        err = gve_adminq_execute_cmd(priv, &cmd);
        if (err)
                goto free_device_descriptor;

        priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
        if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
                netif_err(priv, drv, priv->dev, "Tx desc count %d too low\n",
                          priv->tx_desc_cnt);
                err = -EINVAL;
                goto free_device_descriptor;
        }
        priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
        if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
            < PAGE_SIZE ||
            priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
            < PAGE_SIZE) {
                netif_err(priv, drv, priv->dev, "Rx desc count %d too low\n",
                          priv->rx_desc_cnt);
                err = -EINVAL;
                goto free_device_descriptor;
        }
        priv->max_registered_pages =
                                be64_to_cpu(descriptor->max_registered_pages);
        mtu = be16_to_cpu(descriptor->mtu);
        if (mtu < ETH_MIN_MTU) {
                netif_err(priv, drv, priv->dev, "MTU %d below minimum MTU\n",
                          mtu);
                err = -EINVAL;
                goto free_device_descriptor;
        }
        priv->dev->max_mtu = mtu;
        priv->num_event_counters = be16_to_cpu(descriptor->counters);
        ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
        mac = descriptor->mac;
        netif_info(priv, drv, priv->dev, "MAC addr: %pM\n", mac);
        priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
        priv->rx_pages_per_qpl = be16_to_cpu(descriptor->rx_pages_per_qpl);
        if (priv->rx_pages_per_qpl < priv->rx_desc_cnt) {
                netif_err(priv, drv, priv->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
                          priv->rx_pages_per_qpl);
                priv->rx_desc_cnt = priv->rx_pages_per_qpl;
        }
        priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);

free_device_descriptor:
        /* Free with the same size the descriptor was allocated with above. */
        dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
                          descriptor_bus);
        return err;
}

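/* Register a queue page list with the device: hand it a DMA-visible array
 * of big-endian page bus addresses, one per page in the QPL. The array is
 * only needed for the duration of the command and is freed right after.
 */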
int gve_adminq_register_page_list(struct gve_priv *priv,
                                  struct gve_queue_page_list *qpl)
{
        struct device *hdev = &priv->pdev->dev;
        u32 num_entries = qpl->num_entries;
        u32 size = num_entries * sizeof(qpl->page_buses[0]);
        union gve_adminq_command cmd;
        dma_addr_t page_list_bus;
        __be64 *page_list;
        int err;
        int i;

        memset(&cmd, 0, sizeof(cmd));
        page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
        if (!page_list)
                return -ENOMEM;

        for (i = 0; i < num_entries; i++)
                page_list[i] = cpu_to_be64(qpl->page_buses[i]);

        cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
        cmd.reg_page_list = (struct gve_adminq_register_page_list) {
                .page_list_id = cpu_to_be32(qpl->id),
                .num_pages = cpu_to_be32(num_entries),
                .page_address_list_addr = cpu_to_be64(page_list_bus),
        };

        err = gve_adminq_execute_cmd(priv, &cmd);
        dma_free_coherent(hdev, size, page_list, page_list_bus);
        return err;
}

int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
        cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
                .page_list_id = cpu_to_be32(page_list_id),
        };

        return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
        cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
                .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
                .parameter_value = cpu_to_be64(mtu),
        };

        return gve_adminq_execute_cmd(priv, &cmd);
}