linux/drivers/net/ethernet/microsoft/mana/gdma_main.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include "mana.h"

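/* Accessors for the GDMA registers in BAR 0. The GDMA_REG_* offsets used
 * below come from the GDMA definitions pulled in through mana.h.
 */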
static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
        return readl(g->bar0_va + offset);
}

static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
{
        return readq(g->bar0_va + offset);
}

static void mana_gd_init_registers(struct pci_dev *pdev)
{
        struct gdma_context *gc = pci_get_drvdata(pdev);

        gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;

        gc->db_page_base = gc->bar0_va +
                                mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);

        gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
}

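/* Query the device's resource limits over the HWC and clamp the number of
 * usable queues: never more than the number of online CPUs or
 * MANA_MAX_NUM_QUEUES, never more than any per-type hardware maximum, and
 * never more than the usable MSI-X vectors minus the one reserved for the
 * HWC itself.
 */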
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
        struct gdma_context *gc = pci_get_drvdata(pdev);
        struct gdma_query_max_resources_resp resp = {};
        struct gdma_general_req req = {};
        int err;

        mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
                             sizeof(req), sizeof(resp));

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n",
                        err, resp.hdr.status);
                return err ? err : -EPROTO;
        }

        if (gc->num_msix_usable > resp.max_msix)
                gc->num_msix_usable = resp.max_msix;

        if (gc->num_msix_usable <= 1)
                return -ENOSPC;

        gc->max_num_queues = num_online_cpus();
        if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
                gc->max_num_queues = MANA_MAX_NUM_QUEUES;

        if (gc->max_num_queues > resp.max_eq)
                gc->max_num_queues = resp.max_eq;

        if (gc->max_num_queues > resp.max_cq)
                gc->max_num_queues = resp.max_cq;

        if (gc->max_num_queues > resp.max_sq)
                gc->max_num_queues = resp.max_sq;

        if (gc->max_num_queues > resp.max_rq)
                gc->max_num_queues = resp.max_rq;

        /* The Hardware Channel (HWC) uses 1 MSI-X */
        if (gc->max_num_queues > gc->num_msix_usable - 1)
                gc->max_num_queues = gc->num_msix_usable - 1;

        return 0;
}

static int mana_gd_detect_devices(struct pci_dev *pdev)
{
        struct gdma_context *gc = pci_get_drvdata(pdev);
        struct gdma_list_devices_resp resp = {};
        struct gdma_general_req req = {};
        struct gdma_dev_id dev;
        u32 i, max_num_devs;
        u16 dev_type;
        int err;

        mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
                             sizeof(resp));

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err,
                        resp.hdr.status);
                return err ? err : -EPROTO;
        }

        max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);

        for (i = 0; i < max_num_devs; i++) {
                dev = resp.devs[i];
                dev_type = dev.type;

                /* HWC is already detected in mana_hwc_create_channel(). */
                if (dev_type == GDMA_DEVICE_HWC)
                        continue;

                if (dev_type == GDMA_DEVICE_MANA) {
                        gc->mana.gdma_context = gc;
                        gc->mana.dev_id = dev;
                }
        }

        return gc->mana.dev_id.type == 0 ? -ENODEV : 0;
}

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
                         u32 resp_len, void *resp)
{
        struct hw_channel_context *hwc = gc->hwc.driver_data;

        return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
                         struct gdma_mem_info *gmi)
{
        dma_addr_t dma_handle;
        void *buf;

        if (length < PAGE_SIZE || !is_power_of_2(length))
                return -EINVAL;

        gmi->dev = gc->dev;
        buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        gmi->dma_handle = dma_handle;
        gmi->virt_addr = buf;
        gmi->length = length;

        return 0;
}

void mana_gd_free_memory(struct gdma_mem_info *gmi)
{
        dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr,
                          gmi->dma_handle);
}

static int mana_gd_create_hw_eq(struct gdma_context *gc,
                                struct gdma_queue *queue)
{
        struct gdma_create_queue_resp resp = {};
        struct gdma_create_queue_req req = {};
        int err;

        if (queue->type != GDMA_EQ)
                return -EINVAL;

        mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
                             sizeof(req), sizeof(resp));

        req.hdr.dev_id = queue->gdma_dev->dev_id;
        req.type = queue->type;
        req.pdid = queue->gdma_dev->pdid;
        req.doolbell_id = queue->gdma_dev->doorbell;
        req.gdma_region = queue->mem_info.gdma_region;
        req.queue_size = queue->queue_size;
        req.log2_throttle_limit = queue->eq.log2_throttle_limit;
        req.eq_pci_msix_index = queue->eq.msix_index;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err,
                        resp.hdr.status);
                return err ? err : -EPROTO;
        }

        queue->id = resp.queue_index;
        queue->eq.disable_needed = true;
        queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
        return 0;
}

static int mana_gd_disable_queue(struct gdma_queue *queue)
{
        struct gdma_context *gc = queue->gdma_dev->gdma_context;
        struct gdma_disable_queue_req req = {};
        struct gdma_general_resp resp = {};
        int err;

        WARN_ON(queue->type != GDMA_EQ);

        mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
                             sizeof(req), sizeof(resp));

        req.hdr.dev_id = queue->gdma_dev->dev_id;
        req.type = queue->type;
        req.queue_index = queue->id;
        req.alloc_res_id_on_creation = 1;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
                        resp.hdr.status);
                return err ? err : -EPROTO;
        }

        return 0;
}

#define DOORBELL_OFFSET_SQ      0x0
#define DOORBELL_OFFSET_RQ      0x400
#define DOORBELL_OFFSET_CQ      0x800
#define DOORBELL_OFFSET_EQ      0xFF8

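/* Each doorbell page exposes one register per queue type at the fixed
 * offsets above. Ringing a doorbell is a single 8-byte write that encodes
 * the queue id, the new tail position, and (depending on the queue type)
 * an arm bit or a WQE count.
 */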
static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
                                  enum gdma_queue_type q_type, u32 qid,
                                  u32 tail_ptr, u8 num_req)
{
        void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index;
        union gdma_doorbell_entry e = {};

        switch (q_type) {
        case GDMA_EQ:
                e.eq.id = qid;
                e.eq.tail_ptr = tail_ptr;
                e.eq.arm = num_req;

                addr += DOORBELL_OFFSET_EQ;
                break;

        case GDMA_CQ:
                e.cq.id = qid;
                e.cq.tail_ptr = tail_ptr;
                e.cq.arm = num_req;

                addr += DOORBELL_OFFSET_CQ;
                break;

        case GDMA_RQ:
                e.rq.id = qid;
                e.rq.tail_ptr = tail_ptr;
                e.rq.wqe_cnt = num_req;

                addr += DOORBELL_OFFSET_RQ;
                break;

        case GDMA_SQ:
                e.sq.id = qid;
                e.sq.tail_ptr = tail_ptr;

                addr += DOORBELL_OFFSET_SQ;
                break;

        default:
                WARN_ON(1);
                return;
        }

        /* Ensure all writes are done before ringing the doorbell */
        wmb();

        writeq(e.as_uint64, addr);
}

void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
{
        mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
                              queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
}

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
{
        struct gdma_context *gc = cq->gdma_dev->gdma_context;
        u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
        u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);

        mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
                              head, arm_bit);
}

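/* Dispatch one EQE: completion events are routed to the matching CQ's
 * callback via gc->cq_table, test events complete the EQ self-test, and
 * HWC init events are forwarded to the EQ's own callback.
 */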
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
        u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
        struct gdma_context *gc = eq->gdma_dev->gdma_context;
        struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
        union gdma_eqe_info eqe_info;
        enum gdma_eqe_type type;
        struct gdma_event event;
        struct gdma_queue *cq;
        struct gdma_eqe *eqe;
        u32 cq_id;

        eqe = &eq_eqe_ptr[head];
        eqe_info.as_uint32 = eqe->eqe_info;
        type = eqe_info.type;

        switch (type) {
        case GDMA_EQE_COMPLETION:
                cq_id = eqe->details[0] & 0xFFFFFF;
                if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs))
                        break;

                cq = gc->cq_table[cq_id];
                if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id))
                        break;

                if (cq->cq.callback)
                        cq->cq.callback(cq->cq.context, cq);

                break;

        case GDMA_EQE_TEST_EVENT:
                gc->test_event_eq_id = eq->id;
                complete(&gc->eq_test_event);
                break;

        case GDMA_EQE_HWC_INIT_EQ_ID_DB:
        case GDMA_EQE_HWC_INIT_DATA:
        case GDMA_EQE_HWC_INIT_DONE:
                if (!eq->eq.callback)
                        break;

                event.type = type;
                memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
                eq->eq.callback(eq->eq.context, eq, &event);
                break;

        default:
                break;
        }
}

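/* The owner bits of an EQE record how many times the ring has wrapped when
 * the entry was written. Owner bits equal to those of the previous pass
 * (head / num_eqe - 1) mean the entry hasn't been rewritten yet (queue
 * empty); bits matching the current pass mean a valid entry; anything else
 * means the device lapped the driver and entries were lost. For example,
 * with num_eqe = 256 and eq->head = 260, a valid entry sits at slot
 * 260 % 256 = 4 and must carry owner bits (260 / 256) & GDMA_EQE_OWNER_MASK.
 */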
static void mana_gd_process_eq_events(void *arg)
{
        u32 owner_bits, new_bits, old_bits;
        union gdma_eqe_info eqe_info;
        struct gdma_eqe *eq_eqe_ptr;
        struct gdma_queue *eq = arg;
        struct gdma_context *gc;
        struct gdma_eqe *eqe;
        u32 head, num_eqe;
        int i;

        gc = eq->gdma_dev->gdma_context;

        num_eqe = eq->queue_size / GDMA_EQE_SIZE;
        eq_eqe_ptr = eq->queue_mem_ptr;

        /* Process up to 5 EQEs at a time, and update the HW head. */
        for (i = 0; i < 5; i++) {
                eqe = &eq_eqe_ptr[eq->head % num_eqe];
                eqe_info.as_uint32 = eqe->eqe_info;
                owner_bits = eqe_info.owner_bits;

                old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
                /* No more entries */
                if (owner_bits == old_bits)
                        break;

                new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
                if (owner_bits != new_bits) {
                        dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id);
                        break;
                }

                mana_gd_process_eqe(eq);

                eq->head++;
        }

        head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);

        mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
                              head, SET_ARM_BIT);
}

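/* Reserve an MSI-X vector for a new EQ by claiming the first free bit in
 * gc->msix_resource, then attach the EQ to the shared interrupt handler.
 * The vectors themselves were allocated in mana_gd_setup_irqs().
 */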
static int mana_gd_register_irq(struct gdma_queue *queue,
                                const struct gdma_queue_spec *spec)
{
        struct gdma_dev *gd = queue->gdma_dev;
        struct gdma_irq_context *gic;
        struct gdma_context *gc;
        struct gdma_resource *r;
        unsigned int msi_index;
        unsigned long flags;
        struct device *dev;
        int err = 0;

        gc = gd->gdma_context;
        r = &gc->msix_resource;
        dev = gc->dev;

        spin_lock_irqsave(&r->lock, flags);

        msi_index = find_first_zero_bit(r->map, r->size);
        if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
                err = -ENOSPC;
        } else {
                bitmap_set(r->map, msi_index, 1);
                queue->eq.msix_index = msi_index;
        }

        spin_unlock_irqrestore(&r->lock, flags);

        if (err) {
                dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
                        err, msi_index, r->size, gc->num_msix_usable);

                return err;
        }

        gic = &gc->irq_contexts[msi_index];

        WARN_ON(gic->handler || gic->arg);

        gic->arg = queue;

        gic->handler = mana_gd_process_eq_events;

        return 0;
}

static void mana_gd_deregister_irq(struct gdma_queue *queue)
{
        struct gdma_dev *gd = queue->gdma_dev;
        struct gdma_irq_context *gic;
        struct gdma_context *gc;
        struct gdma_resource *r;
        unsigned int msix_index;
        unsigned long flags;

        gc = gd->gdma_context;
        r = &gc->msix_resource;

        /* At most num_online_cpus() + 1 interrupts are used. */
        msix_index = queue->eq.msix_index;
        if (WARN_ON(msix_index >= gc->num_msix_usable))
                return;

        gic = &gc->irq_contexts[msix_index];
        gic->handler = NULL;
        gic->arg = NULL;

        spin_lock_irqsave(&r->lock, flags);
        bitmap_clear(r->map, msix_index, 1);
        spin_unlock_irqrestore(&r->lock, flags);

        queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
}

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
{
        struct gdma_generate_test_event_req req = {};
        struct gdma_general_resp resp = {};
        struct device *dev = gc->dev;
        int err;

        mutex_lock(&gc->eq_test_event_mutex);

        init_completion(&gc->eq_test_event);
        gc->test_event_eq_id = INVALID_QUEUE_ID;

        mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
                             sizeof(req), sizeof(resp));

        req.hdr.dev_id = eq->gdma_dev->dev_id;
        req.queue_index = eq->id;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err) {
                dev_err(dev, "test_eq failed: %d\n", err);
                goto out;
        }

        err = -EPROTO;

        if (resp.hdr.status) {
                dev_err(dev, "test_eq failed: 0x%x\n", resp.hdr.status);
                goto out;
        }

        if (!wait_for_completion_timeout(&gc->eq_test_event, 30 * HZ)) {
                dev_err(dev, "test_eq timed out on queue %d\n", eq->id);
                goto out;
        }

        if (eq->id != gc->test_event_eq_id) {
                dev_err(dev, "test_eq got an event on wrong queue %d (%d)\n",
                        gc->test_event_eq_id, eq->id);
                goto out;
        }

        err = 0;
out:
        mutex_unlock(&gc->eq_test_event_mutex);
        return err;
}

static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events,
                               struct gdma_queue *queue)
{
        int err;

        if (flush_events) {
                err = mana_gd_test_eq(gc, queue);
                if (err)
                        dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
        }

        mana_gd_deregister_irq(queue);

        if (queue->eq.disable_needed)
                mana_gd_disable_queue(queue);
}

static int mana_gd_create_eq(struct gdma_dev *gd,
                             const struct gdma_queue_spec *spec,
                             bool create_hwq, struct gdma_queue *queue)
{
        struct gdma_context *gc = gd->gdma_context;
        struct device *dev = gc->dev;
        u32 log2_num_entries;
        int err;

        queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;

        log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);

        if (spec->eq.log2_throttle_limit > log2_num_entries) {
                dev_err(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n",
                        spec->eq.log2_throttle_limit, log2_num_entries);
                return -EINVAL;
        }

        err = mana_gd_register_irq(queue, spec);
        if (err) {
                dev_err(dev, "Failed to register irq: %d\n", err);
                return err;
        }

        queue->eq.callback = spec->eq.callback;
        queue->eq.context = spec->eq.context;
        queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
        queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;

        if (create_hwq) {
                err = mana_gd_create_hw_eq(gc, queue);
                if (err)
                        goto out;

                err = mana_gd_test_eq(gc, queue);
                if (err)
                        goto out;
        }

        return 0;
out:
        dev_err(dev, "Failed to create EQ: %d\n", err);
        mana_gd_destroy_eq(gc, false, queue);
        return err;
}

static void mana_gd_create_cq(const struct gdma_queue_spec *spec,
                              struct gdma_queue *queue)
{
        u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);

        queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
        queue->cq.parent = spec->cq.parent_eq;
        queue->cq.context = spec->cq.context;
        queue->cq.callback = spec->cq.callback;
}

static void mana_gd_destroy_cq(struct gdma_context *gc,
                               struct gdma_queue *queue)
{
        u32 id = queue->id;

        if (id >= gc->max_num_cqs)
                return;

        if (!gc->cq_table[id])
                return;

        gc->cq_table[id] = NULL;
}

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
                             const struct gdma_queue_spec *spec,
                             struct gdma_queue **queue_ptr)
{
        struct gdma_context *gc = gd->gdma_context;
        struct gdma_mem_info *gmi;
        struct gdma_queue *queue;
        int err;

        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
                return -ENOMEM;

        gmi = &queue->mem_info;
        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
        if (err)
                goto free_q;

        queue->head = 0;
        queue->tail = 0;
        queue->queue_mem_ptr = gmi->virt_addr;
        queue->queue_size = spec->queue_size;
        queue->monitor_avl_buf = spec->monitor_avl_buf;
        queue->type = spec->type;
        queue->gdma_dev = gd;

        if (spec->type == GDMA_EQ)
                err = mana_gd_create_eq(gd, spec, false, queue);
        else if (spec->type == GDMA_CQ)
                mana_gd_create_cq(spec, queue);

        if (err)
                goto out;

        *queue_ptr = queue;
        return 0;
out:
        mana_gd_free_memory(gmi);
free_q:
        kfree(queue);
        return err;
}

static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
{
        struct gdma_destroy_dma_region_req req = {};
        struct gdma_general_resp resp = {};
        int err;

        if (gdma_region == GDMA_INVALID_DMA_REGION)
                return;

        mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
                             sizeof(resp));
        req.gdma_region = gdma_region;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status)
                dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
                        err, resp.hdr.status);
}

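/* Register the pages backing gmi with the device so the memory can be
 * referenced by a GDMA region handle. The request carries the full page
 * address list, so it must fit into a single HWC message
 * (hwc->max_req_msg_size).
 */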
static int mana_gd_create_dma_region(struct gdma_dev *gd,
                                     struct gdma_mem_info *gmi)
{
        unsigned int num_page = gmi->length / PAGE_SIZE;
        struct gdma_create_dma_region_req *req = NULL;
        struct gdma_create_dma_region_resp resp = {};
        struct gdma_context *gc = gd->gdma_context;
        struct hw_channel_context *hwc;
        u32 length = gmi->length;
        size_t req_msg_size;
        int err;
        int i;

        if (length < PAGE_SIZE || !is_power_of_2(length))
                return -EINVAL;

        if (offset_in_page(gmi->virt_addr) != 0)
                return -EINVAL;

        hwc = gc->hwc.driver_data;
        req_msg_size = struct_size(req, page_addr_list, num_page);
        if (req_msg_size > hwc->max_req_msg_size)
                return -EINVAL;

        req = kzalloc(req_msg_size, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
                             req_msg_size, sizeof(resp));
        req->length = length;
        req->offset_in_page = 0;
        req->gdma_page_type = GDMA_PAGE_TYPE_4K;
        req->page_count = num_page;
        req->page_addr_list_len = num_page;

        for (i = 0; i < num_page; i++)
                req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;

        err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
        if (err)
                goto out;

        if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
                dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
                        resp.hdr.status);
                err = -EPROTO;
                goto out;
        }

        gmi->gdma_region = resp.gdma_region;
out:
        kfree(req);
        return err;
}

int mana_gd_create_mana_eq(struct gdma_dev *gd,
                           const struct gdma_queue_spec *spec,
                           struct gdma_queue **queue_ptr)
{
        struct gdma_context *gc = gd->gdma_context;
        struct gdma_mem_info *gmi;
        struct gdma_queue *queue;
        int err;

        if (spec->type != GDMA_EQ)
                return -EINVAL;

        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
                return -ENOMEM;

        gmi = &queue->mem_info;
        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
        if (err)
                goto free_q;

        err = mana_gd_create_dma_region(gd, gmi);
        if (err)
                goto out;

        queue->head = 0;
        queue->tail = 0;
        queue->queue_mem_ptr = gmi->virt_addr;
        queue->queue_size = spec->queue_size;
        queue->monitor_avl_buf = spec->monitor_avl_buf;
        queue->type = spec->type;
        queue->gdma_dev = gd;

        err = mana_gd_create_eq(gd, spec, true, queue);
        if (err)
                goto out;

        *queue_ptr = queue;
        return 0;
out:
        mana_gd_free_memory(gmi);
free_q:
        kfree(queue);
        return err;
}

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
                              const struct gdma_queue_spec *spec,
                              struct gdma_queue **queue_ptr)
{
        struct gdma_context *gc = gd->gdma_context;
        struct gdma_mem_info *gmi;
        struct gdma_queue *queue;
        int err;

        if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
            spec->type != GDMA_RQ)
                return -EINVAL;

        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
                return -ENOMEM;

        gmi = &queue->mem_info;
        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
        if (err)
                goto free_q;

        err = mana_gd_create_dma_region(gd, gmi);
        if (err)
                goto out;

        queue->head = 0;
        queue->tail = 0;
        queue->queue_mem_ptr = gmi->virt_addr;
        queue->queue_size = spec->queue_size;
        queue->monitor_avl_buf = spec->monitor_avl_buf;
        queue->type = spec->type;
        queue->gdma_dev = gd;

        if (spec->type == GDMA_CQ)
                mana_gd_create_cq(spec, queue);

        *queue_ptr = queue;
        return 0;
out:
        mana_gd_free_memory(gmi);
free_q:
        kfree(queue);
        return err;
}

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
        struct gdma_mem_info *gmi = &queue->mem_info;

        switch (queue->type) {
        case GDMA_EQ:
                mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
                break;

        case GDMA_CQ:
                mana_gd_destroy_cq(gc, queue);
                break;

        case GDMA_RQ:
                break;

        case GDMA_SQ:
                break;

        default:
                dev_err(gc->dev, "Can't destroy unknown queue: type=%d\n",
                        queue->type);
                return;
        }

        mana_gd_destroy_dma_region(gc, gmi->gdma_region);
        mana_gd_free_memory(gmi);
        kfree(queue);
}

int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
        struct gdma_context *gc = pci_get_drvdata(pdev);
        struct gdma_verify_ver_resp resp = {};
        struct gdma_verify_ver_req req = {};
        int err;

        mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
                             sizeof(req), sizeof(resp));

        req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
        req.protocol_ver_max = GDMA_PROTOCOL_LAST;

        req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
        req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
        req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
        req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;

        req.drv_ver = 0;        /* Unused */
        req.os_type = 0x10;     /* Linux */
        req.os_ver_major = LINUX_VERSION_MAJOR;
        req.os_ver_minor = LINUX_VERSION_PATCHLEVEL;
        req.os_ver_build = LINUX_VERSION_SUBLEVEL;
        strscpy(req.os_ver_str1, utsname()->sysname, sizeof(req.os_ver_str1));
        strscpy(req.os_ver_str2, utsname()->release, sizeof(req.os_ver_str2));
        strscpy(req.os_ver_str3, utsname()->version, sizeof(req.os_ver_str3));

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
                        err, resp.hdr.status);
                return err ? err : -EPROTO;
        }

        return 0;
}

int mana_gd_register_device(struct gdma_dev *gd)
{
        struct gdma_context *gc = gd->gdma_context;
        struct gdma_register_device_resp resp = {};
        struct gdma_general_req req = {};
        int err;

        gd->pdid = INVALID_PDID;
        gd->doorbell = INVALID_DOORBELL;
        gd->gpa_mkey = INVALID_MEM_KEY;

        mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
                             sizeof(resp));

        req.hdr.dev_id = gd->dev_id;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                dev_err(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n",
                        err, resp.hdr.status);
                return err ? err : -EPROTO;
        }

        gd->pdid = resp.pdid;
        gd->gpa_mkey = resp.gpa_mkey;
        gd->doorbell = resp.db_id;

        return 0;
}

int mana_gd_deregister_device(struct gdma_dev *gd)
{
        struct gdma_context *gc = gd->gdma_context;
        struct gdma_general_resp resp = {};
        struct gdma_general_req req = {};
        int err;

        if (gd->pdid == INVALID_PDID)
                return -EINVAL;

        mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
                             sizeof(resp));

        req.hdr.dev_id = gd->dev_id;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
                        err, resp.hdr.status);
                if (!err)
                        err = -EPROTO;
        }

        gd->pdid = INVALID_PDID;
        gd->doorbell = INVALID_DOORBELL;
        gd->gpa_mkey = INVALID_MEM_KEY;

        return err;
}

u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
        u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
        u32 wq_size = wq->queue_size;

        WARN_ON_ONCE(used_space > wq_size);

        return wq_size - used_space;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset)
{
        u32 offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);

        WARN_ON_ONCE((offset + GDMA_WQE_BU_SIZE) > wq->queue_size);

        return wq->queue_mem_ptr + offset;
}

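/* A WQE is laid out as the gdma_wqe header, the inline client OOB data
 * (small or large), then the SGL, all rounded up to 32-byte basic units
 * (GDMA_WQE_BU_SIZE). This helper writes the header and inline OOB, and
 * returns the number of bytes consumed so the caller can locate the SGL.
 */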
static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
                                    enum gdma_queue_type q_type,
                                    u32 client_oob_size, u32 sgl_data_size,
                                    u8 *wqe_ptr)
{
        bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
        bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
        struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
        u8 *ptr;

        memset(header, 0, sizeof(struct gdma_wqe));
        header->num_sge = wqe_req->num_sge;
        header->inline_oob_size_div4 = client_oob_size / sizeof(u32);

        if (oob_in_sgl) {
                WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);

                header->client_oob_in_sgl = 1;

                if (pad_data)
                        header->last_vbytes = wqe_req->sgl[0].size;
        }

        if (q_type == GDMA_SQ)
                header->client_data_unit = wqe_req->client_data_unit;

        /* The size of gdma_wqe + client_oob_size must be less than or equal
         * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
         * the queue memory buffer boundary.
         */
        ptr = wqe_ptr + sizeof(header);

        if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
                memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);

                if (client_oob_size > wqe_req->inline_oob_size)
                        memset(ptr + wqe_req->inline_oob_size, 0,
                               client_oob_size - wqe_req->inline_oob_size);
        }

        return sizeof(header) + client_oob_size;
}

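/* Copy the SGL into the work queue, splitting the copy in two when the WQE
 * wraps around the end of the ring buffer.
 */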
static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr,
                              const struct gdma_wqe_request *wqe_req)
{
        u32 sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
        const u8 *address = (u8 *)wqe_req->sgl;
        u8 *base_ptr, *end_ptr;
        u32 size_to_end;

        base_ptr = wq->queue_mem_ptr;
        end_ptr = base_ptr + wq->queue_size;
        size_to_end = (u32)(end_ptr - wqe_ptr);

        if (size_to_end < sgl_size) {
                memcpy(wqe_ptr, address, size_to_end);

                wqe_ptr = base_ptr;
                address += size_to_end;
                sgl_size -= size_to_end;
        }

        memcpy(wqe_ptr, address, sgl_size);
}

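/* Typical caller flow for posting to an SQ, as an illustrative sketch only
 * (dma_addr and len are placeholders; the real SGE setup is up to the
 * caller, and an RQ post would use inline_oob_size == 0 instead):
 *
 *        struct gdma_posted_wqe_info wqe_info = {};
 *        struct gdma_wqe_request wqe_req = {};
 *        struct gdma_sge sge = {};
 *
 *        sge.address = dma_addr;
 *        sge.mem_key = gd->gpa_mkey;
 *        sge.size = len;
 *
 *        wqe_req.sgl = &sge;
 *        wqe_req.num_sge = 1;
 *        wqe_req.inline_oob_size = INLINE_OOB_SMALL_SIZE;
 *
 *        err = mana_gd_post_and_ring(wq, &wqe_req, &wqe_info);
 */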
int mana_gd_post_work_request(struct gdma_queue *wq,
                              const struct gdma_wqe_request *wqe_req,
                              struct gdma_posted_wqe_info *wqe_info)
{
        u32 client_oob_size = wqe_req->inline_oob_size;
        struct gdma_context *gc;
        u32 sgl_data_size;
        u32 max_wqe_size;
        u32 wqe_size;
        u8 *wqe_ptr;

        if (wqe_req->num_sge == 0)
                return -EINVAL;

        if (wq->type == GDMA_RQ) {
                if (client_oob_size != 0)
                        return -EINVAL;

                client_oob_size = INLINE_OOB_SMALL_SIZE;

                max_wqe_size = GDMA_MAX_RQE_SIZE;
        } else {
                if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
                    client_oob_size != INLINE_OOB_LARGE_SIZE)
                        return -EINVAL;

                max_wqe_size = GDMA_MAX_SQE_SIZE;
        }

        sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
        wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
                         sgl_data_size, GDMA_WQE_BU_SIZE);
        if (wqe_size > max_wqe_size)
                return -EINVAL;

        if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
                gc = wq->gdma_dev->gdma_context;
                dev_err(gc->dev, "unsuccessful flow control!\n");
                return -ENOSPC;
        }

        if (wqe_info)
                wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;

        wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
        wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
                                            sgl_data_size, wqe_ptr);
        if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size)
                wqe_ptr -= wq->queue_size;

        mana_gd_write_sgl(wq, wqe_ptr, wqe_req);

        wq->head += wqe_size / GDMA_WQE_BU_SIZE;

        return 0;
}

int mana_gd_post_and_ring(struct gdma_queue *queue,
                          const struct gdma_wqe_request *wqe_req,
                          struct gdma_posted_wqe_info *wqe_info)
{
        struct gdma_context *gc = queue->gdma_dev->gdma_context;
        int err;

        err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
        if (err)
                return err;

        mana_gd_wq_ring_doorbell(gc, queue);

        return 0;
}

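/* Read one CQE using the same owner-bit scheme as the EQ: returns 1 and
 * copies the completion into *comp, 0 when the queue is empty, or -1 when
 * an overflow is detected.
 */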
static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
{
        unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
        struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
        u32 owner_bits, new_bits, old_bits;
        struct gdma_cqe *cqe;

        cqe = &cq_cqe[cq->head % num_cqe];
        owner_bits = cqe->cqe_info.owner_bits;

        old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
        /* Return 0 if no more entries. */
        if (owner_bits == old_bits)
                return 0;

        new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
        /* Return -1 if overflow detected. */
        if (WARN_ON_ONCE(owner_bits != new_bits))
                return -1;

        comp->wq_num = cqe->cqe_info.wq_num;
        comp->is_sq = cqe->cqe_info.is_sq;
        memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);

        return 1;
}

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
{
        int cqe_idx;
        int ret;

        for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
                ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);

                if (ret < 0) {
                        cq->head -= cqe_idx;
                        return ret;
                }

                if (ret == 0)
                        break;

                cq->head++;
        }

        return cqe_idx;
}

static irqreturn_t mana_gd_intr(int irq, void *arg)
{
        struct gdma_irq_context *gic = arg;

        if (gic->handler)
                gic->handler(gic->arg);

        return IRQ_HANDLED;
}

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r)
{
        r->map = bitmap_zalloc(res_avail, GFP_KERNEL);
        if (!r->map)
                return -ENOMEM;

        r->size = res_avail;
        spin_lock_init(&r->lock);

        return 0;
}

void mana_gd_free_res_map(struct gdma_resource *r)
{
        bitmap_free(r->map);
        r->map = NULL;
        r->size = 0;
}

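/* Allocate MSI-X vectors (at least two: one for the HWC plus at least one
 * EQ) and install the shared interrupt handler on each of them.
 */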
static int mana_gd_setup_irqs(struct pci_dev *pdev)
{
        unsigned int max_queues_per_port = num_online_cpus();
        struct gdma_context *gc = pci_get_drvdata(pdev);
        struct gdma_irq_context *gic;
        unsigned int max_irqs;
        int nvec, irq;
        int err, i, j;

        if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
                max_queues_per_port = MANA_MAX_NUM_QUEUES;

        /* Need 1 interrupt for the Hardware Channel (HWC) */
        max_irqs = max_queues_per_port + 1;

        nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
        if (nvec < 0)
                return nvec;

        gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
                                   GFP_KERNEL);
        if (!gc->irq_contexts) {
                err = -ENOMEM;
                goto free_irq_vector;
        }

        for (i = 0; i < nvec; i++) {
                gic = &gc->irq_contexts[i];
                gic->handler = NULL;
                gic->arg = NULL;

                irq = pci_irq_vector(pdev, i);
                if (irq < 0) {
                        err = irq;
                        goto free_irq;
                }

                err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
                if (err)
                        goto free_irq;
        }

        err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
        if (err)
                goto free_irq;

        gc->max_num_msix = nvec;
        gc->num_msix_usable = nvec;

        return 0;

free_irq:
        for (j = i - 1; j >= 0; j--) {
                irq = pci_irq_vector(pdev, j);
                gic = &gc->irq_contexts[j];
                free_irq(irq, gic);
        }

        kfree(gc->irq_contexts);
        gc->irq_contexts = NULL;
free_irq_vector:
        pci_free_irq_vectors(pdev);
        return err;
}

static void mana_gd_remove_irqs(struct pci_dev *pdev)
{
        struct gdma_context *gc = pci_get_drvdata(pdev);
        struct gdma_irq_context *gic;
        int irq, i;

        if (gc->max_num_msix < 1)
                return;

        mana_gd_free_res_map(&gc->msix_resource);

        for (i = 0; i < gc->max_num_msix; i++) {
                irq = pci_irq_vector(pdev, i);
                if (irq < 0)
                        continue;

                gic = &gc->irq_contexts[i];
                free_irq(irq, gic);
        }

        pci_free_irq_vectors(pdev);

        gc->max_num_msix = 0;
        gc->num_msix_usable = 0;
        kfree(gc->irq_contexts);
        gc->irq_contexts = NULL;
}

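/* Bring-up order: map the registers and the shared-memory channel, set up
 * MSI-X, create the HWC, then use the HWC to negotiate the protocol
 * version, query resource limits, and enumerate devices.
 */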
static int mana_gd_setup(struct pci_dev *pdev)
{
        struct gdma_context *gc = pci_get_drvdata(pdev);
        int err;

        mana_gd_init_registers(pdev);
        mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);

        err = mana_gd_setup_irqs(pdev);
        if (err)
                return err;

        err = mana_hwc_create_channel(gc);
        if (err)
                goto remove_irq;

        err = mana_gd_verify_vf_version(pdev);
        if (err)
                goto destroy_hwc;

        err = mana_gd_query_max_resources(pdev);
        if (err)
                goto destroy_hwc;

        err = mana_gd_detect_devices(pdev);
        if (err)
                goto destroy_hwc;

        return 0;

destroy_hwc:
        mana_hwc_destroy_channel(gc);
remove_irq:
        mana_gd_remove_irqs(pdev);
        return err;
}

static void mana_gd_cleanup(struct pci_dev *pdev)
{
        struct gdma_context *gc = pci_get_drvdata(pdev);

        mana_hwc_destroy_channel(gc);

        mana_gd_remove_irqs(pdev);
}

static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct gdma_context *gc;
        void __iomem *bar0_va;
        int bar = 0;
        int err;

        /* Each port has 2 CQs, each CQ has at most 1 EQE at a time */
        BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);

        err = pci_enable_device(pdev);
        if (err)
                return -ENXIO;

        pci_set_master(pdev);

        err = pci_request_regions(pdev, "mana");
        if (err)
                goto disable_dev;

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err)
                goto release_region;

        err = -ENOMEM;
        gc = vzalloc(sizeof(*gc));
        if (!gc)
                goto release_region;

        mutex_init(&gc->eq_test_event_mutex);
        pci_set_drvdata(pdev, gc);

        bar0_va = pci_iomap(pdev, bar, 0);
        if (!bar0_va)
                goto free_gc;

        gc->bar0_va = bar0_va;
        gc->dev = &pdev->dev;

        err = mana_gd_setup(pdev);
        if (err)
                goto unmap_bar;

        err = mana_probe(&gc->mana, false);
        if (err)
                goto cleanup_gd;

        return 0;

cleanup_gd:
        mana_gd_cleanup(pdev);
unmap_bar:
        pci_iounmap(pdev, bar0_va);
free_gc:
        pci_set_drvdata(pdev, NULL);
        vfree(gc);
release_region:
        pci_release_regions(pdev);
disable_dev:
        pci_clear_master(pdev);
        pci_disable_device(pdev);
        dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
        return err;
}

static void mana_gd_remove(struct pci_dev *pdev)
{
        struct gdma_context *gc = pci_get_drvdata(pdev);

        mana_remove(&gc->mana, false);

        mana_gd_cleanup(pdev);

        pci_iounmap(pdev, gc->bar0_va);

        vfree(gc);

        pci_release_regions(pdev);
        pci_clear_master(pdev);
        pci_disable_device(pdev);
}

/* The 'state' parameter is not used. */
static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct gdma_context *gc = pci_get_drvdata(pdev);

        mana_remove(&gc->mana, true);

        mana_gd_cleanup(pdev);

        return 0;
}

/* In case the NIC hardware stops working, the suspend and resume callbacks will
 * fail -- if this happens, it's safer to just report an error than try to undo
 * what has been done.
 */
static int mana_gd_resume(struct pci_dev *pdev)
{
        struct gdma_context *gc = pci_get_drvdata(pdev);
        int err;

        err = mana_gd_setup(pdev);
        if (err)
                return err;

        err = mana_probe(&gc->mana, true);
        if (err)
                return err;

        return 0;
}

/* Quiesce the device for kexec. This is also called upon reboot/shutdown. */
static void mana_gd_shutdown(struct pci_dev *pdev)
{
        struct gdma_context *gc = pci_get_drvdata(pdev);

        dev_info(&pdev->dev, "Shutdown was called\n");

        mana_remove(&gc->mana, true);

        mana_gd_cleanup(pdev);

        pci_disable_device(pdev);
}

#ifndef PCI_VENDOR_ID_MICROSOFT
#define PCI_VENDOR_ID_MICROSOFT 0x1414
#endif

static const struct pci_device_id mana_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) },
        { }
};

static struct pci_driver mana_driver = {
        .name           = "mana",
        .id_table       = mana_id_table,
        .probe          = mana_gd_probe,
        .remove         = mana_gd_remove,
        .suspend        = mana_gd_suspend,
        .resume         = mana_gd_resume,
        .shutdown       = mana_gd_shutdown,
};

module_pci_driver(mana_driver);

MODULE_DEVICE_TABLE(pci, mana_id_table);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver");