linux/drivers/infiniband/hw/hfi1/init.c
/*
 * Copyright(c) 2015 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/xarray.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <linux/numa.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"
#include "netdev.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

#define NUM_IB_PORTS 1

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
        num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");

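/*
 * Example (illustrative, not from the driver): the receive-side parameters
 * above are normally set at module load time, e.g.:
 *
 *      modprobe hfi1 krcvqs=2,2,2 rcvhdrcnt=2048 eager_buffer_size=8388608
 *
 * With krcvqs=2,2,2, compute_krcvqs() later in this file sums the per-VL
 * values, giving n_krcvqs == 6.
 */
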
DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
                             struct hfi1_pportdata *ppd)
{
        struct hfi1_ctxtdata *rcd;
        int ret;

        /* Control context has to be always 0 */
        BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

        ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
        if (ret < 0) {
                dd_dev_err(dd, "Kernel receive context allocation failed\n");
                return ret;
        }

        /*
         * Set up the kernel context flags here and now because they use
         * default values for all receive side memories.  User contexts will
         * be handled as they are created.
         */
        rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
                HFI1_CAP_KGET(NODROP_RHQ_FULL) |
                HFI1_CAP_KGET(NODROP_EGR_FULL) |
                HFI1_CAP_KGET(DMA_RTAIL);

        /* Control context must use DMA_RTAIL */
        if (rcd->ctxt == HFI1_CTRL_CTXT)
                rcd->flags |= HFI1_CAP_DMA_RTAIL;
        rcd->fast_handler = get_dma_rtail_setting(rcd) ?
                                handle_receive_interrupt_dma_rtail :
                                handle_receive_interrupt_nodma_rtail;
        rcd->slow_handler = handle_receive_interrupt;

        hfi1_set_seq_cnt(rcd, 1);

        rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
        if (!rcd->sc) {
                dd_dev_err(dd, "Kernel send context allocation failed\n");
                return -ENOMEM;
        }
        hfi1_init_ctxt(rcd->sc);

        return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
        u16 i;
        int ret;

        dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
                               GFP_KERNEL, dd->node);
        if (!dd->rcd)
                return -ENOMEM;

        for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
                ret = hfi1_create_kctxt(dd, dd->pport);
                if (ret)
                        goto bail;
        }

        return 0;
bail:
        for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
                hfi1_free_ctxt(dd->rcd[i]);

        /* All the contexts should be freed, free the array */
        kfree(dd->rcd);
        dd->rcd = NULL;
        return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
        kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - clean up when the reference count reaches zero
 * @kref: pointer to an initialized rcd data structure
 */
static void hfi1_rcd_free(struct kref *kref)
{
        unsigned long flags;
        struct hfi1_ctxtdata *rcd =
                container_of(kref, struct hfi1_ctxtdata, kref);

        spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
        rcd->dd->rcd[rcd->ctxt] = NULL;
        spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

        hfi1_free_ctxtdata(rcd->dd, rcd);

        kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
        if (rcd)
                return kref_put(&rcd->kref, hfi1_rcd_free);

        return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 *
 * Return: reflects kref_get_unless_zero(), which returns non-zero on a
 * successful increment, otherwise 0.
 */
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
        return kref_get_unless_zero(&rcd->kref);
}

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, return -EBUSY.
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
                              struct hfi1_ctxtdata *rcd, u16 *index)
{
        unsigned long flags;
        u16 ctxt;

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
                if (!dd->rcd[ctxt])
                        break;

        if (ctxt < dd->num_rcv_contexts) {
                rcd->ctxt = ctxt;
                dd->rcd[ctxt] = rcd;
                hfi1_rcd_init(rcd);
        }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        if (ctxt >= dd->num_rcv_contexts)
                return -EBUSY;

        *index = ctxt;

        return 0;
}

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the matching _put().
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
                                                 u16 ctxt)
{
        if (ctxt < dd->num_rcv_contexts)
                return hfi1_rcd_get_by_index(dd, ctxt);

        return NULL;
}

/**
 * hfi1_rcd_get_by_index - get an rcd by its index, taking a reference
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more index, get the protecting spinlock and then increment the
 * kref.
 *
 * The caller is responsible for making the matching _put().
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
        unsigned long flags;
        struct hfi1_ctxtdata *rcd = NULL;

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        if (dd->rcd[ctxt]) {
                rcd = dd->rcd[ctxt];
                if (!hfi1_rcd_get(rcd))
                        rcd = NULL;
        }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        return rcd;
}
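
/*
 * Example (illustrative sketch, not part of the driver): the expected
 * get/put pairing when looking up a context from an untrusted index,
 * with use_rcd() standing in for a hypothetical consumer:
 *
 *      struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
 *
 *      if (rcd) {
 *              use_rcd(rcd);
 *              hfi1_rcd_put(rcd);
 *      }
 *
 * The reference taken by the lookup is held across the use and dropped
 * with hfi1_rcd_put() when done.
 */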

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
                         struct hfi1_ctxtdata **context)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct hfi1_ctxtdata *rcd;
        unsigned kctxt_ngroups = 0;
        u32 base;

        if (dd->rcv_entries.nctxt_extra >
            dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
                kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
                         (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
        rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
        if (rcd) {
                u32 rcvtids, max_entries;
                u16 ctxt;
                int ret;

                ret = allocate_rcd_index(dd, rcd, &ctxt);
                if (ret) {
                        *context = NULL;
                        kfree(rcd);
                        return ret;
                }

                INIT_LIST_HEAD(&rcd->qp_wait_list);
                hfi1_exp_tid_group_init(rcd);
                rcd->ppd = ppd;
                rcd->dd = dd;
                rcd->numa_id = numa;
                rcd->rcv_array_groups = dd->rcv_entries.ngroups;
                rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
                rcd->msix_intr = CCE_NUM_MSIX_VECTORS;

                mutex_init(&rcd->exp_mutex);
                spin_lock_init(&rcd->exp_lock);
                INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
                INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);

                hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

                /*
                 * Calculate the context's RcvArray entry starting point.
                 * We do this here because we have to take into account all
                 * the RcvArray entries that previous contexts would have
                 * taken, and we have to account for any extra groups assigned
                 * to the static (kernel) or dynamic (vnic/user) contexts.
                 */
                if (ctxt < dd->first_dyn_alloc_ctxt) {
                        if (ctxt < kctxt_ngroups) {
                                base = ctxt * (dd->rcv_entries.ngroups + 1);
                                rcd->rcv_array_groups++;
                        } else {
                                base = kctxt_ngroups +
                                        (ctxt * dd->rcv_entries.ngroups);
                        }
                } else {
                        u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

                        base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
                                kctxt_ngroups);
                        if (ct < dd->rcv_entries.nctxt_extra) {
                                base += ct * (dd->rcv_entries.ngroups + 1);
                                rcd->rcv_array_groups++;
                        } else {
                                base += dd->rcv_entries.nctxt_extra +
                                        (ct * dd->rcv_entries.ngroups);
                        }
                }
                rcd->eager_base = base * dd->rcv_entries.group_size;

                rcd->rcvhdrq_cnt = rcvhdrcnt;
                rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
                rcd->rhf_offset =
                        rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
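                /*
                 * Note (illustrative): sizeof(u64) / sizeof(u32) == 2, so
                 * with the default 32-dword entry size rhf_offset == 30,
                 * i.e. the RHF occupies the last two dwords of each entry.
                 */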
                /*
                 * Simple Eager buffer allocation: we have already pre-allocated
                 * the number of RcvArray entry groups. Each ctxtdata structure
                 * holds the number of groups for that context.
                 *
                 * To follow CSR requirements and maintain cacheline alignment,
                 * make sure all sizes and bases are multiples of group_size.
                 *
                 * The expected entry count is what is left after assigning
                 * Eager entries.
                 */
                max_entries = rcd->rcv_array_groups *
                        dd->rcv_entries.group_size;
                rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
                rcd->egrbufs.count = round_down(rcvtids,
                                                dd->rcv_entries.group_size);
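                /*
                 * Worked example (illustrative numbers): with 128 RcvArray
                 * groups of group_size 8, max_entries == 1024; at the
                 * default 25% split, rcvtids == 256, already a multiple of
                 * the group size, so egrbufs.count == 256.
                 */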
                if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
                        dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
                                   rcd->ctxt);
                        rcd->egrbufs.count = MAX_EAGER_ENTRIES;
                }
                hfi1_cdbg(PROC,
                          "ctxt%u: max Eager buffer RcvArray entries: %u\n",
                          rcd->ctxt, rcd->egrbufs.count);

                /*
                 * Allocate array that will hold the eager buffer accounting
                 * data.
                 * This will allocate the maximum possible buffer count based
                 * on the value of the RcvArray split parameter.
                 * The resulting value will be rounded down to the closest
                 * multiple of dd->rcv_entries.group_size.
                 */
                rcd->egrbufs.buffers =
                        kcalloc_node(rcd->egrbufs.count,
                                     sizeof(*rcd->egrbufs.buffers),
                                     GFP_KERNEL, numa);
                if (!rcd->egrbufs.buffers)
                        goto bail;
                rcd->egrbufs.rcvtids =
                        kcalloc_node(rcd->egrbufs.count,
                                     sizeof(*rcd->egrbufs.rcvtids),
                                     GFP_KERNEL, numa);
                if (!rcd->egrbufs.rcvtids)
                        goto bail;
                rcd->egrbufs.size = eager_buffer_size;
                /*
                 * The size of the buffers programmed into the RcvArray
                 * entries needs to be big enough to handle the highest
                 * MTU supported.
                 */
                if (rcd->egrbufs.size < hfi1_max_mtu) {
                        rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
                        hfi1_cdbg(PROC,
                                  "ctxt%u: eager bufs size too small. Adjusting to %u\n",
                                  rcd->ctxt, rcd->egrbufs.size);
                }
                rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

                /* Applicable only for statically created kernel contexts */
                if (ctxt < dd->first_dyn_alloc_ctxt) {
                        rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
                                                    GFP_KERNEL, numa);
                        if (!rcd->opstats)
                                goto bail;

                        /* Initialize TID flow generations for the context */
                        hfi1_kern_init_ctxt_generations(rcd);
                }

                *context = rcd;
                return 0;
        }

bail:
        *context = NULL;
        hfi1_free_ctxt(rcd);
        return -ENOMEM;
}

/**
 * hfi1_free_ctxt - free a context created by hfi1_create_ctxtdata()
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
        hfi1_rcd_put(rcd);
}
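
/*
 * Example (illustrative sketch, not part of the driver) of the expected
 * create/free pairing, assuming a hypothetical caller:
 *
 *      struct hfi1_ctxtdata *rcd;
 *
 *      if (!hfi1_create_ctxtdata(ppd, numa, &rcd)) {
 *              (use rcd; transient users take hfi1_rcd_get()/hfi1_rcd_put())
 *              hfi1_free_ctxt(rcd);
 *      }
 *
 * The hfi1_free_ctxt() call is the final put matching the kref init done
 * in hfi1_create_ctxtdata().
 */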

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct cc_state *cc_state;
        int i;
        u16 cce, ccti_limit, max_ccti = 0;
        u16 shift, mult;
        u64 src;
        u32 current_egress_rate; /* Mbits /sec */
        u32 max_pkt_time;
        /*
         * max_pkt_time is the maximum packet egress time in units
         * of the fabric clock period 1/(805 MHz).
         */

        cc_state = get_cc_state(ppd);

        if (!cc_state)
                /*
                 * This should _never_ happen - rcu_read_lock() is held,
                 * and set_link_ipg() should not be called if cc_state
                 * is NULL.
                 */
                return;

        for (i = 0; i < OPA_MAX_SLS; i++) {
                u16 ccti = ppd->cca_timer[i].ccti;

                if (ccti > max_ccti)
                        max_ccti = ccti;
        }

        ccti_limit = cc_state->cct.ccti_limit;
        if (max_ccti > ccti_limit)
                max_ccti = ccti_limit;

        cce = cc_state->cct.entries[max_ccti].entry;
        shift = (cce & 0xc000) >> 14;
        mult = (cce & 0x3fff);
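        /*
         * Example (illustrative value): cce == 0x8064 decodes to
         * shift == 2 and mult == 0x64, so a max_pkt_time of 1024
         * fabric cycles yields src == (1024 >> 2) * 0x64 == 25600.
         */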

        current_egress_rate = active_egress_rate(ppd);

        max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

        src = (max_pkt_time >> shift) * mult;

        src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
        src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

        write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
        struct cca_timer *cca_timer;
        struct hfi1_pportdata *ppd;
        int sl;
        u16 ccti_timer, ccti_min;
        struct cc_state *cc_state;
        unsigned long flags;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        cca_timer = container_of(t, struct cca_timer, hrtimer);
        ppd = cca_timer->ppd;
        sl = cca_timer->sl;

        rcu_read_lock();

        cc_state = get_cc_state(ppd);

        if (!cc_state) {
                rcu_read_unlock();
                return HRTIMER_NORESTART;
        }

        /*
         * 1) decrement ccti for SL
         * 2) calculate IPG for link (set_link_ipg())
         * 3) restart timer, unless ccti is at min value
         */

        ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
        ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

        spin_lock_irqsave(&ppd->cca_timer_lock, flags);

        if (cca_timer->ccti > ccti_min) {
                cca_timer->ccti--;
                set_link_ipg(ppd);
        }

        if (cca_timer->ccti > ccti_min) {
                unsigned long nsec = 1024 * ccti_timer;
                /* ccti_timer is in units of 1.024 usec */
                hrtimer_forward_now(t, ns_to_ktime(nsec));
                ret = HRTIMER_RESTART;
        }

        spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
        rcu_read_unlock();
        return ret;
}
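
/*
 * Timer arithmetic above (illustrative): with ccti_timer in units of
 * 1.024 usec, ccti_timer == 977 re-arms the hrtimer 1024 * 977 ns,
 * i.e. roughly 1 ms, after "now".
 */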

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
                         struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
        int i;
        uint default_pkey_idx;
        struct cc_state *cc_state;

        ppd->dd = dd;
        ppd->hw_pidx = hw_pidx;
        ppd->port = port; /* IB port number, not index */
        ppd->prev_link_width = LINK_WIDTH_DEFAULT;
        /*
         * There are C_VL_COUNT number of PortVLXmitWait counters.
         * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
         */
        for (i = 0; i < C_VL_COUNT + 1; i++) {
                ppd->port_vl_xmit_wait_last[i] = 0;
                ppd->vl_xmit_flit_cnt[i] = 0;
        }

        default_pkey_idx = 1;

        ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
        ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

        if (loopback) {
                dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
                           !default_pkey_idx);
                ppd->pkeys[!default_pkey_idx] = 0x8001;
        }

        INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
        INIT_WORK(&ppd->link_up_work, handle_link_up);
        INIT_WORK(&ppd->link_down_work, handle_link_down);
        INIT_WORK(&ppd->freeze_work, handle_freeze);
        INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
        INIT_WORK(&ppd->sma_message_work, handle_sma_message);
        INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
        INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
        INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
        INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

        mutex_init(&ppd->hls_lock);
        spin_lock_init(&ppd->qsfp_info.qsfp_lock);

        ppd->qsfp_info.ppd = ppd;
        ppd->sm_trap_qp = 0x0;
        ppd->sa_qp = 0x1;

        ppd->hfi1_wq = NULL;

        spin_lock_init(&ppd->cca_timer_lock);

        for (i = 0; i < OPA_MAX_SLS; i++) {
                hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
                             HRTIMER_MODE_REL);
                ppd->cca_timer[i].ppd = ppd;
                ppd->cca_timer[i].sl = i;
                ppd->cca_timer[i].ccti = 0;
                ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
        }

        ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

        spin_lock_init(&ppd->cc_state_lock);
        spin_lock_init(&ppd->cc_log_lock);
        cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
        RCU_INIT_POINTER(ppd->cc_state, cc_state);
        if (!cc_state)
                goto bail;
        return;

bail:
        dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
        return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * Sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case the reset
 * failed).
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
        int i;
        struct hfi1_ctxtdata *rcd;
        /*
         * Ensure chip does no sends or receives, tail updates, or
         * pioavail updates while we re-initialize.  This is mostly
         * for the driver data structures, not chip registers.
         */
        for (i = 0; i < dd->num_rcv_contexts; i++) {
                rcd = hfi1_rcd_get_by_index(dd, i);
                hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
                             HFI1_RCVCTRL_INTRAVAIL_DIS |
                             HFI1_RCVCTRL_TAILUPD_DIS, rcd);
                hfi1_rcd_put(rcd);
        }
        pio_send_control(dd, PSC_GLOBAL_DISABLE);
        for (i = 0; i < dd->num_send_contexts; i++)
                sc_disable(dd->send_contexts[i].sc);

        return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
        struct hfi1_ctxtdata *rcd;
        u32 rcvmask;
        u16 i;

        /* enable PIO send */
        pio_send_control(dd, PSC_GLOBAL_ENABLE);

        /*
         * Enable kernel ctxts' receive and receive interrupt.
         * Other ctxts done as user opens and initializes them.
         */
        for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
                rcd = hfi1_rcd_get_by_index(dd, i);
                if (!rcd)
                        continue;
                rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
                rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
                        HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
                if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
                        rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
                if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
                        rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
                if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
                        rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
                if (HFI1_CAP_IS_KSET(TID_RDMA))
                        rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
                hfi1_rcvctrl(dd, rcvmask, rcd);
                sc_enable(rcd->sc);
                hfi1_rcd_put(rcd);
        }
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
        int pidx;
        struct hfi1_pportdata *ppd;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (!ppd->hfi1_wq) {
                        ppd->hfi1_wq =
                                alloc_workqueue(
                                    "hfi%d_%d",
                                    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
                                    WQ_MEM_RECLAIM,
                                    HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
                                    dd->unit, pidx);
                        if (!ppd->hfi1_wq)
                                goto wq_error;
                }
                if (!ppd->link_wq) {
                        /*
                         * Make the link workqueue single-threaded to enforce
                         * serialization.
                         */
                        ppd->link_wq =
                                alloc_workqueue(
                                    "hfi_link_%d_%d",
                                    WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
                                    1, /* max_active */
                                    dd->unit, pidx);
                        if (!ppd->link_wq)
                                goto wq_error;
                }
        }
        return 0;
wq_error:
        pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (ppd->hfi1_wq) {
                        destroy_workqueue(ppd->hfi1_wq);
                        ppd->hfi1_wq = NULL;
                }
                if (ppd->link_wq) {
                        destroy_workqueue(ppd->link_wq);
                        ppd->link_wq = NULL;
                }
        }
        return -ENOMEM;
}

/**
 * destroy_workqueues - destroy per port workqueues
 * @dd: the hfi1_ib device
 */
static void destroy_workqueues(struct hfi1_devdata *dd)
{
        int pidx;
        struct hfi1_pportdata *ppd;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                if (ppd->hfi1_wq) {
                        destroy_workqueue(ppd->hfi1_wq);
                        ppd->hfi1_wq = NULL;
                }
                if (ppd->link_wq) {
                        destroy_workqueue(ppd->link_wq);
                        ppd->link_wq = NULL;
                }
        }
}

/**
 * enable_general_intr() - Enable the IRQs that will be handled by the
 * general interrupt handler.
 * @dd: valid devdata
 *
 */
static void enable_general_intr(struct hfi1_devdata *dd)
{
        set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
        set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
        set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
        set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
        set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
        set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
        set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
        int ret = 0, pidx, lastfail = 0;
        unsigned long len;
        u16 i;
        struct hfi1_ctxtdata *rcd;
        struct hfi1_pportdata *ppd;

        /* Set up send low level handlers */
        dd->process_pio_send = hfi1_verbs_send_pio;
        dd->process_dma_send = hfi1_verbs_send_dma;
        dd->pio_inline_send = pio_copy;
        dd->process_vnic_dma_send = hfi1_vnic_send_dma;

        if (is_ax(dd)) {
                atomic_set(&dd->drop_packet, DROP_PACKET_ON);
                dd->do_drop = true;
        } else {
                atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
                dd->do_drop = false;
        }

        /* make sure the link is not "up" */
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                ppd->linkup = 0;
        }

        if (reinit)
                ret = init_after_reset(dd);
        else
                ret = loadtime_init(dd);
        if (ret)
                goto done;

        /* allocate dummy tail memory for all receive contexts */
        dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
                                                         sizeof(u64),
                                                         &dd->rcvhdrtail_dummy_dma,
                                                         GFP_KERNEL);

        if (!dd->rcvhdrtail_dummy_kvaddr) {
                dd_dev_err(dd, "cannot allocate dummy tail memory\n");
                ret = -ENOMEM;
                goto done;
        }

        /* dd->rcd can be NULL if early initialization failed */
        for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
                /*
                 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
                 * re-init, the simplest way to handle this is to free
                 * existing, and re-allocate.
                 * Need to re-create rest of ctxt 0 ctxtdata as well.
                 */
                rcd = hfi1_rcd_get_by_index(dd, i);
                if (!rcd)
                        continue;

                rcd->do_interrupt = &handle_receive_interrupt;

                lastfail = hfi1_create_rcvhdrq(dd, rcd);
                if (!lastfail)
                        lastfail = hfi1_setup_eagerbufs(rcd);
                if (!lastfail)
                        lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
                if (lastfail) {
                        dd_dev_err(dd,
                                   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
                        ret = lastfail;
                }
                /* enable IRQ */
                hfi1_rcd_put(rcd);
        }

        /* Allocate enough memory for user event notification. */
        len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
                         sizeof(*dd->events));
        dd->events = vmalloc_user(len);
        if (!dd->events)
                dd_dev_err(dd, "Failed to allocate user events page\n");
        /*
         * Allocate a page for device and port status.
         * Page will be shared amongst all user processes.
         */
        dd->status = vmalloc_user(PAGE_SIZE);
        if (!dd->status)
                dd_dev_err(dd, "Failed to allocate dev status page\n");
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (dd->status)
                        /* Currently, we only have one port */
                        ppd->statusp = &dd->status->port;

                set_mtu(ppd);
        }

        /* enable chip even if we have an error, so we can debug cause */
        enable_chip(dd);

done:
        /*
         * Set status even if port serdes is not initialized
         * so that diags will work.
         */
        if (dd->status)
                dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
                        HFI1_STATUS_INITTED;
        if (!ret) {
                /* enable all interrupts from the chip */
                enable_general_intr(dd);
                init_qsfp_int(dd);

                /* chip is OK for user apps; mark it as initialized */
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;

                        /*
                         * start the serdes - must be after interrupts are
                         * enabled so we are notified when the link goes up
                         */
                        lastfail = bringup_serdes(ppd);
                        if (lastfail)
                                dd_dev_info(dd,
                                            "Failed to bring up port %u\n",
                                            ppd->port);

                        /*
                         * Set status even if port serdes is not initialized
                         * so that diags will work.
                         */
                        if (ppd->statusp)
                                *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
                                                        HFI1_STATUS_INITTED;
                        if (!ppd->link_speed_enabled)
                                continue;
                }
        }

        /* if ret is non-zero, we probably should do some cleanup here... */
        return ret;
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
        return xa_load(&hfi1_dev_table, unit);
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd;
        int pidx;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (ppd->led_override_timer.function) {
                        del_timer_sync(&ppd->led_override_timer);
                        atomic_set(&ppd->led_override_timer_active, 0);
                }
        }
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd;
        struct hfi1_ctxtdata *rcd;
        unsigned pidx;
        int i;

        if (dd->flags & HFI1_SHUTDOWN)
                return;
        dd->flags |= HFI1_SHUTDOWN;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                ppd->linkup = 0;
                if (ppd->statusp)
                        *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
                                           HFI1_STATUS_IB_READY);
        }
        dd->flags &= ~HFI1_INITTED;

        /* mask and clean up interrupts */
        set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
        msix_clean_up_interrupts(dd);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                for (i = 0; i < dd->num_rcv_contexts; i++) {
                        rcd = hfi1_rcd_get_by_index(dd, i);
                        hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
                                     HFI1_RCVCTRL_CTXT_DIS |
                                     HFI1_RCVCTRL_INTRAVAIL_DIS |
                                     HFI1_RCVCTRL_PKEY_DIS |
                                     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
                        hfi1_rcd_put(rcd);
                }
                /*
                 * Gracefully stop all sends allowing any in progress to
                 * trickle out first.
                 */
                for (i = 0; i < dd->num_send_contexts; i++)
                        sc_flush(dd->send_contexts[i].sc);
        }

        /*
         * Enough for anything that's going to trickle out to have actually
         * done so.
         */
        udelay(20);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                /* disable all contexts */
                for (i = 0; i < dd->num_send_contexts; i++)
                        sc_disable(dd->send_contexts[i].sc);
                /* disable the send device */
                pio_send_control(dd, PSC_GLOBAL_DISABLE);

                shutdown_led_override(ppd);

                /*
                 * Clear SerdesEnable.
                 * We can't count on interrupts since we are stopping.
                 */
                hfi1_quiet_serdes(ppd);
                if (ppd->hfi1_wq)
                        flush_workqueue(ppd->hfi1_wq);
                if (ppd->link_wq)
                        flush_workqueue(ppd->link_wq);
        }
        sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * Free up any allocated data for a context.
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
        u32 e;

        if (!rcd)
                return;

        if (rcd->rcvhdrq) {
                dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
                                  rcd->rcvhdrq, rcd->rcvhdrq_dma);
                rcd->rcvhdrq = NULL;
                if (hfi1_rcvhdrtail_kvaddr(rcd)) {
                        dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
                                          (void *)hfi1_rcvhdrtail_kvaddr(rcd),
                                          rcd->rcvhdrqtailaddr_dma);
                        rcd->rcvhdrtail_kvaddr = NULL;
                }
        }

        /* all the RcvArray entries should have been cleared by now */
        kfree(rcd->egrbufs.rcvtids);
        rcd->egrbufs.rcvtids = NULL;

        for (e = 0; e < rcd->egrbufs.alloced; e++) {
                if (rcd->egrbufs.buffers[e].dma)
                        dma_free_coherent(&dd->pcidev->dev,
                                          rcd->egrbufs.buffers[e].len,
                                          rcd->egrbufs.buffers[e].addr,
                                          rcd->egrbufs.buffers[e].dma);
        }
        kfree(rcd->egrbufs.buffers);
        rcd->egrbufs.alloced = 0;
        rcd->egrbufs.buffers = NULL;

        sc_free(rcd->sc);
        rcd->sc = NULL;

        vfree(rcd->subctxt_uregbase);
        vfree(rcd->subctxt_rcvegrbuf);
        vfree(rcd->subctxt_rcvhdr_base);
        kfree(rcd->opstats);

        rcd->subctxt_uregbase = NULL;
        rcd->subctxt_rcvegrbuf = NULL;
        rcd->subctxt_rcvhdr_base = NULL;
        rcd->opstats = NULL;
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_dev_table lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
        struct hfi1_asic_data *ad;
        int other;

        if (!dd->asic_data)
                return NULL;
        dd->asic_data->dds[dd->hfi1_id] = NULL;
        other = dd->hfi1_id ? 0 : 1;
        ad = dd->asic_data;
        dd->asic_data = NULL;
        /* return NULL if the other dd still has a link */
        return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
                               struct hfi1_asic_data *ad)
{
        clean_up_i2c(dd, ad);
        kfree(ad);
}

/**
 * hfi1_free_devdata - cleans up and frees per-unit data structure
 * @dd: pointer to a valid devdata structure
 *
 * It cleans up and frees all data structures set up by
 * hfi1_alloc_devdata().
 */
void hfi1_free_devdata(struct hfi1_devdata *dd)
{
        struct hfi1_asic_data *ad;
        unsigned long flags;

        xa_lock_irqsave(&hfi1_dev_table, flags);
        __xa_erase(&hfi1_dev_table, dd->unit);
        ad = release_asic_data(dd);
        xa_unlock_irqrestore(&hfi1_dev_table, flags);

        finalize_asic_data(dd, ad);
        free_platform_config(dd);
        rcu_barrier(); /* wait for rcu callbacks to complete */
        free_percpu(dd->int_counter);
        free_percpu(dd->rcv_limit);
        free_percpu(dd->send_schedule);
        free_percpu(dd->tx_opstats);
        dd->int_counter   = NULL;
        dd->rcv_limit     = NULL;
        dd->send_schedule = NULL;
        dd->tx_opstats    = NULL;
        kfree(dd->comp_vect);
        dd->comp_vect = NULL;
        sdma_clean(dd, dd->num_sdma);
        rvt_dealloc_device(&dd->verbs_dev.rdi);
}

/**
 * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
 * @pdev: Valid PCI device
 * @extra: How many bytes to alloc past the default
 *
 * Must be done via verbs allocator, because the verbs cleanup process
 * both does cleanup and free of the data structure.
 * "extra" is for chip-specific data.
 */
static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
                                               size_t extra)
{
        struct hfi1_devdata *dd;
        int ret, nports;

1269        /* extra is * number of ports */
1270        nports = extra / sizeof(struct hfi1_pportdata);

        dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
                                                     nports);
        if (!dd)
                return ERR_PTR(-ENOMEM);
        dd->num_pports = nports;
        dd->pport = (struct hfi1_pportdata *)(dd + 1);
        dd->pcidev = pdev;
        pci_set_drvdata(pdev, dd);
        dd->node = NUMA_NO_NODE;

        ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
                        GFP_KERNEL);
        if (ret < 0) {
                dev_err(&pdev->dev,
                        "Could not allocate unit ID: error %d\n", -ret);
                goto bail;
        }
        rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);

        /*
         * Initialize all locks for the device. This needs to be as early as
         * possible so locks are usable.
         */
        spin_lock_init(&dd->sc_lock);
        spin_lock_init(&dd->sendctrl_lock);
        spin_lock_init(&dd->rcvctrl_lock);
        spin_lock_init(&dd->uctxt_lock);
        spin_lock_init(&dd->hfi1_diag_trans_lock);
        spin_lock_init(&dd->sc_init_lock);
        spin_lock_init(&dd->dc8051_memlock);
        seqlock_init(&dd->sc2vl_lock);
        spin_lock_init(&dd->sde_map_lock);
        spin_lock_init(&dd->pio_map_lock);
        mutex_init(&dd->dc8051_lock);
        init_waitqueue_head(&dd->event_queue);
        spin_lock_init(&dd->irq_src_lock);

        dd->int_counter = alloc_percpu(u64);
        if (!dd->int_counter) {
                ret = -ENOMEM;
                goto bail;
        }

        dd->rcv_limit = alloc_percpu(u64);
        if (!dd->rcv_limit) {
                ret = -ENOMEM;
                goto bail;
        }

        dd->send_schedule = alloc_percpu(u64);
        if (!dd->send_schedule) {
                ret = -ENOMEM;
                goto bail;
        }

        dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
        if (!dd->tx_opstats) {
                ret = -ENOMEM;
                goto bail;
        }

        dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
        if (!dd->comp_vect) {
                ret = -ENOMEM;
                goto bail;
        }

        atomic_set(&dd->ipoib_rsm_usr_num, 0);
        return dd;

bail:
        hfi1_free_devdata(dd);
        return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
        if (dd->flags & HFI1_INITTED) {
                u32 pidx;

                dd->flags &= ~HFI1_INITTED;
                if (dd->pport)
                        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                                struct hfi1_pportdata *ppd;

                                ppd = dd->pport + pidx;
                                if (dd->flags & HFI1_PRESENT)
                                        set_link_state(ppd, HLS_DN_DISABLE);

                                if (ppd->statusp)
                                        *ppd->statusp &= ~HFI1_STATUS_IB_READY;
                        }
        }

        /*
         * Mark as having had an error for driver, and also
         * for /sys and status word mapped to user programs.
         * This marks unit as not usable, until reset.
         */
        if (dd->status)
                dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
static void shutdown_one(struct pci_dev *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
        .name = DRIVER_NAME,
        .probe = init_one,
        .remove = remove_one,
        .shutdown = shutdown_one,
        .id_table = hfi1_pci_tbl,
        .err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
        int i;

        for (i = 0; i < krcvqsset; i++)
                n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
        int ret;

        ret = dev_init();
        if (ret)
                goto bail;

        ret = node_affinity_init();
        if (ret)
                goto bail;

        /* validate max MTU before any devices start */
        if (!valid_opa_max_mtu(hfi1_max_mtu)) {
                pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
                       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
                hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
        }
        /* valid CUs run from 1-128 in powers of 2 */
        if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
                hfi1_cu = 1;
        /* valid credit return threshold is 0-100, variable is unsigned */
        if (user_credit_return_threshold > 100)
                user_credit_return_threshold = 100;

        compute_krcvqs();
1442        /*
1443         * sanitize receive interrupt count, time must wait until after
1444         * the hardware type is known
1445         */
        if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
                rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
        /* reject invalid combinations */
        if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
                pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
                rcv_intr_count = 1;
        }
        if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
                /*
                 * Avoid indefinite packet delivery by requiring a timeout
                 * if count is > 1.
                 */
                pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
                rcv_intr_timeout = 1;
        }
        if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
                /*
                 * The dynamic algorithm expects a non-zero timeout
                 * and a count > 1.
                 */
                pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
                rcv_intr_dynamic = 0;
        }

        /* sanitize link CRC options */
        link_crc_mask &= SUPPORTED_CRCS;

1473        ret = opfn_init();
1474        if (ret < 0) {
1475                pr_err("Failed to allocate opfn_wq\n");
1476                goto bail_dev;
1477        }
1478
1479        /*
1480         * These must be called before the driver is registered with
1481         * the PCI subsystem.
1482         */
1483        hfi1_dbg_init();
1484        ret = pci_register_driver(&hfi1_pci_driver);
1485        if (ret < 0) {
1486                pr_err("Unable to register driver: error %d\n", -ret);
1487                goto bail_dev;
1488        }
1489        goto bail; /* all OK */
1490
1491bail_dev:
1492        hfi1_dbg_exit();
1493        dev_cleanup();
1494bail:
1495        return ret;
1496}
1497
1498module_init(hfi1_mod_init);
1499
1500/*
1501 * Do the non-unit driver cleanup, memory free, etc. at unload.
1502 */
1503static void __exit hfi1_mod_cleanup(void)
1504{
1505        pci_unregister_driver(&hfi1_pci_driver);
1506        opfn_exit();
1507        node_affinity_destroy_all();
1508        hfi1_dbg_exit();
1509
1510        WARN_ON(!xa_empty(&hfi1_dev_table));
1511        dispose_firmware();     /* asymmetric with obtain_firmware() */
1512        dev_cleanup();
1513}
1514
1515module_exit(hfi1_mod_cleanup);
1516
1517/* this can only be called after a successful initialization */
1518static void cleanup_device_data(struct hfi1_devdata *dd)
1519{
1520        int ctxt;
1521        int pidx;
1522
1523        /* users can't do anything more with chip */
1524        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1525                struct hfi1_pportdata *ppd = &dd->pport[pidx];
1526                struct cc_state *cc_state;
1527                int i;
1528
1529                if (ppd->statusp)
1530                        *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;
1531
1532                for (i = 0; i < OPA_MAX_SLS; i++)
1533                        hrtimer_cancel(&ppd->cca_timer[i].hrtimer);
1534
1535                spin_lock(&ppd->cc_state_lock);
1536                cc_state = get_cc_state_protected(ppd);
1537                RCU_INIT_POINTER(ppd->cc_state, NULL);
1538                spin_unlock(&ppd->cc_state_lock);
1539
1540                if (cc_state)
1541                        kfree_rcu(cc_state, rcu);
1542        }
1543
1544        free_credit_return(dd);
1545
1546        if (dd->rcvhdrtail_dummy_kvaddr) {
1547                dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
1548                                  (void *)dd->rcvhdrtail_dummy_kvaddr,
1549                                  dd->rcvhdrtail_dummy_dma);
1550                dd->rcvhdrtail_dummy_kvaddr = NULL;
1551        }
1552
1553        /*
1554         * Free any resources still in use (usually just kernel contexts)
1555         * at unload; we loop over num_rcv_contexts, because that's what we allocate.
1556         */
1557        for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
1558                struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
1559
1560                if (rcd) {
1561                        hfi1_free_ctxt_rcv_groups(rcd);
1562                        hfi1_free_ctxt(rcd);
1563                }
1564        }
1565
1566        kfree(dd->rcd);
1567        dd->rcd = NULL;
1568
1569        free_pio_map(dd);
1570        /* must follow rcv context free - need to remove rcv's hooks */
1571        for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
1572                sc_free(dd->send_contexts[ctxt].sc);
1573        dd->num_send_contexts = 0;
1574        kfree(dd->send_contexts);
1575        dd->send_contexts = NULL;
1576        kfree(dd->hw_to_sw);
1577        dd->hw_to_sw = NULL;
1578        kfree(dd->boardname);
1579        vfree(dd->events);
1580        vfree(dd->status);
1581}
1582
1583/*
1584 * Clean up on unit shutdown, or error during unit load after
1585 * successful initialization.
1586 */
1587static void postinit_cleanup(struct hfi1_devdata *dd)
1588{
1589        hfi1_start_cleanup(dd);
1590        hfi1_comp_vectors_clean_up(dd);
1591        hfi1_dev_affinity_clean_up(dd);
1592
1593        hfi1_pcie_ddcleanup(dd);
1594        hfi1_pcie_cleanup(dd->pcidev);
1595
1596        cleanup_device_data(dd);
1597
1598        hfi1_free_devdata(dd);
1599}
1600
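    /*
     * PCI probe callback: allocate the per-device data, validate the
     * global module parameters, bring up the chip, and register the
     * device with the IB core.  On failure, everything that was set up
     * is torn down again before returning the error.
     */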
1601static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1602{
1603        int ret = 0, j, pidx, initfail;
1604        struct hfi1_devdata *dd;
1605        struct hfi1_pportdata *ppd;
1606
1607        /* First, lock the non-writable module parameters */
1608        HFI1_CAP_LOCK();
1609
1610        /* Validate dev ids */
1611        if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
1612              ent->device == PCI_DEVICE_ID_INTEL1)) {
1613                dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
1614                        ent->device);
1615                ret = -ENODEV;
1616                goto bail;
1617        }
1618
1619        /* Allocate the dd so we can get to work */
1620        dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
1621                                sizeof(struct hfi1_pportdata));
1622        if (IS_ERR(dd)) {
1623                ret = PTR_ERR(dd);
1624                goto bail;
1625        }
1626
1627        /* Validate some global module parameters */
1628        ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt);
1629        if (ret)
1630                goto bail;
1631
1632        /* use the encoding function as a sanitization check */
1633        if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
1634                dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
1635                           hfi1_hdrq_entsize);
1636                ret = -EINVAL;
1637                goto bail;
1638        }
1639
1640        /* The receive eager buffer size must be set before the receive
1641         * contexts are created.
1642         *
1643         * Set the eager buffer size.  Validate that it falls in a range
1644         * allowed by the hardware - all powers of 2 between the min and
1645         * max.  The maximum valid MTU is within the eager buffer range
1646         * so we do not need to cap the max_mtu by an eager buffer size
1647         * setting.
1648         */
1649        if (eager_buffer_size) {
1650                if (!is_power_of_2(eager_buffer_size))
1651                        eager_buffer_size =
1652                                roundup_pow_of_two(eager_buffer_size);
1653                eager_buffer_size =
1654                        clamp_val(eager_buffer_size,
1655                                  MIN_EAGER_BUFFER * 8,
1656                                  MAX_EAGER_BUFFER_TOTAL);
1657                dd_dev_info(dd, "Eager buffer size %u\n",
1658                            eager_buffer_size);
1659        } else {
1660                dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
1661                ret = -EINVAL;
1662                goto bail;
1663        }
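            /*
             * e.g. a non-power-of-2 request of 3MB would be rounded up
             * to 4MB and then clamped to the supported range.
             */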
1664
1665        /* restrict value of hfi1_rcvarr_split */
1666        hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
1667
1668        ret = hfi1_pcie_init(dd);
1669        if (ret)
1670                goto bail;
1671
1672        /*
1673         * Do device-specific initialization, function table setup, dd
1674         * allocation, etc.
1675         */
1676        ret = hfi1_init_dd(dd);
1677        if (ret)
1678                goto clean_bail; /* error already printed */
1679
1680        ret = create_workqueues(dd);
1681        if (ret)
1682                goto clean_bail;
1683
1684        /* do the generic initialization */
1685        initfail = hfi1_init(dd, 0);
1686
1687        ret = hfi1_register_ib_device(dd);
1688
1689        /*
1690         * Now ready for use.  This should be cleared whenever we
1691         * detect a reset, or initiate one.  On earlier failure,
1692         * we still create the devices, so diags, etc. can be used
1693         * to determine the cause of the problem.
1694         */
1695        if (!initfail && !ret) {
1696                dd->flags |= HFI1_INITTED;
1697                /* create debugfs files after init and ib register */
1698                hfi1_dbg_ibdev_init(&dd->verbs_dev);
1699        }
1700
1701        j = hfi1_device_create(dd);
1702        if (j)
1703                dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
1704
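            /*
             * Generic init or IB registration failed: tear down in
             * reverse order (interrupts, timers, per-port workqueues,
             * /dev nodes, IB device), release everything through
             * postinit_cleanup(), and prefer the init failure code as
             * the return value.
             */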
1705        if (initfail || ret) {
1706                msix_clean_up_interrupts(dd);
1707                stop_timers(dd);
1708                flush_workqueue(ib_wq);
1709                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1710                        hfi1_quiet_serdes(dd->pport + pidx);
1711                        ppd = dd->pport + pidx;
1712                        if (ppd->hfi1_wq) {
1713                                destroy_workqueue(ppd->hfi1_wq);
1714                                ppd->hfi1_wq = NULL;
1715                        }
1716                        if (ppd->link_wq) {
1717                                destroy_workqueue(ppd->link_wq);
1718                                ppd->link_wq = NULL;
1719                        }
1720                }
1721                if (!j)
1722                        hfi1_device_remove(dd);
1723                if (!ret)
1724                        hfi1_unregister_ib_device(dd);
1725                postinit_cleanup(dd);
1726                if (initfail)
1727                        ret = initfail;
1728                goto bail;      /* everything already cleaned */
1729        }
1730
1731        sdma_start(dd);
1732
1733        return 0;
1734
1735clean_bail:
1736        hfi1_pcie_cleanup(pdev);
1737bail:
1738        return ret;
1739}
1740
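    /*
     * Drop the initial user_refcount reference taken at device init.
     * If no user-space clients hold references, this completes
     * user_comp immediately; otherwise we block until the remaining
     * clients finish and complete it.
     */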
1741static void wait_for_clients(struct hfi1_devdata *dd)
1742{
1743        /*
1744         * Remove the device init value and complete the device if there
1745         * are no clients, or wait for active clients to finish.
1746         */
1747        if (atomic_dec_and_test(&dd->user_refcount))
1748                complete(&dd->user_comp);
1749
1750        wait_for_completion(&dd->user_comp);
1751}
1752
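    /*
     * PCI remove callback: the mirror image of init_one().  Detach from
     * user space and the IB core first, then quiesce the hardware and
     * free all device state.
     */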
1753static void remove_one(struct pci_dev *pdev)
1754{
1755        struct hfi1_devdata *dd = pci_get_drvdata(pdev);
1756
1757        /* close debugfs files before ib unregister */
1758        hfi1_dbg_ibdev_exit(&dd->verbs_dev);
1759
1760        /* remove the /dev hfi1 interface */
1761        hfi1_device_remove(dd);
1762
1763        /* wait for existing user space clients to finish */
1764        wait_for_clients(dd);
1765
1766        /* unregister from IB core */
1767        hfi1_unregister_ib_device(dd);
1768
1769        /* free netdev data */
1770        hfi1_netdev_free(dd);
1771
1772        /*
1773         * Disable the IB link, disable interrupts on the device,
1774         * clear dma engines, etc.
1775         */
1776        shutdown_device(dd);
1777        destroy_workqueues(dd);
1778
1779        stop_timers(dd);
1780
1781        /* wait until all of our (qsfp) queue_work() calls complete */
1782        flush_workqueue(ib_wq);
1783
1784        postinit_cleanup(dd);
1785}
1786
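    /*
     * PCI shutdown callback: quiesce the hardware only.  No state is
     * freed here, since the system is going down (reboot/kexec) rather
     * than unbinding the driver.
     */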
1787static void shutdown_one(struct pci_dev *pdev)
1788{
1789        struct hfi1_devdata *dd = pci_get_drvdata(pdev);
1790
1791        shutdown_device(dd);
1792}
1793
1794/**
1795 * hfi1_create_rcvhdrq - create a receive header queue
1796 * @dd: the hfi1_ib device
1797 * @rcd: the context data
1798 *
1799 * This must be contiguous memory (from an I/O perspective), and must be
1800 * DMA'able (which means for some systems, it will go through an IOMMU,
1801 * or be forced into a low address range).
1802 */
1803int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1804{
1805        unsigned amt;
1806
1807        if (!rcd->rcvhdrq) {
1808                gfp_t gfp_flags;
1809
1810                amt = rcvhdrq_size(rcd);
1811
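                    /*
                     * Kernel and VNIC contexts are allocated with
                     * GFP_KERNEL; user contexts use GFP_USER, which also
                     * honors cpuset hardwall limits for memory that will
                     * be mapped into user space.
                     */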
1812                if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
1813                        gfp_flags = GFP_KERNEL;
1814                else
1815                        gfp_flags = GFP_USER;
1816                rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
1817                                                  &rcd->rcvhdrq_dma,
1818                                                  gfp_flags | __GFP_COMP);
1819
1820                if (!rcd->rcvhdrq) {
1821                        dd_dev_err(dd,
1822                                   "attempt to allocate %u bytes for ctxt %u rcvhdrq failed\n",
1823                                   amt, rcd->ctxt);
1824                        goto bail;
1825                }
1826
1827                if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
1828                    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
1829                        rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
1830                                                                    PAGE_SIZE,
1831                                                                    &rcd->rcvhdrqtailaddr_dma,
1832                                                                    gfp_flags);
1833                        if (!rcd->rcvhdrtail_kvaddr)
1834                                goto bail_free;
1835                }
1836        }
1837
1838        set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize,
1839                      rcd->rcvhdrq_cnt);
1840
1841        return 0;
1842
1843bail_free:
1844        dd_dev_err(dd,
1845                   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
1846                   rcd->ctxt);
1847        dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
1848                          rcd->rcvhdrq_dma);
1849        rcd->rcvhdrq = NULL;
1850bail:
1851        return -ENOMEM;
1852}
1853
1854/**
1855 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
1856 * @rcd: the context we are setting up.
1857 *
1858 * Allocate the eager TID buffers and program them into the chip.
1859 * They are no longer completely contiguous; we do multiple allocation
1860 * calls.  Otherwise we get the OOM code involved by asking for too
1861 * much per call, with disastrous results on some kernels.
1862 */
1863int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
1864{
1865        struct hfi1_devdata *dd = rcd->dd;
1866        u32 max_entries, egrtop, alloced_bytes = 0;
1867        gfp_t gfp_flags;
1868        u16 order, idx = 0;
1869        int ret = 0;
1870        u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
1871
1872        /*
1873         * GFP_USER, but without GFP_FS, so the buffer cache can be
1874         * coalesced (we hope); otherwise, even at order 4, heavy
1875         * filesystem activity makes these fail.  Also request compound
1876         * pages (__GFP_COMP).
1877         */
1878        gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
1879
1880        /*
1881         * The minimum size of the eager buffers is a group of MTU-sized
1882         * buffers.
1883         * The global eager_buffer_size parameter is checked against the
1884         * theoretical lower limit of the value. Here, we check against the
1885         * MTU.
1886         */
1887        if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
1888                rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
1889        /*
1890         * If using one-pkt-per-egr-buffer, lower the eager buffer
1891         * size to the max MTU (page-aligned).
1892         */
1893        if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
1894                rcd->egrbufs.rcvtid_size = round_mtu;
1895
1896        /*
1897         * Eager buffer sizes of 1MB or less require smaller TID sizes
1898         * to satisfy the "multiple of 8 RcvArray entries" requirement.
1899         */
1900        if (rcd->egrbufs.size <= (1 << 20))
1901                rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
1902                        rounddown_pow_of_two(rcd->egrbufs.size / 8));
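            /*
             * e.g. an 8KB round_mtu with a 512KB eager buffer size gives
             * rcvtid_size = max(8KB, rounddown_pow_of_two(512KB / 8)) = 64KB.
             */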
1903
1904        while (alloced_bytes < rcd->egrbufs.size &&
1905               rcd->egrbufs.alloced < rcd->egrbufs.count) {
1906                rcd->egrbufs.buffers[idx].addr =
1907                        dma_alloc_coherent(&dd->pcidev->dev,
1908                                           rcd->egrbufs.rcvtid_size,
1909                                           &rcd->egrbufs.buffers[idx].dma,
1910                                           gfp_flags);
1911                if (rcd->egrbufs.buffers[idx].addr) {
1912                        rcd->egrbufs.buffers[idx].len =
1913                                rcd->egrbufs.rcvtid_size;
1914                        rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
1915                                rcd->egrbufs.buffers[idx].addr;
1916                        rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
1917                                rcd->egrbufs.buffers[idx].dma;
1918                        rcd->egrbufs.alloced++;
1919                        alloced_bytes += rcd->egrbufs.rcvtid_size;
1920                        idx++;
1921                } else {
1922                        u32 new_size, i, j;
1923                        u64 offset = 0;
1924
1925                        /*
1926                         * Fail the eager buffer allocation if:
1927                         *   - we are already using the lowest acceptable size
1928                         *   - we are using one-pkt-per-egr-buffer (this implies
1929                         *     that we are accepting only one size)
1930                         */
1931                        if (rcd->egrbufs.rcvtid_size == round_mtu ||
1932                            !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
1933                                dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
1934                                           rcd->ctxt);
1935                                ret = -ENOMEM;
1936                                goto bail_rcvegrbuf_phys;
1937                        }
1938
1939                        new_size = rcd->egrbufs.rcvtid_size / 2;
1940
1941                        /*
1942                         * If the first allocation attempt failed, don't
1943                         * fail everything; just retry with the next lower
1944                         * size.
1945                         */
1946                        if (idx == 0) {
1947                                rcd->egrbufs.rcvtid_size = new_size;
1948                                continue;
1949                        }
1950
1951                        /*
1952                         * Re-partition already allocated buffers to a smaller
1953                         * size.
1954                         */
1955                        rcd->egrbufs.alloced = 0;
1956                        for (i = 0, j = 0, offset = 0; j < idx; i++) {
1957                                if (i >= rcd->egrbufs.count)
1958                                        break;
1959                                rcd->egrbufs.rcvtids[i].dma =
1960                                        rcd->egrbufs.buffers[j].dma + offset;
1961                                rcd->egrbufs.rcvtids[i].addr =
1962                                        rcd->egrbufs.buffers[j].addr + offset;
1963                                rcd->egrbufs.alloced++;
1964                                if ((rcd->egrbufs.buffers[j].dma + offset +
1965                                     new_size) ==
1966                                    (rcd->egrbufs.buffers[j].dma +
1967                                     rcd->egrbufs.buffers[j].len)) {
1968                                        j++;
1969                                        offset = 0;
1970                                } else {
1971                                        offset += new_size;
1972                                }
1973                        }
1974                        rcd->egrbufs.rcvtid_size = new_size;
1975                }
1976        }
1977        rcd->egrbufs.numbufs = idx;
1978        rcd->egrbufs.size = alloced_bytes;
1979
1980        hfi1_cdbg(PROC,
1981                  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
1982                  rcd->ctxt, rcd->egrbufs.alloced,
1983                  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
1984
1985        /*
1986         * Set the context's rcv array head update threshold to the closest
1987         * power of 2 (so we can use a mask instead of modulo) below half
1988         * the allocated entries.
1989         */
1990        rcd->egrbufs.threshold =
1991                rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
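            /*
             * e.g. 100 allocated entries give a threshold of
             * rounddown_pow_of_two(50) == 32.
             */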
1992        /*
1993         * Compute the expected RcvArray entry base. This is done after
1994         * allocating the eager buffers in order to maximize the
1995         * expected RcvArray entries for the context.
1996         */
1997        max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
1998        egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
1999        rcd->expected_count = max_entries - egrtop;
2000        if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
2001                rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
2002
2003        rcd->expected_base = rcd->eager_base + egrtop;
2004        hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
2005                  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
2006                  rcd->eager_base, rcd->expected_base);
2007
2008        if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
2009                hfi1_cdbg(PROC,
2010                          "ctxt%u: current Eager buffer size is invalid %u\n",
2011                          rcd->ctxt, rcd->egrbufs.rcvtid_size);
2012                ret = -EINVAL;
2013                goto bail_rcvegrbuf_phys;
2014        }
2015
2016        for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
2017                hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
2018                             rcd->egrbufs.rcvtids[idx].dma, order);
2019                cond_resched();
2020        }
2021
2022        return 0;
2023
2024bail_rcvegrbuf_phys:
2025        for (idx = 0; idx < rcd->egrbufs.alloced &&
2026             rcd->egrbufs.buffers[idx].addr;
2027             idx++) {
2028                dma_free_coherent(&dd->pcidev->dev,
2029                                  rcd->egrbufs.buffers[idx].len,
2030                                  rcd->egrbufs.buffers[idx].addr,
2031                                  rcd->egrbufs.buffers[idx].dma);
2032                rcd->egrbufs.buffers[idx].addr = NULL;
2033                rcd->egrbufs.buffers[idx].dma = 0;
2034                rcd->egrbufs.buffers[idx].len = 0;
2035        }
2036
2037        return ret;
2038}
2039