linux/drivers/infiniband/hw/hfi1/pio.c
   1/*
   2 * Copyright(c) 2015-2017 Intel Corporation.
   3 *
   4 * This file is provided under a dual BSD/GPLv2 license.  When using or
   5 * redistributing this file, you may do so under either license.
   6 *
   7 * GPL LICENSE SUMMARY
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of version 2 of the GNU General Public License as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * BSD LICENSE
  19 *
  20 * Redistribution and use in source and binary forms, with or without
  21 * modification, are permitted provided that the following conditions
  22 * are met:
  23 *
  24 *  - Redistributions of source code must retain the above copyright
  25 *    notice, this list of conditions and the following disclaimer.
  26 *  - Redistributions in binary form must reproduce the above copyright
  27 *    notice, this list of conditions and the following disclaimer in
  28 *    the documentation and/or other materials provided with the
  29 *    distribution.
  30 *  - Neither the name of Intel Corporation nor the names of its
  31 *    contributors may be used to endorse or promote products derived
  32 *    from this software without specific prior written permission.
  33 *
  34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45 *
  46 */
  47
  48#include <linux/delay.h>
  49#include "hfi.h"
  50#include "qp.h"
  51#include "trace.h"
  52
  53#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
  54
  55#define SC(name) SEND_CTXT_##name
  56/*
  57 * Send Context functions
  58 */
  59static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
  60
  61/*
  62 * Set the CM reset bit and wait for it to clear.  Use the provided
  63 * sendctrl register.  This routine has no locking.
  64 */
  65void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
  66{
  67        write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
  68        while (1) {
  69                udelay(1);
  70                sendctrl = read_csr(dd, SEND_CTRL);
  71                if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
  72                        break;
  73        }
  74}
  75
  76/* defined in header release 48 and higher */
  77#ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
  78#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
  79#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
  80#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
  81                << SEND_CTRL_UNSUPPORTED_VL_SHIFT)
  82#endif
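    /*
     * Worked example of the unsupported-VL mask built in pio_send_control()
     * below (a sketch, assuming num_vls == 4):
     *
     *   (~0ull << 4) & SEND_CTRL_UNSUPPORTED_VL_MASK  = 0xf0
     *   0xf0 << SEND_CTRL_UNSUPPORTED_VL_SHIFT        = 0x780
     *
     * i.e. the bits for VLs 4-7 are set (marked unsupported) while VLs 0-3
     * remain enabled for sending.
     */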
  83
  84/* global control of PIO send */
  85void pio_send_control(struct hfi1_devdata *dd, int op)
  86{
  87        u64 reg, mask;
  88        unsigned long flags;
  89        int write = 1;  /* write sendctrl back */
  90        int flush = 0;  /* re-read sendctrl to make sure it is flushed */
  91
  92        spin_lock_irqsave(&dd->sendctrl_lock, flags);
  93
  94        reg = read_csr(dd, SEND_CTRL);
  95        switch (op) {
  96        case PSC_GLOBAL_ENABLE:
  97                reg |= SEND_CTRL_SEND_ENABLE_SMASK;
  98        /* Fall through */
  99        case PSC_DATA_VL_ENABLE:
 100                /* Disallow sending on VLs not enabled */
 101                mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
 102                                SEND_CTRL_UNSUPPORTED_VL_SHIFT;
 103                reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
 104                break;
 105        case PSC_GLOBAL_DISABLE:
 106                reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
 107                break;
 108        case PSC_GLOBAL_VLARB_ENABLE:
 109                reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
 110                break;
 111        case PSC_GLOBAL_VLARB_DISABLE:
 112                reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
 113                break;
 114        case PSC_CM_RESET:
 115                __cm_reset(dd, reg);
 116                write = 0; /* CSR already written (and flushed) */
 117                break;
 118        case PSC_DATA_VL_DISABLE:
 119                reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
 120                flush = 1;
 121                break;
 122        default:
 123                dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
 124                break;
 125        }
 126
 127        if (write) {
 128                write_csr(dd, SEND_CTRL, reg);
 129                if (flush)
 130                        (void)read_csr(dd, SEND_CTRL); /* flush write */
 131        }
 132
 133        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
 134}
 135
 136/* number of send context memory pools */
 137#define NUM_SC_POOLS 2
 138
 139/* Send Context Size (SCS) wildcards */
 140#define SCS_POOL_0 -1
 141#define SCS_POOL_1 -2
 142
 143/* Send Context Count (SCC) wildcards */
 144#define SCC_PER_VL -1
 145#define SCC_PER_CPU  -2
 146#define SCC_PER_KRCVQ  -3
 147
 148/* Send Context Size (SCS) constants */
 149#define SCS_ACK_CREDITS  32
 150#define SCS_VL15_CREDITS 102    /* 3 pkts of 2048B data + 128B header */
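    /*
     * The arithmetic behind SCS_VL15_CREDITS (a sketch, assuming the
     * 64-byte PIO block size, PIO_BLOCK_SIZE, defined outside this file):
     *
     *   3 * (2048 B data + 128 B header) = 6528 B
     *   6528 B / 64 B per block          = 102 blocks (credits)
     */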
 151
 152#define PIO_THRESHOLD_CEILING 4096
 153
 154#define PIO_WAIT_BATCH_SIZE 5
 155
 156/* default send context sizes */
 157static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
 158        [SC_KERNEL] = { .size  = SCS_POOL_0,    /* even divide, pool 0 */
 159                        .count = SCC_PER_VL },  /* one per NUMA */
 160        [SC_ACK]    = { .size  = SCS_ACK_CREDITS,
 161                        .count = SCC_PER_KRCVQ },
 162        [SC_USER]   = { .size  = SCS_POOL_0,    /* even divide, pool 0 */
 163                        .count = SCC_PER_CPU }, /* one per CPU */
 164        [SC_VL15]   = { .size  = SCS_VL15_CREDITS,
 165                        .count = 1 },
 166
 167};
 168
 169/* send context memory pool configuration */
 170struct mem_pool_config {
 171        int centipercent;       /* % of memory, in 100ths of 1% */
 172        int absolute_blocks;    /* absolute block count */
 173};
 174
 175/* default memory pool configuration: 100% in pool 0 */
 176static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
 177        /* centi%, abs blocks */
 178        {  10000,     -1 },             /* pool 0 */
 179        {      0,     -1 },             /* pool 1 */
 180};
 181
 182/* memory pool information, used when calculating final sizes */
 183struct mem_pool_info {
 184        int centipercent;       /*
 185                                 * 100th of 1% of memory to use, -1 if blocks
 186                                 * already set
 187                                 */
 188        int count;              /* count of contexts in the pool */
 189        int blocks;             /* block size of the pool */
 190        int size;               /* context size, in blocks */
 191};
 192
 193/*
 194 * Convert a pool wildcard to a valid pool index.  The wildcards
 195 * start at -1 and grow more negative.  Map them as:
 196 *      -1 => 0
 197 *      -2 => 1
 198 *      etc.
 199 *
 200 * Return -1 on non-wildcard input, otherwise convert to a pool number.
 201 */
 202static int wildcard_to_pool(int wc)
 203{
 204        if (wc >= 0)
 205                return -1;      /* non-wildcard */
 206        return -wc - 1;
 207}
 208
 209static const char *sc_type_names[SC_MAX] = {
 210        "kernel",
 211        "ack",
 212        "user",
 213        "vl15"
 214};
 215
 216static const char *sc_type_name(int index)
 217{
 218        if (index < 0 || index >= SC_MAX)
 219                return "unknown";
 220        return sc_type_names[index];
 221}
 222
 223/*
 224 * Read the send context memory pool configuration and send context
 225 * size configuration.  Replace any wildcards and come up with final
 226 * counts and sizes for the send context types.
 227 */
 228int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
 229{
 230        struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
 231        int total_blocks = (dd->chip_pio_mem_size / PIO_BLOCK_SIZE) - 1;
 232        int total_contexts = 0;
 233        int fixed_blocks;
 234        int pool_blocks;
 235        int used_blocks;
 236        int cp_total;           /* centipercent total */
 237        int ab_total;           /* absolute block total */
 238        int extra;
 239        int i;
 240
 241        /*
 242         * When SDMA is enabled, kernel context pio packet size is capped by
 243         * "piothreshold". Reduce pio buffer allocation for kernel context by
 244         * setting it to a fixed size. The allocation allows 3-deep buffering
 245         * of the largest pio packets plus up to 128 bytes header, sufficient
 246         * to maintain verbs performance.
 247         *
 248         * When SDMA is disabled, keep the default pooling allocation.
 249         */
 250        if (HFI1_CAP_IS_KSET(SDMA)) {
 251                u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ?
 252                                         piothreshold : PIO_THRESHOLD_CEILING;
 253                sc_config_sizes[SC_KERNEL].size =
 254                        3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE;
 255        }
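            /*
             * Worked example (a sketch, assuming a 64-byte PIO_BLOCK_SIZE):
             * with piothreshold at or above PIO_THRESHOLD_CEILING,
             * max_pkt_size is capped at 4096 and the kernel context size
             * becomes 3 * (4096 + 128) / 64 = 198 blocks.
             */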
 256
 257        /*
 258         * Step 0:
 259         *      - copy the centipercents/absolute sizes from the pool config
 260         *      - sanity check these values
 261         *      - add up centipercents, then later check for full value
 262         *      - add up absolute blocks, then later check for over-commit
 263         */
 264        cp_total = 0;
 265        ab_total = 0;
 266        for (i = 0; i < NUM_SC_POOLS; i++) {
 267                int cp = sc_mem_pool_config[i].centipercent;
 268                int ab = sc_mem_pool_config[i].absolute_blocks;
 269
 270                /*
 271                 * A negative value is "unused" or "invalid".  Both *can*
 272                 * be valid, but centipercent wins, so check that first
 273                 */
 274                if (cp >= 0) {                  /* centipercent valid */
 275                        cp_total += cp;
 276                } else if (ab >= 0) {           /* absolute blocks valid */
 277                        ab_total += ab;
 278                } else {                        /* neither valid */
 279                        dd_dev_err(
 280                                dd,
 281                                "Send context memory pool %d: both the block count and centipercent are invalid\n",
 282                                i);
 283                        return -EINVAL;
 284                }
 285
 286                mem_pool_info[i].centipercent = cp;
 287                mem_pool_info[i].blocks = ab;
 288        }
 289
 290        /* do not use both % and absolute blocks for different pools */
 291        if (cp_total != 0 && ab_total != 0) {
 292                dd_dev_err(
 293                        dd,
 294                        "All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
 295                return -EINVAL;
 296        }
 297
 298        /* if any percentages are present, they must add up to 100% x 100 */
 299        if (cp_total != 0 && cp_total != 10000) {
 300                dd_dev_err(
 301                        dd,
 302                        "Send context memory pool centipercent is %d, expecting 10000\n",
 303                        cp_total);
 304                return -EINVAL;
 305        }
 306
 307        /* the absolute pool total cannot be more than the mem total */
 308        if (ab_total > total_blocks) {
 309                dd_dev_err(
 310                        dd,
 311                        "Send context memory pool absolute block count %d is larger than the memory size %d\n",
 312                        ab_total, total_blocks);
 313                return -EINVAL;
 314        }
 315
 316        /*
 317         * Step 2:
 318         *      - copy from the context size config
 319         *      - replace context type wildcard counts with real values
 320         *      - add up non-memory pool block sizes
 321         *      - add up memory pool user counts
 322         */
 323        fixed_blocks = 0;
 324        for (i = 0; i < SC_MAX; i++) {
 325                int count = sc_config_sizes[i].count;
 326                int size = sc_config_sizes[i].size;
 327                int pool;
 328
 329                /*
 330                 * Sanity check count: Either a positive value or
 331                 * one of the expected wildcards is valid.  The positive
 332                 * value is checked later when we compare against total
 333                 * memory available.
 334                 */
 335                if (i == SC_ACK) {
 336                        count = dd->n_krcv_queues;
 337                } else if (i == SC_KERNEL) {
 338                        count = INIT_SC_PER_VL * num_vls;
 339                } else if (count == SCC_PER_CPU) {
 340                        count = dd->num_rcv_contexts - dd->n_krcv_queues;
 341                } else if (count < 0) {
 342                        dd_dev_err(
 343                                dd,
 344                                "%s send context invalid count wildcard %d\n",
 345                                sc_type_name(i), count);
 346                        return -EINVAL;
 347                }
 348                if (total_contexts + count > dd->chip_send_contexts)
 349                        count = dd->chip_send_contexts - total_contexts;
 350
 351                total_contexts += count;
 352
 353                /*
 354                 * Sanity check pool: The conversion will return a pool
 355                 * number or -1 if a fixed (non-negative) value.  The fixed
 356                 * value is checked later when we compare against
 357                 * total memory available.
 358                 */
 359                pool = wildcard_to_pool(size);
 360                if (pool == -1) {                       /* non-wildcard */
 361                        fixed_blocks += size * count;
 362                } else if (pool < NUM_SC_POOLS) {       /* valid wildcard */
 363                        mem_pool_info[pool].count += count;
 364                } else {                                /* invalid wildcard */
 365                        dd_dev_err(
 366                                dd,
 367                                "%s send context invalid pool wildcard %d\n",
 368                                sc_type_name(i), size);
 369                        return -EINVAL;
 370                }
 371
 372                dd->sc_sizes[i].count = count;
 373                dd->sc_sizes[i].size = size;
 374        }
 375        if (fixed_blocks > total_blocks) {
 376                dd_dev_err(
 377                        dd,
 378                        "Send context fixed block count, %u, larger than total block count %u\n",
 379                        fixed_blocks, total_blocks);
 380                return -EINVAL;
 381        }
 382
 383        /* step 3: calculate the blocks in the pools, and pool context sizes */
 384        pool_blocks = total_blocks - fixed_blocks;
 385        if (ab_total > pool_blocks) {
 386                dd_dev_err(
 387                        dd,
 388                        "Send context fixed pool sizes, %u, larger than pool block count %u\n",
 389                        ab_total, pool_blocks);
 390                return -EINVAL;
 391        }
 392        /* subtract off the fixed pool blocks */
 393        pool_blocks -= ab_total;
 394
 395        for (i = 0; i < NUM_SC_POOLS; i++) {
 396                struct mem_pool_info *pi = &mem_pool_info[i];
 397
 398                /* % beats absolute blocks */
 399                if (pi->centipercent >= 0)
 400                        pi->blocks = (pool_blocks * pi->centipercent) / 10000;
 401
 402                if (pi->blocks == 0 && pi->count != 0) {
 403                        dd_dev_err(
 404                                dd,
 405                                "Send context memory pool %d has %u contexts, but no blocks\n",
 406                                i, pi->count);
 407                        return -EINVAL;
 408                }
 409                if (pi->count == 0) {
 410                        /* warn about wasted blocks */
 411                        if (pi->blocks != 0)
 412                                dd_dev_err(
 413                                        dd,
 414                                        "Send context memory pool %d has %u blocks, but zero contexts\n",
 415                                        i, pi->blocks);
 416                        pi->size = 0;
 417                } else {
 418                        pi->size = pi->blocks / pi->count;
 419                }
 420        }
 421
 422        /* step 4: fill in the context type sizes from the pool sizes */
 423        used_blocks = 0;
 424        for (i = 0; i < SC_MAX; i++) {
 425                if (dd->sc_sizes[i].size < 0) {
 426                        unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);
 427
 428                        WARN_ON_ONCE(pool >= NUM_SC_POOLS);
 429                        dd->sc_sizes[i].size = mem_pool_info[pool].size;
 430                }
 431                /* make sure we are not larger than what is allowed by the HW */
 432#define PIO_MAX_BLOCKS 1024
 433                if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
 434                        dd->sc_sizes[i].size = PIO_MAX_BLOCKS;
 435
 436                /* calculate our total usage */
 437                used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
 438        }
 439        extra = total_blocks - used_blocks;
 440        if (extra != 0)
 441                dd_dev_info(dd, "unused send context blocks: %d\n", extra);
 442
 443        return total_contexts;
 444}
 445
 446int init_send_contexts(struct hfi1_devdata *dd)
 447{
 448        u16 base;
 449        int ret, i, j, context;
 450
 451        ret = init_credit_return(dd);
 452        if (ret)
 453                return ret;
 454
 455        dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
 456                                        GFP_KERNEL);
 457        dd->send_contexts = kcalloc(dd->num_send_contexts,
 458                                        sizeof(struct send_context_info),
 459                                        GFP_KERNEL);
 460        if (!dd->send_contexts || !dd->hw_to_sw) {
 461                kfree(dd->hw_to_sw);
 462                kfree(dd->send_contexts);
 463                free_credit_return(dd);
 464                return -ENOMEM;
 465        }
 466
 467        /* hardware context map starts with invalid send context indices */
 468        for (i = 0; i < TXE_NUM_CONTEXTS; i++)
 469                dd->hw_to_sw[i] = INVALID_SCI;
 470
 471        /*
 472         * All send contexts have their credit sizes.  Allocate credits
 473         * for each context one after another from the global space.
 474         */
 475        context = 0;
 476        base = 1;
 477        for (i = 0; i < SC_MAX; i++) {
 478                struct sc_config_sizes *scs = &dd->sc_sizes[i];
 479
 480                for (j = 0; j < scs->count; j++) {
 481                        struct send_context_info *sci =
 482                                                &dd->send_contexts[context];
 483                        sci->type = i;
 484                        sci->base = base;
 485                        sci->credits = scs->size;
 486
 487                        context++;
 488                        base += scs->size;
 489                }
 490        }
 491
 492        return 0;
 493}
 494
 495/*
 496 * Allocate a software index and hardware context of the given type.
 497 *
 498 * Must be called with dd->sc_lock held.
 499 */
 500static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
 501                       u32 *hw_context)
 502{
 503        struct send_context_info *sci;
 504        u32 index;
 505        u32 context;
 506
 507        for (index = 0, sci = &dd->send_contexts[0];
 508                        index < dd->num_send_contexts; index++, sci++) {
 509                if (sci->type == type && sci->allocated == 0) {
 510                        sci->allocated = 1;
 511                        /* use a 1:1 mapping, but make them non-equal */
 512                        context = dd->chip_send_contexts - index - 1;
 513                        dd->hw_to_sw[context] = index;
 514                        *sw_index = index;
 515                        *hw_context = context;
 516                        return 0; /* success */
 517                }
 518        }
 519        dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
 520        return -ENOSPC;
 521}
 522
 523/*
 524 * Free the send context given by its software index.
 525 *
 526 * Must be called with dd->sc_lock held.
 527 */
 528static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
 529{
 530        struct send_context_info *sci;
 531
 532        sci = &dd->send_contexts[sw_index];
 533        if (!sci->allocated) {
 534                dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
 535                           __func__, sw_index, hw_context);
 536        }
 537        sci->allocated = 0;
 538        dd->hw_to_sw[hw_context] = INVALID_SCI;
 539}
 540
 541/* return the base context of a context in a group */
 542static inline u32 group_context(u32 context, u32 group)
 543{
 544        return (context >> group) << group;
 545}
 546
 547/* return the size of a group */
 548static inline u32 group_size(u32 group)
 549{
 550        return 1 << group;
 551}
 552
 553/*
 554 * Obtain the credit return addresses, kernel virtual and bus, for the
 555 * given sc.
 556 *
 557 * To understand this routine:
 558 * o va and dma are arrays of struct credit_return.  One for each physical
 559 *   send context, per NUMA.
 560 * o Each send context always looks in its relative location in a struct
 561 *   credit_return for its credit return.
 562 * o Each send context in a group must have its return address CSR programmed
 563 *   with the same value.  Use the address of the first send context in the
 564 *   group.
 565 */
 566static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
 567{
 568        u32 gc = group_context(sc->hw_context, sc->group);
 569        u32 index = sc->hw_context & 0x7;
 570
 571        sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
 572        *dma = (unsigned long)
 573               &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
 574}
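    /*
     * For example, with sc->group == 2 (a group of group_size(2) == 4
     * contexts), hw_context 11 gives gc = group_context(11, 2) = 8 and
     * index = 11 & 0x7 = 3.  Contexts 8-11 all program their credit return
     * address CSR with the DMA address of entry 8, and context 11 polls its
     * own slot, cr[3], within that entry.
     */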
 575
 576/*
 577 * Work queue function triggered in error interrupt routine for
 578 * kernel contexts.
 579 */
 580static void sc_halted(struct work_struct *work)
 581{
 582        struct send_context *sc;
 583
 584        sc = container_of(work, struct send_context, halt_work);
 585        sc_restart(sc);
 586}
 587
 588/*
 589 * Calculate PIO block threshold for this send context using the given MTU.
 590 * Trigger a return when one MTU plus optional header of credits remain.
 591 *
 592 * Parameter mtu is in bytes.
 593 * Parameter hdrqentsize is in DWORDs.
 594 *
 595 * Return value is what to write into the CSR: trigger return when
 596 * unreturned credits pass this count.
 597 */
 598u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
 599{
 600        u32 release_credits;
 601        u32 threshold;
 602
 603        /* add in the header size, then divide by the PIO block size */
 604        mtu += hdrqentsize << 2;
 605        release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);
 606
 607        /* check against this context's credits */
 608        if (sc->credits <= release_credits)
 609                threshold = 1;
 610        else
 611                threshold = sc->credits - release_credits;
 612
 613        return threshold;
 614}
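    /*
     * Worked example (a sketch, assuming a 64-byte PIO_BLOCK_SIZE): for a
     * 2048-byte MTU and a 32-dword (128-byte) hdrqentsize,
     * release_credits = DIV_ROUND_UP(2048 + 128, 64) = 34.  A context with
     * 102 credits then gets a threshold of 102 - 34 = 68, i.e. a credit
     * return is triggered while one full packet's worth of credits still
     * remains.
     */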
 615
 616/*
 617 * Calculate credit threshold in terms of percent of the allocated credits.
 618 * Trigger when unreturned credits equal or exceed the percentage of the whole.
 619 *
 620 * Return value is what to write into the CSR: trigger return when
 621 * unreturned credits pass this count.
 622 */
 623u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
 624{
 625        return (sc->credits * percent) / 100;
 626}
 627
 628/*
 629 * Set the credit return threshold.
 630 */
 631void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
 632{
 633        unsigned long flags;
 634        u32 old_threshold;
 635        int force_return = 0;
 636
 637        spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
 638
 639        old_threshold = (sc->credit_ctrl >>
 640                                SC(CREDIT_CTRL_THRESHOLD_SHIFT))
 641                         & SC(CREDIT_CTRL_THRESHOLD_MASK);
 642
 643        if (new_threshold != old_threshold) {
 644                sc->credit_ctrl =
 645                        (sc->credit_ctrl
 646                                & ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
 647                        | ((new_threshold
 648                                & SC(CREDIT_CTRL_THRESHOLD_MASK))
 649                           << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
 650                write_kctxt_csr(sc->dd, sc->hw_context,
 651                                SC(CREDIT_CTRL), sc->credit_ctrl);
 652
 653                /* force a credit return on change to avoid a possible stall */
 654                force_return = 1;
 655        }
 656
 657        spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
 658
 659        if (force_return)
 660                sc_return_credits(sc);
 661}
 662
 663/*
 664 * set_pio_integrity
 665 *
 666 * Set the CHECK_ENABLE register for the send context 'sc'.
 667 */
 668void set_pio_integrity(struct send_context *sc)
 669{
 670        struct hfi1_devdata *dd = sc->dd;
 671        u32 hw_context = sc->hw_context;
 672        int type = sc->type;
 673
 674        write_kctxt_csr(dd, hw_context,
 675                        SC(CHECK_ENABLE),
 676                        hfi1_pkt_default_send_ctxt_mask(dd, type));
 677}
 678
 679static u32 get_buffers_allocated(struct send_context *sc)
 680{
 681        int cpu;
 682        u32 ret = 0;
 683
 684        for_each_possible_cpu(cpu)
 685                ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
 686        return ret;
 687}
 688
 689static void reset_buffers_allocated(struct send_context *sc)
 690{
 691        int cpu;
 692
 693        for_each_possible_cpu(cpu)
 694                (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
 695}
 696
 697/*
 698 * Allocate a NUMA relative send context structure of the given type along
 699 * with a HW context.
 700 */
 701struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
 702                              uint hdrqentsize, int numa)
 703{
 704        struct send_context_info *sci;
 705        struct send_context *sc = NULL;
 706        int req_type = type;
 707        dma_addr_t dma;
 708        unsigned long flags;
 709        u64 reg;
 710        u32 thresh;
 711        u32 sw_index;
 712        u32 hw_context;
 713        int ret;
 714        u8 opval, opmask;
 715
 716        /* do not allocate while frozen */
 717        if (dd->flags & HFI1_FROZEN)
 718                return NULL;
 719
 720        sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
 721        if (!sc)
 722                return NULL;
 723
 724        sc->buffers_allocated = alloc_percpu(u32);
 725        if (!sc->buffers_allocated) {
 726                kfree(sc);
 727                dd_dev_err(dd,
 728                           "Cannot allocate buffers_allocated per cpu counters\n"
 729                          );
 730                return NULL;
 731        }
 732
 733        /*
 734         * VNIC contexts are dynamically allocated.
 735         * Hence, pick a user context for VNIC.
 736         */
 737        if (type == SC_VNIC)
 738                type = SC_USER;
 739
 740        spin_lock_irqsave(&dd->sc_lock, flags);
 741        ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
 742        if (ret) {
 743                spin_unlock_irqrestore(&dd->sc_lock, flags);
 744                free_percpu(sc->buffers_allocated);
 745                kfree(sc);
 746                return NULL;
 747        }
 748
 749        /*
 750         * VNIC contexts are used by kernel driver.
 751         * Hence, mark them as kernel contexts.
 752         */
 753        if (req_type == SC_VNIC) {
 754                dd->send_contexts[sw_index].type = SC_KERNEL;
 755                type = SC_KERNEL;
 756        }
 757
 758        sci = &dd->send_contexts[sw_index];
 759        sci->sc = sc;
 760
 761        sc->dd = dd;
 762        sc->node = numa;
 763        sc->type = type;
 764        spin_lock_init(&sc->alloc_lock);
 765        spin_lock_init(&sc->release_lock);
 766        spin_lock_init(&sc->credit_ctrl_lock);
 767        INIT_LIST_HEAD(&sc->piowait);
 768        INIT_WORK(&sc->halt_work, sc_halted);
 769        init_waitqueue_head(&sc->halt_wait);
 770
 771        /* grouping is always single context for now */
 772        sc->group = 0;
 773
 774        sc->sw_index = sw_index;
 775        sc->hw_context = hw_context;
 776        cr_group_addresses(sc, &dma);
 777        sc->credits = sci->credits;
 778        sc->size = sc->credits * PIO_BLOCK_SIZE;
 779
 780/* PIO Send Memory Address details */
 781#define PIO_ADDR_CONTEXT_MASK 0xfful
 782#define PIO_ADDR_CONTEXT_SHIFT 16
 783        sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
 784                                        << PIO_ADDR_CONTEXT_SHIFT);
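            /*
             * In other words, each context's send buffer starts at a 64 KiB
             * stride from piobase: hw_context 0 maps to offset 0x0, context 1
             * to 0x10000, context 2 to 0x20000, and so on (for hw_context
             * values below 256).
             */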
 785
 786        /* set base and credits */
 787        reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
 788                                        << SC(CTRL_CTXT_DEPTH_SHIFT))
 789                | ((sci->base & SC(CTRL_CTXT_BASE_MASK))
 790                                        << SC(CTRL_CTXT_BASE_SHIFT));
 791        write_kctxt_csr(dd, hw_context, SC(CTRL), reg);
 792
 793        set_pio_integrity(sc);
 794
 795        /* unmask all errors */
 796        write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);
 797
 798        /* set the default partition key */
 799        write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
 800                        (SC(CHECK_PARTITION_KEY_VALUE_MASK) &
 801                         DEFAULT_PKEY) <<
 802                        SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
 803
 804        /* per context type checks */
 805        if (type == SC_USER) {
 806                opval = USER_OPCODE_CHECK_VAL;
 807                opmask = USER_OPCODE_CHECK_MASK;
 808        } else {
 809                opval = OPCODE_CHECK_VAL_DISABLED;
 810                opmask = OPCODE_CHECK_MASK_DISABLED;
 811        }
 812
 813        /* set the send context check opcode mask and value */
 814        write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
 815                        ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
 816                        ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
 817
 818        /* set up credit return */
 819        reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
 820        write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);
 821
 822        /*
 823         * Calculate the initial credit return threshold.
 824         *
 825         * For Ack contexts, set a threshold for half the credits.
 826         * For User contexts use the given percentage.  This has been
 827         * sanitized on driver start-up.
 828         * For Kernel contexts, use the default MTU plus a header
 829         * or half the credits, whichever is smaller. This should
 830         * work for both the 3-deep buffering allocation and the
 831         * pooling allocation.
 832         */
 833        if (type == SC_ACK) {
 834                thresh = sc_percent_to_threshold(sc, 50);
 835        } else if (type == SC_USER) {
 836                thresh = sc_percent_to_threshold(sc,
 837                                                 user_credit_return_threshold);
 838        } else { /* kernel */
 839                thresh = min(sc_percent_to_threshold(sc, 50),
 840                             sc_mtu_to_threshold(sc, hfi1_max_mtu,
 841                                                 hdrqentsize));
 842        }
 843        reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
 844        /* add in early return */
 845        if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
 846                reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
 847        else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
 848                reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
 849
 850        /* set up write-through credit_ctrl */
 851        sc->credit_ctrl = reg;
 852        write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);
 853
 854        /* User send contexts should not allow sending on VL15 */
 855        if (type == SC_USER) {
 856                reg = 1ULL << 15;
 857                write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
 858        }
 859
 860        spin_unlock_irqrestore(&dd->sc_lock, flags);
 861
 862        /*
 863         * Allocate shadow ring to track outstanding PIO buffers _after_
 864         * unlocking.  We don't know the size until the lock is held and
 865         * we can't allocate while the lock is held.  No one is using
 866         * the context yet, so allocate it now.
 867         *
 868         * User contexts do not get a shadow ring.
 869         */
 870        if (type != SC_USER) {
 871                /*
 872                 * Size the shadow ring 1 larger than the number of credits
 873                 * so head == tail can mean empty.
 874                 */
 875                sc->sr_size = sci->credits + 1;
 876                sc->sr = kzalloc_node(sizeof(union pio_shadow_ring) *
 877                                sc->sr_size, GFP_KERNEL, numa);
 878                if (!sc->sr) {
 879                        sc_free(sc);
 880                        return NULL;
 881                }
 882        }
 883
 884        hfi1_cdbg(PIO,
 885                  "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
 886                  sw_index,
 887                  hw_context,
 888                  sc_type_name(type),
 889                  sc->group,
 890                  sc->credits,
 891                  sc->credit_ctrl,
 892                  thresh);
 893
 894        return sc;
 895}
 896
 897/* free a per-NUMA send context structure */
 898void sc_free(struct send_context *sc)
 899{
 900        struct hfi1_devdata *dd;
 901        unsigned long flags;
 902        u32 sw_index;
 903        u32 hw_context;
 904
 905        if (!sc)
 906                return;
 907
 908        sc->flags |= SCF_IN_FREE;       /* ensure no restarts */
 909        dd = sc->dd;
 910        if (!list_empty(&sc->piowait))
 911                dd_dev_err(dd, "piowait list not empty!\n");
 912        sw_index = sc->sw_index;
 913        hw_context = sc->hw_context;
 914        sc_disable(sc); /* make sure the HW is disabled */
 915        flush_work(&sc->halt_work);
 916
 917        spin_lock_irqsave(&dd->sc_lock, flags);
 918        dd->send_contexts[sw_index].sc = NULL;
 919
 920        /* clear/disable all registers set in sc_alloc */
 921        write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
 922        write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
 923        write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
 924        write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
 925        write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
 926        write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
 927        write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);
 928
 929        /* release the index and context for re-use */
 930        sc_hw_free(dd, sw_index, hw_context);
 931        spin_unlock_irqrestore(&dd->sc_lock, flags);
 932
 933        kfree(sc->sr);
 934        free_percpu(sc->buffers_allocated);
 935        kfree(sc);
 936}
 937
 938/* disable the context */
 939void sc_disable(struct send_context *sc)
 940{
 941        u64 reg;
 942        unsigned long flags;
 943        struct pio_buf *pbuf;
 944
 945        if (!sc)
 946                return;
 947
 948        /* do all steps, even if already disabled */
 949        spin_lock_irqsave(&sc->alloc_lock, flags);
 950        reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
 951        reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
 952        sc->flags &= ~SCF_ENABLED;
 953        sc_wait_for_packet_egress(sc, 1);
 954        write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
 955        spin_unlock_irqrestore(&sc->alloc_lock, flags);
 956
 957        /*
 958         * Flush any waiters.  Once the context is disabled,
 959         * credit return interrupts are stopped (although there
 960         * could be one in-process when the context is disabled).
 961         * Wait one microsecond for any lingering interrupts, then
 962         * proceed with the flush.
 963         */
 964        udelay(1);
 965        spin_lock_irqsave(&sc->release_lock, flags);
 966        if (sc->sr) {   /* this context has a shadow ring */
 967                while (sc->sr_tail != sc->sr_head) {
 968                        pbuf = &sc->sr[sc->sr_tail].pbuf;
 969                        if (pbuf->cb)
 970                                (*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
 971                        sc->sr_tail++;
 972                        if (sc->sr_tail >= sc->sr_size)
 973                                sc->sr_tail = 0;
 974                }
 975        }
 976        spin_unlock_irqrestore(&sc->release_lock, flags);
 977}
 978
 979/* return SendEgressCtxtStatus.PacketOccupancy */
 980#define packet_occupancy(r) \
 981        (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
 982        >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
 983
 984/* is egress halted on the context? */
 985#define egress_halted(r) \
 986        ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
 987
 988/* wait for packet egress, optionally pause for credit return  */
 989static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
 990{
 991        struct hfi1_devdata *dd = sc->dd;
 992        u64 reg = 0;
 993        u64 reg_prev;
 994        u32 loop = 0;
 995
 996        while (1) {
 997                reg_prev = reg;
 998                reg = read_csr(dd, sc->hw_context * 8 +
 999                               SEND_EGRESS_CTXT_STATUS);
1000                /* done if egress is stopped */
1001                if (egress_halted(reg))
1002                        break;
1003                reg = packet_occupancy(reg);
1004                if (reg == 0)
1005                        break;
1006                /* counter is reset if occupancy count changes */
1007                if (reg != reg_prev)
1008                        loop = 0;
1009                if (loop > 50000) {
1010                        /* timed out - bounce the link */
1011                        dd_dev_err(dd,
1012                                   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
1013                                   __func__, sc->sw_index,
1014                                   sc->hw_context, (u32)reg);
1015                        queue_work(dd->pport->hfi1_wq,
1016                                   &dd->pport->link_bounce_work);
1017                        break;
1018                }
1019                loop++;
1020                udelay(1);
1021        }
1022
1023        if (pause)
1024                /* Add additional delay to ensure chip returns all credits */
1025                pause_for_credit_return(dd);
1026}
1027
1028void sc_wait(struct hfi1_devdata *dd)
1029{
1030        int i;
1031
1032        for (i = 0; i < dd->num_send_contexts; i++) {
1033                struct send_context *sc = dd->send_contexts[i].sc;
1034
1035                if (!sc)
1036                        continue;
1037                sc_wait_for_packet_egress(sc, 0);
1038        }
1039}
1040
1041/*
1042 * Restart a context after it has been halted due to error.
1043 *
1044 * If the first step (waiting for the halt to be asserted) fails, return early.
1045 * Otherwise complain about timeouts but keep going.
1046 *
1047 * It is expected that allocations (enabled flag bit) have been shut off
1048 * already (only applies to kernel contexts).
1049 */
1050int sc_restart(struct send_context *sc)
1051{
1052        struct hfi1_devdata *dd = sc->dd;
1053        u64 reg;
1054        u32 loop;
1055        int count;
1056
1057        /* bounce off if not halted, or being free'd */
1058        if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
1059                return -EINVAL;
1060
1061        dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
1062                    sc->hw_context);
1063
1064        /*
1065         * Step 1: Wait for the context to actually halt.
1066         *
1067         * The error interrupt is asynchronous to actually setting halt
1068         * on the context.
1069         */
1070        loop = 0;
1071        while (1) {
1072                reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
1073                if (reg & SC(STATUS_CTXT_HALTED_SMASK))
1074                        break;
1075                if (loop > 100) {
1076                        dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
1077                                   __func__, sc->sw_index, sc->hw_context);
1078                        return -ETIME;
1079                }
1080                loop++;
1081                udelay(1);
1082        }
1083
1084        /*
1085         * Step 2: Ensure no users are still trying to write to PIO.
1086         *
1087         * For kernel contexts, we have already turned off buffer allocation.
1088         * Now wait for the buffer count to go to zero.
1089         *
1090         * For user contexts, the user handling code has cut off write access
1091         * to the context's PIO pages before calling this routine and will
1092         * restore write access after this routine returns.
1093         */
1094        if (sc->type != SC_USER) {
1095                /* kernel context */
1096                loop = 0;
1097                while (1) {
1098                        count = get_buffers_allocated(sc);
1099                        if (count == 0)
1100                                break;
1101                        if (loop > 100) {
1102                                dd_dev_err(dd,
1103                                           "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
1104                                           __func__, sc->sw_index,
1105                                           sc->hw_context, count);
1106                        }
1107                        loop++;
1108                        udelay(1);
1109                }
1110        }
1111
1112        /*
1113         * Step 3: Wait for all packets to egress.
1114         * This is done while disabling the send context
1115         *
1116         * Step 4: Disable the context
1117         *
1118         * This is a superset of the halt.  After the disable, the
1119         * errors can be cleared.
1120         */
1121        sc_disable(sc);
1122
1123        /*
1124         * Step 5: Enable the context
1125         *
1126         * This enable will clear the halted flag and per-send context
1127         * error flags.
1128         */
1129        return sc_enable(sc);
1130}
1131
1132/*
1133 * PIO freeze processing.  To be called after the TXE block is fully frozen.
1134 * Go through all frozen send contexts and disable them.  The contexts are
1135 * already stopped by the freeze.
1136 */
1137void pio_freeze(struct hfi1_devdata *dd)
1138{
1139        struct send_context *sc;
1140        int i;
1141
1142        for (i = 0; i < dd->num_send_contexts; i++) {
1143                sc = dd->send_contexts[i].sc;
1144                /*
1145                 * Don't disable unallocated, unfrozen, or user send contexts.
1146                 * User send contexts will be disabled when the process
1147                 * calls into the driver to reset its context.
1148                 */
1149                if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1150                        continue;
1151
1152                /* only need to disable, the context is already stopped */
1153                sc_disable(sc);
1154        }
1155}
1156
1157/*
1158 * Unfreeze PIO for kernel send contexts.  The precondition for calling this
1159 * is that all PIO send contexts have been disabled and the SPC freeze has
1160 * been cleared.  Now perform the last step and re-enable each kernel context.
1161 * User (PSM) processing will occur when PSM calls into the kernel to
1162 * acknowledge the freeze.
1163 */
1164void pio_kernel_unfreeze(struct hfi1_devdata *dd)
1165{
1166        struct send_context *sc;
1167        int i;
1168
1169        for (i = 0; i < dd->num_send_contexts; i++) {
1170                sc = dd->send_contexts[i].sc;
1171                if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1172                        continue;
1173
1174                sc_enable(sc);  /* will clear the sc frozen flag */
1175        }
1176}
1177
1178/*
1179 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
1180 * Returns:
1181 *      -ETIMEDOUT - if we wait too long
1182 *      -EIO       - if there was an error
1183 */
1184static int pio_init_wait_progress(struct hfi1_devdata *dd)
1185{
1186        u64 reg;
1187        int max, count = 0;
1188
1189        /* max is the longest possible HW init time / delay */
1190        max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
1191        while (1) {
1192                reg = read_csr(dd, SEND_PIO_INIT_CTXT);
1193                if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
1194                        break;
1195                if (count >= max)
1196                        return -ETIMEDOUT;
1197                udelay(5);
1198                count++;
1199        }
1200
1201        return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
1202}
1203
1204/*
1205 * Reset all of the send contexts to their power-on state.  Used
1206 * only during manual init - no lock against sc_enable needed.
1207 */
1208void pio_reset_all(struct hfi1_devdata *dd)
1209{
1210        int ret;
1211
1212        /* make sure the init engine is not busy */
1213        ret = pio_init_wait_progress(dd);
1214        /* ignore any timeout */
1215        if (ret == -EIO) {
1216                /* clear the error */
1217                write_csr(dd, SEND_PIO_ERR_CLEAR,
1218                          SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
1219        }
1220
1221        /* reset init all */
1222        write_csr(dd, SEND_PIO_INIT_CTXT,
1223                  SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
1224        udelay(2);
1225        ret = pio_init_wait_progress(dd);
1226        if (ret < 0) {
1227                dd_dev_err(dd,
1228                           "PIO send context init %s while initializing all PIO blocks\n",
1229                           ret == -ETIMEDOUT ? "is stuck" : "had an error");
1230        }
1231}
1232
1233/* enable the context */
1234int sc_enable(struct send_context *sc)
1235{
1236        u64 sc_ctrl, reg, pio;
1237        struct hfi1_devdata *dd;
1238        unsigned long flags;
1239        int ret = 0;
1240
1241        if (!sc)
1242                return -EINVAL;
1243        dd = sc->dd;
1244
1245        /*
1246         * Obtain the allocator lock to guard against any allocation
1247         * attempts (which should not happen prior to context being
1248         * enabled). On the release/disable side we don't need to
1249         * worry about locking since the releaser will not do anything
1250         * if the context accounting values have not changed.
1251         */
1252        spin_lock_irqsave(&sc->alloc_lock, flags);
1253        sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
1254        if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
1255                goto unlock; /* already enabled */
1256
1257        /* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */
1258
1259        *sc->hw_free = 0;
1260        sc->free = 0;
1261        sc->alloc_free = 0;
1262        sc->fill = 0;
1263        sc->fill_wrap = 0;
1264        sc->sr_head = 0;
1265        sc->sr_tail = 0;
1266        sc->flags = 0;
1267        /* the alloc lock ensures no fast path allocation */
1268        reset_buffers_allocated(sc);
1269
1270        /*
1271         * Clear all per-context errors.  Some of these will be set when
1272         * we are re-enabling after a context halt.  Now that the context
1273         * is disabled, the halt will not clear until after the PIO init
1274         * engine runs below.
1275         */
1276        reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
1277        if (reg)
1278                write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
1279
1280        /*
1281         * The HW PIO initialization engine can handle only one init
1282         * request at a time. Serialize access to each device's engine.
1283         */
1284        spin_lock(&dd->sc_init_lock);
1285        /*
1286         * Since access to this code block is serialized and
1287         * each access waits for the initialization to complete
1288         * before releasing the lock, the PIO initialization engine
1289         * should not be in use, so we don't have to wait for the
1290         * InProgress bit to go down.
1291         */
1292        pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
1293               SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
1294                SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
1295        write_csr(dd, SEND_PIO_INIT_CTXT, pio);
1296        /*
1297         * Wait until the engine is done.  Give the chip the required time
1298         * so, hopefully, we read the register just once.
1299         */
1300        udelay(2);
1301        ret = pio_init_wait_progress(dd);
1302        spin_unlock(&dd->sc_init_lock);
1303        if (ret) {
1304                dd_dev_err(dd,
1305                           "sctxt%u(%u): Context not enabled due to init failure %d\n",
1306                           sc->sw_index, sc->hw_context, ret);
1307                goto unlock;
1308        }
1309
1310        /*
1311         * All is well. Enable the context.
1312         */
1313        sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
1314        write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
1315        /*
1316         * Read SendCtxtCtrl to force the write out and prevent a timing
1317         * hazard where a PIO write may reach the context before the enable.
1318         */
1319        read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
1320        sc->flags |= SCF_ENABLED;
1321
1322unlock:
1323        spin_unlock_irqrestore(&sc->alloc_lock, flags);
1324
1325        return ret;
1326}
1327
1328/* force a credit return on the context */
1329void sc_return_credits(struct send_context *sc)
1330{
1331        if (!sc)
1332                return;
1333
1334        /* a 0->1 transition schedules a credit return */
1335        write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
1336                        SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
1337        /*
1338         * Ensure that the write is flushed and the credit return is
1339         * scheduled. We care more about the 0 -> 1 transition.
1340         */
1341        read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
1342        /* set back to 0 for next time */
1343        write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
1344}
1345
1346/* allow all in-flight packets to drain on the context */
1347void sc_flush(struct send_context *sc)
1348{
1349        if (!sc)
1350                return;
1351
1352        sc_wait_for_packet_egress(sc, 1);
1353}
1354
1355/* drop all packets on the context, no waiting until they are sent */
1356void sc_drop(struct send_context *sc)
1357{
1358        if (!sc)
1359                return;
1360
1361        dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
1362                    __func__, sc->sw_index, sc->hw_context);
1363}
1364
1365/*
1366 * Start the software reaction to a context halt or SPC freeze:
1367 *      - mark the context as halted or frozen
1368 *      - stop buffer allocations
1369 *
1370 * Called from the error interrupt.  Other work is deferred until
1371 * out of the interrupt.
1372 */
1373void sc_stop(struct send_context *sc, int flag)
1374{
1375        unsigned long flags;
1376
1377        /* mark the context */
1378        sc->flags |= flag;
1379
1380        /* stop buffer allocations */
1381        spin_lock_irqsave(&sc->alloc_lock, flags);
1382        sc->flags &= ~SCF_ENABLED;
1383        spin_unlock_irqrestore(&sc->alloc_lock, flags);
1384        wake_up(&sc->halt_wait);
1385}
1386
1387#define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
1388#define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
1389
1390/*
1391 * The send context buffer "allocator".
1392 *
1393 * @sc: the PIO send context we are allocating from
1394 * @dw_len: length of whole packet - including PBC - in dwords
1395 * @cb: optional callback to call when the buffer is finished sending
1396 * @arg: argument for cb
1397 *
1398 * Return a pointer to a PIO buffer if successful, NULL if not enough room.
1399 */
1400struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
1401                                pio_release_cb cb, void *arg)
1402{
1403        struct pio_buf *pbuf = NULL;
1404        unsigned long flags;
1405        unsigned long avail;
1406        unsigned long blocks = dwords_to_blocks(dw_len);
1407        u32 fill_wrap;
1408        int trycount = 0;
1409        u32 head, next;
1410
1411        spin_lock_irqsave(&sc->alloc_lock, flags);
1412        if (!(sc->flags & SCF_ENABLED)) {
1413                spin_unlock_irqrestore(&sc->alloc_lock, flags);
1414                goto done;
1415        }
1416
1417retry:
1418        avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
1419        if (blocks > avail) {
1420                /* not enough room */
1421                if (unlikely(trycount)) { /* already tried to get more room */
1422                        spin_unlock_irqrestore(&sc->alloc_lock, flags);
1423                        goto done;
1424                }
1425                /* copy from the releaser's cache line and recalculate */
1426                sc->alloc_free = ACCESS_ONCE(sc->free);
1427                avail =
1428                        (unsigned long)sc->credits -
1429                        (sc->fill - sc->alloc_free);
1430                if (blocks > avail) {
1431                        /* still no room, actively update */
1432                        sc_release_update(sc);
1433                        sc->alloc_free = ACCESS_ONCE(sc->free);
1434                        trycount++;
1435                        goto retry;
1436                }
1437        }
1438
1439        /* there is enough room */
1440
1441        preempt_disable();
1442        this_cpu_inc(*sc->buffers_allocated);
1443
1444        /* read this once */
1445        head = sc->sr_head;
1446
1447        /* "allocate" the buffer */
1448        sc->fill += blocks;
1449        fill_wrap = sc->fill_wrap;
1450        sc->fill_wrap += blocks;
1451        if (sc->fill_wrap >= sc->credits)
1452                sc->fill_wrap = sc->fill_wrap - sc->credits;
1453
1454        /*
1455         * Fill the parts that the releaser looks at before moving the head.
1456         * The only necessary piece is the sent_at field.  The credits
1457         * we have just allocated cannot have been returned yet, so the
1458         * cb and arg will not be looked at for a "while".  Put them
1459         * on this side of the memory barrier anyway.
1460         */
1461        pbuf = &sc->sr[head].pbuf;
1462        pbuf->sent_at = sc->fill;
1463        pbuf->cb = cb;
1464        pbuf->arg = arg;
1465        pbuf->sc = sc;  /* could be filled in at sc->sr init time */
1466        /* make sure this is in memory before updating the head */
1467
1468        /* calculate next head index, do not store */
1469        next = head + 1;
1470        if (next >= sc->sr_size)
1471                next = 0;
1472        /*
1473         * update the head - must be last! - the releaser can look at fields
1474         * in pbuf once we move the head
1475         */
1476        smp_wmb();
1477        sc->sr_head = next;
1478        spin_unlock_irqrestore(&sc->alloc_lock, flags);
1479
1480        /* finish filling in the buffer outside the lock */
1481        pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
1482        pbuf->end = sc->base_addr + sc->size;
1483        pbuf->qw_written = 0;
1484        pbuf->carry_bytes = 0;
1485        pbuf->carry.val64 = 0;
1486done:
1487        return pbuf;
1488}
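/*
 * Illustrative caller sketch (argument details approximate, not verbatim
 * from the send path): allocate from the context, then copy the packet
 * into the returned buffer with the pio_copy()/seg_pio_copy_*() helpers
 * from pio_copy.c, e.g.
 *
 *        pbuf = sc_buffer_alloc(sc, plen, cb, qp);
 *        if (!pbuf)
 *                return -EBUSY;  // no credits: queue on sc->piowait instead
 *        pio_copy(dd, pbuf, pbc, hdr, hdrwords);
 *
 * A non-NULL cb is invoked from sc_release_update() once the credits
 * covering this buffer have been returned.
 */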
1489
1490/*
1491 * There are at least two entities that can turn on credit return
1492 * interrupts and they can overlap.  Avoid problems by implementing
1493 * a count scheme that is enforced by a lock.  The lock is needed because
1494 * the count and CSR write must be paired.
1495 */
1496
1497/*
1498 * Start credit return interrupts.  This is managed by a count.  If already
1499 * on, just increment the count.
1500 */
1501void sc_add_credit_return_intr(struct send_context *sc)
1502{
1503        unsigned long flags;
1504
1505        /* lock must surround both the count change and the CSR update */
1506        spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1507        if (sc->credit_intr_count == 0) {
1508                sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1509                write_kctxt_csr(sc->dd, sc->hw_context,
1510                                SC(CREDIT_CTRL), sc->credit_ctrl);
1511        }
1512        sc->credit_intr_count++;
1513        spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1514}
1515
1516/*
1517 * Stop credit return interrupts.  This is managed by a count.  Decrement the
1518 * count; if this is the last user, turn the credit interrupts off.
1519 */
1520void sc_del_credit_return_intr(struct send_context *sc)
1521{
1522        unsigned long flags;
1523
1524        WARN_ON(sc->credit_intr_count == 0);
1525
1526        /* lock must surround both the count change and the CSR update */
1527        spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1528        sc->credit_intr_count--;
1529        if (sc->credit_intr_count == 0) {
1530                sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1531                write_kctxt_csr(sc->dd, sc->hw_context,
1532                                SC(CREDIT_CTRL), sc->credit_ctrl);
1533        }
1534        spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1535}
1536
1537/*
1538 * The caller must be careful when calling this: every call with needint
1539 * set must eventually be paired with a call with needint clear.
1540 */
1541void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
1542{
1543        if (needint)
1544                sc_add_credit_return_intr(sc);
1545        else
1546                sc_del_credit_return_intr(sc);
1547        trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
1548        if (needint) {
1549                mmiowb();
1550                sc_return_credits(sc);
1551        }
1552}
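/*
 * Typical pairing (illustrative): the verbs send path calls this with
 * needint set when a QP must block on sc->piowait for credits, and
 * sc_piobufavail() below calls it with needint clear once waiters have
 * been drained, re-arming it only if waiters remain.
 */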
1553
1554/**
1555 * sc_piobufavail - callback when a PIO buffer is available
1556 * @sc: the send context
1557 *
1558 * This is called from the interrupt handler when a PIO buffer is
1559 * available after hfi1_verbs_send() returned an error that no buffers were
1560 * available. Disable the interrupt if there are no more QPs waiting.
1561 */
1562static void sc_piobufavail(struct send_context *sc)
1563{
1564        struct hfi1_devdata *dd = sc->dd;
1565        struct hfi1_ibdev *dev = &dd->verbs_dev;
1566        struct list_head *list;
1567        struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
1568        struct rvt_qp *qp;
1569        struct hfi1_qp_priv *priv;
1570        unsigned long flags;
1571        unsigned i, n = 0;
1572
1573        if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
1574            dd->send_contexts[sc->sw_index].type != SC_VL15)
1575                return;
1576        list = &sc->piowait;
1577        /*
1578         * Note: checking that the piowait list is empty and clearing
1579         * the buffer available interrupt needs to be atomic or we
1580         * could end up with QPs on the wait list with the interrupt
1581         * disabled.
1582         */
1583        write_seqlock_irqsave(&dev->iowait_lock, flags);
1584        while (!list_empty(list)) {
1585                struct iowait *wait;
1586
1587                if (n == ARRAY_SIZE(qps))
1588                        break;
1589                wait = list_first_entry(list, struct iowait, list);
1590                qp = iowait_to_qp(wait);
1591                priv = qp->priv;
1592                list_del_init(&priv->s_iowait.list);
1593                priv->s_iowait.lock = NULL;
1594                /* refcount held until actual wake up */
1595                qps[n++] = qp;
1596        }
1597        /*
1598         * If there had been waiters and there are more, ensure that we
1599         * redo the force to avoid a potential hang.
1600         */
1601        if (n) {
1602                hfi1_sc_wantpiobuf_intr(sc, 0);
1603                if (!list_empty(list))
1604                        hfi1_sc_wantpiobuf_intr(sc, 1);
1605        }
1606        write_sequnlock_irqrestore(&dev->iowait_lock, flags);
1607
1608        for (i = 0; i < n; i++)
1609                hfi1_qp_wakeup(qps[i],
1610                               RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
1611}
1612
1613/* translate a send credit update to a bit code of reasons */
1614static inline int fill_code(u64 hw_free)
1615{
1616        int code = 0;
1617
1618        if (hw_free & CR_STATUS_SMASK)
1619                code |= PRC_STATUS_ERR;
1620        if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
1621                code |= PRC_PBC;
1622        if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
1623                code |= PRC_THRESHOLD;
1624        if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
1625                code |= PRC_FILL_ERR;
1626        if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
1627                code |= PRC_SC_DISABLE;
1628        return code;
1629}
1630
1631/* use the jiffies compare to get the wrap right */
1632#define sent_before(a, b) time_before(a, b)     /* a < b */
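/*
 * Example of the wrap handling: if the free count has just wrapped,
 * e.g. free == 2 and sent_at == ULONG_MAX - 1, sent_before(free, sent_at)
 * is false (the signed difference is negative), so the buffer is
 * correctly treated as already sent.
 */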
1633
1634/*
1635 * The send context buffer "releaser".
1636 */
1637void sc_release_update(struct send_context *sc)
1638{
1639        struct pio_buf *pbuf;
1640        u64 hw_free;
1641        u32 head, tail;
1642        unsigned long old_free;
1643        unsigned long free;
1644        unsigned long extra;
1645        unsigned long flags;
1646        int code;
1647
1648        if (!sc)
1649                return;
1650
1651        spin_lock_irqsave(&sc->release_lock, flags);
1652        /* update free */
1653        hw_free = le64_to_cpu(*sc->hw_free);            /* volatile read */
1654        old_free = sc->free;
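        /*
         * The hardware count in hw_free is only CR_COUNTER_MASK wide, so
         * compute how far it advanced since the last update, modulo that
         * width, and add the delta to the free-running sc->free.
         */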
1655        extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
1656                        - (old_free & CR_COUNTER_MASK))
1657                                & CR_COUNTER_MASK;
1658        free = old_free + extra;
1659        trace_hfi1_piofree(sc, extra);
1660
1661        /* call sent buffer callbacks */
1662        code = -1;                              /* code not yet set */
1663        head = ACCESS_ONCE(sc->sr_head);        /* snapshot the head */
1664        tail = sc->sr_tail;
1665        while (head != tail) {
1666                pbuf = &sc->sr[tail].pbuf;
1667
1668                if (sent_before(free, pbuf->sent_at)) {
1669                        /* not sent yet */
1670                        break;
1671                }
1672                if (pbuf->cb) {
1673                        if (code < 0) /* fill in code on first user */
1674                                code = fill_code(hw_free);
1675                        (*pbuf->cb)(pbuf->arg, code);
1676                }
1677
1678                tail++;
1679                if (tail >= sc->sr_size)
1680                        tail = 0;
1681        }
1682        sc->sr_tail = tail;
1683        /* make sure tail is updated before free */
1684        smp_wmb();
1685        sc->free = free;
1686        spin_unlock_irqrestore(&sc->release_lock, flags);
1687        sc_piobufavail(sc);
1688}
1689
1690/*
1691 * Send context group releaser.  The argument is the hardware context that
1692 * caused the interrupt.  Called from the send context interrupt handler.
1693 *
1694 * Call release on all contexts in the group.
1695 *
1696 * This routine takes the sc_lock without an irqsave because it is only
1697 * called from an interrupt handler.  Adjust if that changes.
1698 */
1699void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
1700{
1701        struct send_context *sc;
1702        u32 sw_index;
1703        u32 gc, gc_end;
1704
1705        spin_lock(&dd->sc_lock);
1706        sw_index = dd->hw_to_sw[hw_context];
1707        if (unlikely(sw_index >= dd->num_send_contexts)) {
1708                dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
1709                           __func__, hw_context, sw_index);
1710                goto done;
1711        }
1712        sc = dd->send_contexts[sw_index].sc;
1713        if (unlikely(!sc))
1714                goto done;
1715
1716        gc = group_context(hw_context, sc->group);
1717        gc_end = gc + group_size(sc->group);
1718        for (; gc < gc_end; gc++) {
1719                sw_index = dd->hw_to_sw[gc];
1720                if (unlikely(sw_index >= dd->num_send_contexts)) {
1721                        dd_dev_err(dd,
1722                                   "%s: invalid hw (%u) to sw (%u) mapping\n",
1723                                   __func__, hw_context, sw_index);
1724                        continue;
1725                }
1726                sc_release_update(dd->send_contexts[sw_index].sc);
1727        }
1728done:
1729        spin_unlock(&dd->sc_lock);
1730}
1731
1732/*
1733 * pio_select_send_context_vl() - select send context
1734 * @dd: devdata
1735 * @selector: a spreading factor
1736 * @vl: this vl
1737 *
1738 * This function returns a send context based on the selector and a vl.
1739 * The mapping fields are protected by RCU.
1740 */
1741struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
1742                                                u32 selector, u8 vl)
1743{
1744        struct pio_vl_map *m;
1745        struct pio_map_elem *e;
1746        struct send_context *rval;
1747
1748        /*
1749         * NOTE: This should only happen if SC->VL changed after the initial
1750         * checks on the QP/AH.
1751         * The default below will return VL0's send context.
1752         */
1753        if (unlikely(vl >= num_vls)) {
1754                rval = NULL;
1755                goto done;
1756        }
1757
1758        rcu_read_lock();
1759        m = rcu_dereference(dd->pio_map);
1760        if (unlikely(!m)) {
1761                rcu_read_unlock();
1762                return dd->vld[0].sc;
1763        }
1764        e = m->map[vl & m->mask];
1765        rval = e->ksc[selector & e->mask];
1766        rcu_read_unlock();
1767
1768done:
1769        rval = !rval ? dd->vld[0].sc : rval;
1770        return rval;
1771}
1772
1773/*
1774 * pio_select_send_context_sc() - select send context
1775 * @dd: devdata
1776 * @selector: a spreading factor
1777 * @sc5: the 5 bit sc
1778 *
1779 * This function returns a send context based on the selector and an sc.
1780 */
1781struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
1782                                                u32 selector, u8 sc5)
1783{
1784        u8 vl = sc_to_vlt(dd, sc5);
1785
1786        return pio_select_send_context_vl(dd, selector, vl);
1787}
1788
1789/*
1790 * Free the indicated map struct
1791 */
1792static void pio_map_free(struct pio_vl_map *m)
1793{
1794        int i;
1795
1796        for (i = 0; m && i < m->actual_vls; i++)
1797                kfree(m->map[i]);
1798        kfree(m);
1799}
1800
1801/*
1802 * Handle RCU callback
1803 */
1804static void pio_map_rcu_callback(struct rcu_head *list)
1805{
1806        struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);
1807
1808        pio_map_free(m);
1809}
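/*
 * Readers (pio_select_send_context_vl() above) access dd->pio_map under
 * rcu_read_lock().  pio_map_init() below publishes a replacement map with
 * rcu_assign_pointer() while holding dd->pio_map_lock and frees the old
 * map with call_rcu() after a grace period.
 */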
1810
1811/*
1812 * Set credit return threshold for the kernel send context
1813 */
1814static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
1815{
1816        u32 thres;
1817
1818        thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
1819                                            50),
1820                    sc_mtu_to_threshold(dd->kernel_send_context[scontext],
1821                                        dd->vld[i].mtu,
1822                                        dd->rcd[0]->rcvhdrqentsize));
1823        sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
1824}
1825
1826/*
1827 * pio_map_init - called when #vls change
1828 * @dd: hfi1_devdata
1829 * @port: port number
1830 * @num_vls: number of vls
1831 * @vl_scontexts: per vl send context mapping (optional)
1832 *
1833 * This routine changes the mapping based on the number of vls.
1834 *
1835 * vl_scontexts is used to specify a non-uniform vl/send context
1836 * loading. NULL implies auto computing the loading and giving each
1837 * VL a uniform distribution of send contexts per VL.
1838 *
1839 * The auto algorithm computes the sc_per_vl and the number of extra
1840 * send contexts.  Any extra send contexts are added from the last VL
1841 * on down.
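 * For example, 9 kernel send contexts over 4 VLs gives sc_per_vl = 2
 * with one extra, so VL3 gets 3 contexts and VLs 0-2 get 2 each.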
1842 *
1843 * rcu locking is used here to control access to the mapping fields.
1844 *
1845 * If either num_vls or num_send_contexts is not a power of 2, the
1846 * array sizes in the struct pio_vl_map and the struct pio_map_elem are
1847 * rounded up to the next highest power of 2 and the first entry is
1848 * reused in a round robin fashion.
1849 *
1850 * If an error occurs, the mapping is not changed.
1852 *
1853 */
1854int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
1855{
1856        int i, j;
1857        int extra, sc_per_vl;
1858        int scontext = 1;
1859        int num_kernel_send_contexts = 0;
1860        u8 lvl_scontexts[OPA_MAX_VLS];
1861        struct pio_vl_map *oldmap, *newmap;
1862
1863        if (!vl_scontexts) {
1864                for (i = 0; i < dd->num_send_contexts; i++)
1865                        if (dd->send_contexts[i].type == SC_KERNEL)
1866                                num_kernel_send_contexts++;
1867                /* truncate divide */
1868                sc_per_vl = num_kernel_send_contexts / num_vls;
1869                /* extras */
1870                extra = num_kernel_send_contexts % num_vls;
1871                vl_scontexts = lvl_scontexts;
1872                /* add extras from last vl down */
1873                for (i = num_vls - 1; i >= 0; i--, extra--)
1874                        vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
1875        }
1876        /* build new map */
1877        newmap = kzalloc(sizeof(*newmap) +
1878                         roundup_pow_of_two(num_vls) *
1879                         sizeof(struct pio_map_elem *),
1880                         GFP_KERNEL);
1881        if (!newmap)
1882                goto bail;
1883        newmap->actual_vls = num_vls;
1884        newmap->vls = roundup_pow_of_two(num_vls);
1885        newmap->mask = (1 << ilog2(newmap->vls)) - 1;
1886        for (i = 0; i < newmap->vls; i++) {
1887                /* save for wrap around */
1888                int first_scontext = scontext;
1889
1890                if (i < newmap->actual_vls) {
1891                        int sz = roundup_pow_of_two(vl_scontexts[i]);
1892
1893                        /* only allocate once */
1894                        newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) +
1895                                                 sz * sizeof(struct
1896                                                             send_context *),
1897                                                 GFP_KERNEL);
1898                        if (!newmap->map[i])
1899                                goto bail;
1900                        newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1901                        /*
1902                         * assign send contexts and
1903                         * adjust credit return threshold
1904                         */
1905                        for (j = 0; j < sz; j++) {
1906                                if (dd->kernel_send_context[scontext]) {
1907                                        newmap->map[i]->ksc[j] =
1908                                        dd->kernel_send_context[scontext];
1909                                        set_threshold(dd, scontext, i);
1910                                }
1911                                if (++scontext >= first_scontext +
1912                                                  vl_scontexts[i])
1913                                        /* wrap back to first send context */
1914                                        scontext = first_scontext;
1915                        }
1916                } else {
1917                        /* just re-use entry without allocating */
1918                        newmap->map[i] = newmap->map[i % num_vls];
1919                }
1920                scontext = first_scontext + vl_scontexts[i];
1921        }
1922        /* newmap in hand, save old map */
1923        spin_lock_irq(&dd->pio_map_lock);
1924        oldmap = rcu_dereference_protected(dd->pio_map,
1925                                           lockdep_is_held(&dd->pio_map_lock));
1926
1927        /* publish newmap */
1928        rcu_assign_pointer(dd->pio_map, newmap);
1929
1930        spin_unlock_irq(&dd->pio_map_lock);
1931        /* success, free any old map after grace period */
1932        if (oldmap)
1933                call_rcu(&oldmap->list, pio_map_rcu_callback);
1934        return 0;
1935bail:
1936        /* free any partial allocation */
1937        pio_map_free(newmap);
1938        return -ENOMEM;
1939}
1940
1941void free_pio_map(struct hfi1_devdata *dd)
1942{
1943        /* Free PIO map if allocated */
1944        if (rcu_access_pointer(dd->pio_map)) {
1945                spin_lock_irq(&dd->pio_map_lock);
1946                pio_map_free(rcu_access_pointer(dd->pio_map));
1947                RCU_INIT_POINTER(dd->pio_map, NULL);
1948                spin_unlock_irq(&dd->pio_map_lock);
1949                synchronize_rcu();
1950        }
1951        kfree(dd->kernel_send_context);
1952        dd->kernel_send_context = NULL;
1953}
1954
1955int init_pervl_scs(struct hfi1_devdata *dd)
1956{
1957        int i;
1958        u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
1959        u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
1960        u32 ctxt;
1961        struct hfi1_pportdata *ppd = dd->pport;
1962
1963        dd->vld[15].sc = sc_alloc(dd, SC_VL15,
1964                                  dd->rcd[0]->rcvhdrqentsize, dd->node);
1965        if (!dd->vld[15].sc)
1966                return -ENOMEM;
1967
1968        hfi1_init_ctxt(dd->vld[15].sc);
1969        dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
1970
1971        dd->kernel_send_context = kzalloc_node(dd->num_send_contexts *
1972                                        sizeof(struct send_context *),
1973                                        GFP_KERNEL, dd->node);
1974        if (!dd->kernel_send_context)
1975                goto freesc15;
1976
1977        dd->kernel_send_context[0] = dd->vld[15].sc;
1978
1979        for (i = 0; i < num_vls; i++) {
1980                /*
1981                 * Since this function does not deal with a specific
1982                 * receive context but we need the RcvHdrQ entry size,
1983                 * use the size from rcd[0]. It is guaranteed to be
1984                 * valid at this point and will remain the same for all
1985                 * receive contexts.
1986                 */
1987                dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
1988                                         dd->rcd[0]->rcvhdrqentsize, dd->node);
1989                if (!dd->vld[i].sc)
1990                        goto nomem;
1991                dd->kernel_send_context[i + 1] = dd->vld[i].sc;
1992                hfi1_init_ctxt(dd->vld[i].sc);
1993                /* non-VL15 contexts start with the max MTU */
1994                dd->vld[i].mtu = hfi1_max_mtu;
1995        }
1996        for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
1997                dd->kernel_send_context[i + 1] =
1998                sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
1999                if (!dd->kernel_send_context[i + 1])
2000                        goto nomem;
2001                hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
2002        }
2003
2004        sc_enable(dd->vld[15].sc);
2005        ctxt = dd->vld[15].sc->hw_context;
2006        mask = all_vl_mask & ~(1LL << 15);
2007        write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2008        dd_dev_info(dd,
2009                    "Using send context %u(%u) for VL15\n",
2010                    dd->vld[15].sc->sw_index, ctxt);
2011
2012        for (i = 0; i < num_vls; i++) {
2013                sc_enable(dd->vld[i].sc);
2014                ctxt = dd->vld[i].sc->hw_context;
2015                mask = all_vl_mask & ~(data_vls_mask);
2016                write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2017        }
2018        for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
2019                sc_enable(dd->kernel_send_context[i + 1]);
2020                ctxt = dd->kernel_send_context[i + 1]->hw_context;
2021                mask = all_vl_mask & ~(data_vls_mask);
2022                write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2023        }
2024
2025        if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
2026                goto nomem;
2027        return 0;
2028
2029nomem:
2030        for (i = 0; i < num_vls; i++) {
2031                sc_free(dd->vld[i].sc);
2032                dd->vld[i].sc = NULL;
2033        }
2034
2035        for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
2036                sc_free(dd->kernel_send_context[i + 1]);
2037
2038        kfree(dd->kernel_send_context);
2039        dd->kernel_send_context = NULL;
2040
2041freesc15:
2042        sc_free(dd->vld[15].sc);
2043        return -ENOMEM;
2044}
2045
2046int init_credit_return(struct hfi1_devdata *dd)
2047{
2048        int ret;
2049        int i;
2050
2051        dd->cr_base = kcalloc(
2052                node_affinity.num_possible_nodes,
2053                sizeof(struct credit_return_base),
2054                GFP_KERNEL);
2055        if (!dd->cr_base) {
2056                ret = -ENOMEM;
2057                goto done;
2058        }
2059        for_each_node_with_cpus(i) {
2060                int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
2061
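                /*
                 * Temporarily set the device's node to node i so that the
                 * coherent allocation below is placed on that node; the
                 * node is restored to dd->node after the loop (and on the
                 * error path).
                 */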
2062                set_dev_node(&dd->pcidev->dev, i);
2063                dd->cr_base[i].va = dma_zalloc_coherent(
2064                                        &dd->pcidev->dev,
2065                                        bytes,
2066                                        &dd->cr_base[i].dma,
2067                                        GFP_KERNEL);
2068                if (!dd->cr_base[i].va) {
2069                        set_dev_node(&dd->pcidev->dev, dd->node);
2070                        dd_dev_err(dd,
2071                                   "Unable to allocate credit return DMA range for NUMA %d\n",
2072                                   i);
2073                        ret = -ENOMEM;
2074                        goto done;
2075                }
2076        }
2077        set_dev_node(&dd->pcidev->dev, dd->node);
2078
2079        ret = 0;
2080done:
2081        return ret;
2082}
2083
2084void free_credit_return(struct hfi1_devdata *dd)
2085{
2086        int i;
2087
2088        if (!dd->cr_base)
2089                return;
2090        for (i = 0; i < node_affinity.num_possible_nodes; i++) {
2091                if (dd->cr_base[i].va) {
2092                        dma_free_coherent(&dd->pcidev->dev,
2093                                          TXE_NUM_CONTEXTS *
2094                                          sizeof(struct credit_return),
2095                                          dd->cr_base[i].va,
2096                                          dd->cr_base[i].dma);
2097                }
2098        }
2099        kfree(dd->cr_base);
2100        dd->cr_base = NULL;
2101}
2102