linux/drivers/infiniband/hw/hfi1/sdma.h
#ifndef _HFI1_SDMA_H
#define _HFI1_SDMA_H
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include "hfi.h"
#include "verbs.h"
#include "sdma_txreq.h"

/* Hardware limit */
#define MAX_DESC 64
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)

#define SDMA_TXREQ_S_OK        0
#define SDMA_TXREQ_S_SENDERROR 1
#define SDMA_TXREQ_S_ABORTED   2
#define SDMA_TXREQ_S_SHUTDOWN  3

/* flags bits */
#define SDMA_TXREQ_F_URGENT       0x0001
#define SDMA_TXREQ_F_AHG_COPY     0x0002
#define SDMA_TXREQ_F_USE_AHG      0x0004

#define SDMA_MAP_NONE          0
#define SDMA_MAP_SINGLE        1
#define SDMA_MAP_PAGE          2

#define SDMA_AHG_VALUE_MASK          0xffff
#define SDMA_AHG_VALUE_SHIFT         0
#define SDMA_AHG_INDEX_MASK          0xf
#define SDMA_AHG_INDEX_SHIFT         16
#define SDMA_AHG_FIELD_LEN_MASK      0xf
#define SDMA_AHG_FIELD_LEN_SHIFT     20
#define SDMA_AHG_FIELD_START_MASK    0x1f
#define SDMA_AHG_FIELD_START_SHIFT   24
#define SDMA_AHG_UPDATE_ENABLE_MASK  0x1
#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31

/* AHG modes */

/*
 * Be aware the ordering and values
 * for SDMA_AHG_APPLY_UPDATE[123]
 * are assumed in generating a skip
 * count in submit_tx() in sdma.c
 */
#define SDMA_AHG_NO_AHG              0
#define SDMA_AHG_COPY                1
#define SDMA_AHG_APPLY_UPDATE1       2
#define SDMA_AHG_APPLY_UPDATE2       3
#define SDMA_AHG_APPLY_UPDATE3       4

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC0_FIRST_DESC_FLAG      BIT_ULL(63)
#define SDMA_DESC0_LAST_DESC_FLAG       BIT_ULL(62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT     48
#define SDMA_DESC0_BYTE_COUNT_WIDTH     14
#define SDMA_DESC0_BYTE_COUNT_MASK \
        ((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
#define SDMA_DESC0_BYTE_COUNT_SMASK \
        (SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
#define SDMA_DESC0_PHY_ADDR_SHIFT       0
#define SDMA_DESC0_PHY_ADDR_WIDTH       48
#define SDMA_DESC0_PHY_ADDR_MASK \
        ((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
#define SDMA_DESC0_PHY_ADDR_SMASK \
        (SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)

#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
        ((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
        (SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT    13
#define SDMA_DESC1_HEADER_MODE_WIDTH    3
#define SDMA_DESC1_HEADER_MODE_MASK \
        ((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
        (SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT   8
#define SDMA_DESC1_HEADER_INDEX_WIDTH   5
#define SDMA_DESC1_HEADER_INDEX_MASK \
        ((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
        (SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT     4
#define SDMA_DESC1_HEADER_DWS_WIDTH     4
#define SDMA_DESC1_HEADER_DWS_MASK \
        ((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
        (SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
#define SDMA_DESC1_GENERATION_SHIFT     2
#define SDMA_DESC1_GENERATION_WIDTH     2
#define SDMA_DESC1_GENERATION_MASK \
        ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
        (SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
#define SDMA_DESC1_INT_REQ_FLAG         BIT_ULL(1)
#define SDMA_DESC1_HEAD_TO_HOST_FLAG    BIT_ULL(0)

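/*
 * Orientation only, not driver code: a sketch of how the qw[0] fields
 * above combine.  For a single-fragment packet of 256 bytes at bus
 * address addr (an assumed variable), the descriptor would be
 *
 *      qw0 = SDMA_DESC0_FIRST_DESC_FLAG |
 *            SDMA_DESC0_LAST_DESC_FLAG |
 *            ((256ULL & SDMA_DESC0_BYTE_COUNT_MASK)
 *                      << SDMA_DESC0_BYTE_COUNT_SHIFT) |
 *            (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
 *                      << SDMA_DESC0_PHY_ADDR_SHIFT);
 *
 * make_tx_sdma_desc() below does the equivalent packing for real
 * descriptors.
 */
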
enum sdma_states {
        sdma_state_s00_hw_down,
        sdma_state_s10_hw_start_up_halt_wait,
        sdma_state_s15_hw_start_up_clean_wait,
        sdma_state_s20_idle,
        sdma_state_s30_sw_clean_up_wait,
        sdma_state_s40_hw_clean_up_wait,
        sdma_state_s50_hw_halt_wait,
        sdma_state_s60_idle_halt_wait,
        sdma_state_s80_hw_freeze,
        sdma_state_s82_freeze_sw_clean,
        sdma_state_s99_running,
};

enum sdma_events {
        sdma_event_e00_go_hw_down,
        sdma_event_e10_go_hw_start,
        sdma_event_e15_hw_halt_done,
        sdma_event_e25_hw_clean_up_done,
        sdma_event_e30_go_running,
        sdma_event_e40_sw_cleaned,
        sdma_event_e50_hw_cleaned,
        sdma_event_e60_hw_halted,
        sdma_event_e70_go_idle,
        sdma_event_e80_hw_freeze,
        sdma_event_e81_hw_frozen,
        sdma_event_e82_hw_unfreeze,
        sdma_event_e85_link_down,
        sdma_event_e90_sw_halted,
};

struct sdma_set_state_action {
        unsigned op_enable:1;
        unsigned op_intenable:1;
        unsigned op_halt:1;
        unsigned op_cleanup:1;
        unsigned go_s99_running_tofalse:1;
        unsigned go_s99_running_totrue:1;
};

struct sdma_state {
        struct kref          kref;
        struct completion    comp;
        enum sdma_states current_state;
        unsigned             current_op;
        unsigned             go_s99_running;
        /* debugging/development */
        enum sdma_states previous_state;
        unsigned             previous_op;
        enum sdma_events last_event;
};

/**
 * DOC: sdma exported routines
 *
 * These sdma routines fit into three categories:
 * - The SDMA API for building and submitting packets
 *   to the ring
 *
 * - Initialization and tear down routines to build up
 *   and tear down SDMA
 *
 * - ISR entrances to handle interrupts, state changes
 *   and errors
 */

/**
 * DOC: sdma PSM/verbs API
 *
 * The sdma API is designed to be used by both PSM
 * and verbs to supply packets to the SDMA ring.
 *
 * The usage of the API is as follows:
 *
 * Embed a struct iowait in the QP or
 * PQ.  The iowait should be initialized with a
 * call to iowait_init().
 *
 * The user of the API should create an allocation method
 * for their version of the txreq.  Slabs, pre-allocated lists,
 * and dma pools can be used.  Once the user's overload of
 * the sdma_txreq has been allocated, the sdma_txreq member
 * must be initialized with sdma_txinit() or sdma_txinit_ahg().
 *
 * The user's txreq structure must be declared with the
 * struct sdma_txreq as its first member.
 *
 * The tx request, once initialized, is manipulated with calls to
 * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
 * for each disjoint memory location.  It is the user's responsibility
 * to understand the packet boundaries and page boundaries to do the
 * appropriate number of sdma_txadd_* calls.  The user
 * must be prepared to deal with failures from these routines due to
 * either memory allocation or dma_mapping failures.
 *
 * The mapping specifics for each memory location are recorded
 * in the tx.  Memory locations added with sdma_txadd_page()
 * and sdma_txadd_kvaddr() are automatically mapped when added
 * to the tx and unmapped as part of the progress processing in the
 * SDMA interrupt handling.
 *
 * sdma_txadd_daddr() is used to add a dma_addr_t memory location to
 * the tx.  An example of a use case would be a pre-allocated
 * set of headers allocated via dma_pool_alloc() or
 * dma_alloc_coherent().  For these memory locations, it
 * is the responsibility of the user to handle the unmapping.
 * (This would usually be at an unload or job termination.)
 *
 * The routine sdma_send_txreq() is used to submit
 * a tx to the ring after the appropriate number of
 * sdma_txadd_* calls have been done.
 *
 * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
 * can be used to submit a list of packets.
 *
 * The user is free to use the link overhead in the struct sdma_txreq as
 * long as the tx isn't in flight.
 *
 * The extreme degenerate case of the number of descriptors
 * exceeding the ring size is automatically handled as
 * memory locations are added.  An overflow of the descriptor
 * array that is part of the sdma_txreq is also automatically
 * handled.
 *
 */

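/*
 * A minimal usage sketch of the API described above.  The wrapper
 * struct, cache, engine, iowait, and lengths are illustrative
 * assumptions, not part of this header:
 *
 *      struct my_txreq {
 *              struct sdma_txreq txreq;        (must be first member)
 *              ...caller private state...
 *      };
 *
 *      static void my_complete(struct sdma_txreq *t, int status)
 *      {
 *              struct my_txreq *mtx =
 *                      container_of(t, struct my_txreq, txreq);
 *              ...recycle mtx; may run from an ISR, so no sleeping...
 *      }
 *
 *      mtx = kmem_cache_alloc(my_cache, GFP_ATOMIC);
 *      ret = sdma_txinit(&mtx->txreq, 0, pbc_len + hdr_len + data_len,
 *                        my_complete);
 *      if (!ret)
 *              ret = sdma_txadd_kvaddr(dd, &mtx->txreq, hdr, hdr_len);
 *      if (!ret)
 *              ret = sdma_txadd_page(dd, &mtx->txreq, page, off, data_len);
 *      if (!ret)
 *              ret = sdma_send_txreq(sde, &priv->s_iowait, &mtx->txreq);
 */
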
/**
 * DOC: Infrastructure calls
 *
 * sdma_init() is used to initialize data structures and
 * CSRs for the desired number of SDMA engines.
 *
 * sdma_start() is used to kick the SDMA engines initialized
 * with sdma_init().  Interrupts must be enabled at this
 * point since aspects of the state machine are interrupt
 * driven.
 *
 * sdma_engine_error() and sdma_engine_interrupt() are
 * entrances for interrupts.
 *
 * sdma_map_init() is for the management of the mapping
 * table when the number of vls is changed.
 *
 */

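/*
 * Sketch of the expected bring-up ordering (error handling elided;
 * the counts and the NULL vl_engines argument are illustrative
 * assumptions):
 *
 *      ret = sdma_init(dd, port);              data structures and CSRs
 *      ...request and enable interrupts...
 *      sdma_start(dd);                         kick the engines
 *      ret = sdma_map_init(dd, port, num_vls, NULL);
 *      ...
 *      sdma_exit(dd);                          teardown on unload
 */
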
/*
 * struct hw_sdma_desc - raw 128 bit SDMA descriptor
 *
 * This is the raw descriptor in the SDMA ring
 */
struct hw_sdma_desc {
        /* private:  don't use directly */
        __le64 qw[2];
};

/**
 * struct sdma_engine - Data pertaining to each SDMA engine.
 * @dd: a back-pointer to the device data
 * @ppd: per port back-pointer
 * @imask: mask for irq manipulation
 * @idle_mask: mask for determining if an interrupt is due to sdma_idle
 *
 * This structure has the state for each sdma_engine.
 *
 * Accessing non-public fields is not supported
 * since the private members are subject to change.
 */
struct sdma_engine {
        /* read mostly */
        struct hfi1_devdata *dd;
        struct hfi1_pportdata *ppd;
        /* private: */
        void __iomem *tail_csr;
        u64 imask;                      /* clear interrupt mask */
        u64 idle_mask;
        u64 progress_mask;
        u64 int_mask;
        /* private: */
        volatile __le64      *head_dma; /* DMA'ed by chip */
        /* private: */
        dma_addr_t            head_phys;
        /* private: */
        struct hw_sdma_desc *descq;
        /* private: */
        unsigned descq_full_count;
        struct sdma_txreq **tx_ring;
        /* private: */
        dma_addr_t            descq_phys;
        /* private: */
        u32 sdma_mask;
        /* private: */
        struct sdma_state state;
        /* private: */
        int cpu;
        /* private: */
        u8 sdma_shift;
        /* private: */
        u8 this_idx; /* zero relative engine */
        /* protect changes to senddmactrl shadow */
        spinlock_t senddmactrl_lock;
        /* private: */
        u64 p_senddmactrl;              /* shadow per-engine SendDmaCtrl */

        /* read/write using tail_lock */
        spinlock_t            tail_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
        /* private: */
        u64                   tail_sn;
#endif
        /* private: */
        u32                   descq_tail;
        /* private: */
        unsigned long         ahg_bits;
        /* private: */
        u16                   desc_avail;
        /* private: */
        u16                   tx_tail;
        /* private: */
        u16 descq_cnt;

        /* read/write using head_lock */
        /* private: */
        seqlock_t            head_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
        /* private: */
        u64                   head_sn;
#endif
        /* private: */
        u32                   descq_head;
        /* private: */
        u16                   tx_head;
        /* private: */
        u64                   last_status;
        /* private: */
        u64                     err_cnt;
        /* private: */
        u64                     sdma_int_cnt;
        u64                     idle_int_cnt;
        u64                     progress_int_cnt;

        /* private: */
        struct list_head      dmawait;

        /* CONFIG SDMA for now, just blindly duplicate */
        /* private: */
        struct tasklet_struct sdma_hw_clean_up_task
                ____cacheline_aligned_in_smp;

        /* private: */
        struct tasklet_struct sdma_sw_clean_up_task
                ____cacheline_aligned_in_smp;
        /* private: */
        struct work_struct err_halt_worker;
        /* private: */
        struct timer_list     err_progress_check_timer;
        u32                   progress_check_head;
        /* private: */
        struct work_struct flush_worker;
        /* protect flush list */
        spinlock_t flushlist_lock;
        /* private: */
        struct list_head flushlist;
        struct cpumask cpu_mask;
        struct kobject kobj;
};

int sdma_init(struct hfi1_devdata *dd, u8 port);
void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_all_running(struct hfi1_devdata *dd);
void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
void sdma_wait(struct hfi1_devdata *dd);

/**
 * sdma_empty() - idle engine test
 * @sde: sdma engine
 *
 * Currently used by verbs as a latency optimization.
 *
 * Return:
 * 1 - empty, 0 - non-empty
 */
static inline int sdma_empty(struct sdma_engine *sde)
{
        return sde->descq_tail == sde->descq_head;
}

static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
        return sde->descq_cnt -
                (sde->descq_tail -
                 READ_ONCE(sde->descq_head)) - 1;
}

static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
{
        return sde->descq_cnt - sdma_descq_freecnt(sde);
}

/*
 * Either head_lock or tail_lock required to see
 * a steady state.
 */
static inline int __sdma_running(struct sdma_engine *engine)
{
        return engine->state.current_state == sdma_state_s99_running;
}

/**
 * sdma_running() - state suitability test
 * @engine: sdma engine
 *
 * sdma_running probes the internal state to determine if it is suitable
 * for submitting packets.
 *
 * Return:
 * 1 - ok to submit, 0 - not ok to submit
 *
 */
static inline int sdma_running(struct sdma_engine *engine)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->tail_lock, flags);
        ret = __sdma_running(engine);
        spin_unlock_irqrestore(&engine->tail_lock, flags);
        return ret;
}

void _sdma_txreq_ahgadd(
        struct sdma_txreq *tx,
        u8 num_ahg,
        u8 ahg_entry,
        u32 *ahg,
        u8 ahg_hlen);

/**
 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @ahg_entry: ahg entry to use  (0 - 31)
 * @num_ahg: number of AHG descriptors to apply to the first descriptor (0 - 9)
 * @ahg: array of AHG descriptors (up to 9 entries)
 * @ahg_hlen: number of bytes from ASIC entry to use
 * @cb: callback
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent.  This routine must be called to initialize the user independent
 * fields.
 *
 * The currently supported flags are SDMA_TXREQ_F_URGENT,
 * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
 * copied to the chip entry.  SDMA_TXREQ_F_USE_AHG causes the code to add
 * the AHG descriptors into the first 1 to 3 descriptors.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg().  The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used.  Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.  The callback will be provided this tx, a status, and
 * a flag.
 *
 * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 * The flag, if the iowait has been used, indicates that the iowait
 * sdma_busy count has reached zero.
 *
 * The user data portion of tlen should be precise.  The sdma_txadd_*
 * entrances will pad with a descriptor referencing 1 - 3 bytes when the
 * number of bytes specified in tlen have been supplied to the sdma_txreq.
 *
 * ahg_hlen is used to determine the number of on-chip entry bytes to
 * use as the header.  This is for cases where the stored header is
 * larger than the header to be used in a packet.  This is typical
 * for verbs where an RDMA_WRITE_FIRST is larger than the packet in
 * an RDMA_WRITE_MIDDLE.
 *
 */
static inline int sdma_txinit_ahg(
        struct sdma_txreq *tx,
        u16 flags,
        u16 tlen,
        u8 ahg_entry,
        u8 num_ahg,
        u32 *ahg,
        u8 ahg_hlen,
        void (*cb)(struct sdma_txreq *, int))
{
        if (tlen == 0)
                return -ENODATA;
        if (tlen > MAX_SDMA_PKT_SIZE)
                return -EMSGSIZE;
        tx->desc_limit = ARRAY_SIZE(tx->descs);
        tx->descp = &tx->descs[0];
        INIT_LIST_HEAD(&tx->list);
        tx->num_desc = 0;
        tx->flags = flags;
        tx->complete = cb;
        tx->coalesce_buf = NULL;
        tx->wait = NULL;
        tx->packet_len = tlen;
        tx->tlen = tx->packet_len;
        tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
        tx->descs[0].qw[1] = 0;
        if (flags & SDMA_TXREQ_F_AHG_COPY)
                tx->descs[0].qw[1] |=
                        (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
                                << SDMA_DESC1_HEADER_INDEX_SHIFT) |
                        (((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
                                << SDMA_DESC1_HEADER_MODE_SHIFT);
        else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
                _sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
        return 0;
}

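/*
 * Sketch of an AHG-based initialization.  The update values, field
 * positions, and my_complete are illustrative assumptions; the entry
 * index would come from sdma_ahg_alloc() and the update words from
 * sdma_build_ahg_descriptor() (both below):
 *
 *      u32 ahg[2];
 *      int ahg_index = sdma_ahg_alloc(sde);
 *
 *      ahg[0] = sdma_build_ahg_descriptor(new_val0, dw0, bit0, nbits0);
 *      ahg[1] = sdma_build_ahg_descriptor(new_val1, dw1, bit1, nbits1);
 *      ret = sdma_txinit_ahg(&mtx->txreq, SDMA_TXREQ_F_USE_AHG, tlen,
 *                            ahg_index, 2, ahg, hdr_len, my_complete);
 */
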
/**
 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @cb: callback pointer
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent.  This routine must be called to initialize the user
 * independent fields.
 *
 * The currently supported flag is SDMA_TXREQ_F_URGENT.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg().  The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used.  Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.
 *
 * The callback, if non-NULL, will be provided this tx and a status.  The
 * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 */
static inline int sdma_txinit(
        struct sdma_txreq *tx,
        u16 flags,
        u16 tlen,
        void (*cb)(struct sdma_txreq *, int))
{
        return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}

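/*
 * Sketch of the callback contract described above; my_complete and the
 * wrapper txreq are assumed names:
 *
 *      static void my_complete(struct sdma_txreq *t, int status)
 *      {
 *              if (status != SDMA_TXREQ_S_OK)
 *                      ...the ring flushed or errored this tx...
 *              ...free or recycle the enclosing structure here...
 *      }
 *
 *      ret = sdma_txinit(&mtx->txreq, SDMA_TXREQ_F_URGENT, tlen,
 *                        my_complete);
 */
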
/*
 * helpers - don't use.  Note that the mapping type (SDMA_MAP_*) is
 * stashed in the generation bits of qw[1] while the descriptor sits
 * in the txreq (see make_tx_sdma_desc() below), which is why
 * sdma_mapping_type() reads the GENERATION field.
 */
static inline int sdma_mapping_type(struct sdma_desc *d)
{
        return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
                >> SDMA_DESC1_GENERATION_SHIFT;
}

static inline size_t sdma_mapping_len(struct sdma_desc *d)
{
        return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
                >> SDMA_DESC0_BYTE_COUNT_SHIFT;
}

static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
{
        return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
                >> SDMA_DESC0_PHY_ADDR_SHIFT;
}

static inline void make_tx_sdma_desc(
        struct sdma_txreq *tx,
        int type,
        dma_addr_t addr,
        size_t len)
{
        struct sdma_desc *desc = &tx->descp[tx->num_desc];

        if (!tx->num_desc) {
                /* qw[0] zero; qw[1] first, ahg mode already in from init */
                desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
                                << SDMA_DESC1_GENERATION_SHIFT;
        } else {
                desc->qw[0] = 0;
                desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
                                << SDMA_DESC1_GENERATION_SHIFT;
        }
        desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
                                << SDMA_DESC0_PHY_ADDR_SHIFT) |
                        (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
                                << SDMA_DESC0_BYTE_COUNT_SHIFT);
}

/* helper to extend txreq */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
                           int type, void *kvaddr, struct page *page,
                           unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx);
void __sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx);

static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
        if (tx->num_desc)
                __sdma_txclean(dd, tx);
}

/* helpers used by public routines */
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
                                  struct sdma_txreq *tx)
{
        tx->descp[tx->num_desc].qw[0] |=
                SDMA_DESC0_LAST_DESC_FLAG;
        tx->descp[tx->num_desc].qw[1] |=
                dd->default_desc1;
        if (tx->flags & SDMA_TXREQ_F_URGENT)
                tx->descp[tx->num_desc].qw[1] |=
                        (SDMA_DESC1_HEAD_TO_HOST_FLAG |
                         SDMA_DESC1_INT_REQ_FLAG);
}

static inline int _sdma_txadd_daddr(
        struct hfi1_devdata *dd,
        int type,
        struct sdma_txreq *tx,
        dma_addr_t addr,
        u16 len)
{
        int rval = 0;

        make_tx_sdma_desc(
                tx,
                type,
                addr, len);
        WARN_ON(len > tx->tlen);
        tx->tlen -= len;
        /* special cases for last */
        if (!tx->tlen) {
                if (tx->packet_len & (sizeof(u32) - 1)) {
                        rval = _pad_sdma_tx_descs(dd, tx);
                        if (rval)
                                return rval;
                } else {
                        _sdma_close_tx(dd, tx);
                }
        }
        tx->num_desc++;
        return rval;
}

/**
 * sdma_txadd_page() - add a page to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
 * @len: length in bytes
 *
 * This is used to add a page/offset/length descriptor.
 *
 * The mapping/unmapping of the page/offset/len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
 * extend/coalesce descriptor array
 */
static inline int sdma_txadd_page(
        struct hfi1_devdata *dd,
        struct sdma_txreq *tx,
        struct page *page,
        unsigned long offset,
        u16 len)
{
        dma_addr_t addr;
        int rval;

        if (unlikely(tx->num_desc == tx->desc_limit)) {
                rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
                                              NULL, page, offset, len);
                if (rval <= 0)
                        return rval;
        }

        addr = dma_map_page(
                       &dd->pcidev->dev,
                       page,
                       offset,
                       len,
                       DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
                __sdma_txclean(dd, tx);
                return -ENOSPC;
        }

        return _sdma_txadd_daddr(
                        dd, SDMA_MAP_PAGE, tx, addr, len);
}

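/*
 * Sketch: walking a pinned user buffer one page at a time (pages[],
 * npages, and the whole-page lengths are assumptions).  Note the
 * dma-mapping failure path above already cleans the tx:
 *
 *      for (i = 0; i < npages; i++) {
 *              ret = sdma_txadd_page(dd, &mtx->txreq, pages[i], 0,
 *                                    PAGE_SIZE);
 *              if (ret)
 *                      break;
 *      }
 */
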
/**
 * sdma_txadd_daddr() - add a dma address to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the address is added
 * @addr: dma address mapped by caller
 * @len: length in bytes
 *
 * This is used to add a descriptor for memory that is already dma mapped.
 *
 * In this case, there is no unmapping as part of the progress processing for
 * this memory location.
 *
 * Return:
 * 0 - success, -ENOMEM - couldn't extend descriptor array
 */
static inline int sdma_txadd_daddr(
        struct hfi1_devdata *dd,
        struct sdma_txreq *tx,
        dma_addr_t addr,
        u16 len)
{
        int rval;

        if (unlikely(tx->num_desc == tx->desc_limit)) {
                rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
                                              NULL, NULL, 0, 0);
                if (rval <= 0)
                        return rval;
        }

        return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
}

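/*
 * Sketch: a header that was mapped once at allocation time from a dma
 * pool (pool, hdr_phys, and hdr_len are assumptions); the caller keeps
 * ownership of the mapping and frees it itself at teardown:
 *
 *      hdr = dma_pool_alloc(pool, GFP_ATOMIC, &hdr_phys);
 *      ...fill in *hdr...
 *      ret = sdma_txadd_daddr(dd, &mtx->txreq, hdr_phys, hdr_len);
 *      ...later, dma_pool_free(pool, hdr, hdr_phys);...
 */
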
/**
 * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the buffer is added
 * @kvaddr: the kernel virtual address
 * @len: length in bytes
 *
 * This is used to add a descriptor referenced by the indicated kvaddr and
 * len.
 *
 * The mapping/unmapping of the kvaddr and len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce
 * descriptor array
 */
static inline int sdma_txadd_kvaddr(
        struct hfi1_devdata *dd,
        struct sdma_txreq *tx,
        void *kvaddr,
        u16 len)
{
        dma_addr_t addr;
        int rval;

        if (unlikely(tx->num_desc == tx->desc_limit)) {
                rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
                                              kvaddr, NULL, 0, len);
                if (rval <= 0)
                        return rval;
        }

        addr = dma_map_single(
                       &dd->pcidev->dev,
                       kvaddr,
                       len,
                       DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
                __sdma_txclean(dd, tx);
                return -ENOSPC;
        }

        return _sdma_txadd_daddr(
                        dd, SDMA_MAP_SINGLE, tx, addr, len);
}

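/*
 * Sketch: adding a kernel-resident header by virtual address; the API
 * does the dma_map_single()/unmap for the caller (priv is an assumed
 * wrapper):
 *
 *      ret = sdma_txadd_kvaddr(dd, &mtx->txreq, &priv->hdr,
 *                              sizeof(priv->hdr));
 */
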
struct iowait;

int sdma_send_txreq(struct sdma_engine *sde,
                    struct iowait *wait,
                    struct sdma_txreq *tx);
int sdma_send_txlist(struct sdma_engine *sde,
                     struct iowait *wait,
                     struct list_head *tx_list,
                     u32 *count);

int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);

/**
 * sdma_build_ahg_descriptor() - build ahg descriptor
 * @data: 16 bit value to place in the header field
 * @dwindex: dword index into the header
 * @startbit: starting bit position of the field to update
 * @bits: length of the field in bits
 *
 * Build and return a 32 bit AHG descriptor.
 */
static inline u32 sdma_build_ahg_descriptor(
        u16 data,
        u8 dwindex,
        u8 startbit,
        u8 bits)
{
        return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
                ((startbit & SDMA_AHG_FIELD_START_MASK) <<
                SDMA_AHG_FIELD_START_SHIFT) |
                ((bits & SDMA_AHG_FIELD_LEN_MASK) <<
                SDMA_AHG_FIELD_LEN_SHIFT) |
                ((dwindex & SDMA_AHG_INDEX_MASK) <<
                SDMA_AHG_INDEX_SHIFT) |
                ((data & SDMA_AHG_VALUE_MASK) <<
                SDMA_AHG_VALUE_SHIFT));
}

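/*
 * Sketch: an update word that overwrites an 8 bit field starting at
 * bit 4 of header dword 7 with new_val (the field choice is purely
 * illustrative):
 *
 *      u32 update = sdma_build_ahg_descriptor(new_val, 7, 4, 8);
 *
 * Per the masks above this sets the update-enable bit, the dword
 * index, the field start and length, and the 16 bit value.
 */
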
/**
 * sdma_progress - use seq number to detect head progress
 * @sde: sdma_engine to check
 * @seq: base seq count
 * @tx: txreq for which we need to check descriptor availability
 *
 * This is used in the appropriate spot in the sleep routine
 * to check for potential ring progress.  This routine gets the
 * seqcount before queuing the iowait structure for progress.
 *
 * If the seqcount indicates that progress needs to be checked,
 * re-submission is detected by checking whether the descriptor
 * queue has enough descriptors for the txreq.
 */
static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
                                     struct sdma_txreq *tx)
{
        if (read_seqretry(&sde->head_lock, seq)) {
                sde->desc_avail = sdma_descq_freecnt(sde);
                if (tx->num_desc > sde->desc_avail)
                        return 0;
                return 1;
        }
        return 0;
}

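/*
 * Sketch of the intended call pattern in a sleep routine; the retry
 * label, the full-ring test, and the queuing step are the caller's
 * (assumed) logic:
 *
 *      seq = read_seqbegin(&sde->head_lock);
 *      ...decide the ring is too full for this tx...
 *      if (sdma_progress(sde, seq, tx))
 *              goto retry;        head advanced; try submitting again
 *      ...queue the iowait on sde->dmawait...
 */
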
/**
 * sdma_iowait_schedule() - schedule an iowait for progress
 * @sde: sdma_engine the wait is associated with
 * @wait: wait struct to schedule
 *
 * This function schedules the iowait
 * structure embedded in the QP or PQ
 * on the port's send workqueue.
 *
 */
static inline void sdma_iowait_schedule(
        struct sdma_engine *sde,
        struct iowait *wait)
{
        struct hfi1_pportdata *ppd = sde->dd->pport;

        iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
}

/* for use by interrupt handling */
void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);

/*
 *
 * The diagram below details the relationship of the mapping structures
 *
 * Since the mapping now allows for non-uniform engines per vl, the
 * number of engines for a vl is either vl_engines[vl] or
 * a computation based on num_sdma/num_vls:
 *
 * For example:
 * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
 *
 * n = roundup to next highest power of 2 using nactual
 *
 * In the case where num_sdma/num_vls doesn't divide
 * evenly, the extras are added from the last vl downward.
 *
 * For the case where n > nactual, the engines are assigned
 * in a round robin fashion wrapping back to the first engine
 * for a particular vl.
 *
 *               dd->sdma_map
 *                    |                                   sdma_map_elem[0]
 *                    |                                +--------------------+
 *                    v                                |       mask         |
 *               sdma_vl_map                           |--------------------|
 *      +--------------------------+                   | sde[0] -> eng 1    |
 *      |    list (RCU)            |                   |--------------------|
 *      |--------------------------|                 ->| sde[1] -> eng 2    |
 *      |    mask                  |              --/  |--------------------|
 *      |--------------------------|            -/     |        *           |
 *      |    actual_vls (max 8)    |          -/       |--------------------|
 *      |--------------------------|       --/         | sde[n] -> eng n    |
 *      |    vls (max 8)           |     -/            +--------------------+
 *      |--------------------------|  --/
 *      |    map[0]                |-/
 *      |--------------------------|                   +--------------------+
 *      |    map[1]                |---                |       mask         |
 *      |--------------------------|   \----           |--------------------|
 *      |           *              |        \--        | sde[0] -> eng 1+n  |
 *      |           *              |           \----   |--------------------|
 *      |           *              |                \->| sde[1] -> eng 2+n  |
 *      |--------------------------|                   |--------------------|
 *      |   map[vls - 1]           |-                  |         *          |
 *      +--------------------------+ \-                |--------------------|
 *                                     \-              | sde[m] -> eng m+n  |
 *                                       \             +--------------------+
 *                                        \-
 *                                          \
 *                                           \-        +--------------------+
 *                                             \-      |       mask         |
 *                                               \     |--------------------|
 *                                                \-   | sde[0] -> eng 1+m+n|
 *                                                  \- |--------------------|
 *                                                    >| sde[1] -> eng 2+m+n|
 *                                                     |--------------------|
 *                                                     |         *          |
 *                                                     |--------------------|
 *                                                     | sde[o] -> eng o+m+n|
 *                                                     +--------------------+
 *
 */

/**
 * struct sdma_map_elem - mapping for a vl
 * @mask: selector mask
 * @sde: array of engines for this vl
 *
 * The mask is used to "mod" the selector
 * to produce index into the trailing
 * array of sdes.
 */
struct sdma_map_elem {
        u32 mask;
        struct sdma_engine *sde[0];
};

/**
 * struct sdma_vl_map - mapping for a vl
 * @engine_to_vl: map of an engine to a vl
 * @list: rcu head for free callback
 * @mask: vl mask to "mod" the vl to produce an index to map array
 * @actual_vls: number of vls
 * @vls: number of vls rounded to next power of 2
 * @map: array of sdma_map_elem entries
 *
 * This is the parent mapping structure.  The trailing
 * members of the struct point to sdma_map_elem entries, which
 * in turn point to an array of sde's for that vl.
 */
struct sdma_vl_map {
        s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
        struct rcu_head list;
        u32 mask;
        u8 actual_vls;
        u8 vls;
        struct sdma_map_elem *map[0];
};

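/*
 * Sketch of the lookup the selection routines perform over these
 * structures (simplified; the real code also honors engine_to_vl and
 * runs under RCU):
 *
 *      rcu_read_lock();
 *      m = rcu_dereference(dd->sdma_map);
 *      e = m->map[vl & m->mask];
 *      sde = e->sde[selector & e->mask];
 *      rcu_read_unlock();
 */
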
int sdma_map_init(
        struct hfi1_devdata *dd,
        u8 port,
        u8 num_vls,
        u8 *vl_engines);

/* slow path */
void _sdma_engine_progress_schedule(struct sdma_engine *sde);

/**
 * sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 * This is the fast path.
 *
 */
static inline void sdma_engine_progress_schedule(
        struct sdma_engine *sde)
{
        if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
                return;
        _sdma_engine_progress_schedule(sde);
}

struct sdma_engine *sdma_select_engine_sc(
        struct hfi1_devdata *dd,
        u32 selector,
        u8 sc5);

struct sdma_engine *sdma_select_engine_vl(
        struct hfi1_devdata *dd,
        u32 selector,
        u8 vl);

struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
                                            u32 selector, u8 vl);
ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf);
ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
                                size_t count);
int sdma_engine_get_vl(struct sdma_engine *sde);
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde);
void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd,
                                unsigned long cpuid);

#ifdef CONFIG_SDMA_VERBOSITY
void sdma_dumpstate(struct sdma_engine *sde);
#endif
static inline char *slashstrip(char *s)
{
        char *r = s;

        while (*s)
                if (*s++ == '/')
                        r = s;
        return r;
}

u16 sdma_get_descq_cnt(void);

extern uint mod_num_sdma;

void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);

#endif