linux/drivers/infiniband/hw/hfi1/iowait.h
<<
>>
Prefs
   1#ifndef _HFI1_IOWAIT_H
   2#define _HFI1_IOWAIT_H
   3/*
   4 * Copyright(c) 2015, 2016 Intel Corporation.
   5 *
   6 * This file is provided under a dual BSD/GPLv2 license.  When using or
   7 * redistributing this file, you may do so under either license.
   8 *
   9 * GPL LICENSE SUMMARY
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of version 2 of the GNU General Public License as
  13 * published by the Free Software Foundation.
  14 *
  15 * This program is distributed in the hope that it will be useful, but
  16 * WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  18 * General Public License for more details.
  19 *
  20 * BSD LICENSE
  21 *
  22 * Redistribution and use in source and binary forms, with or without
  23 * modification, are permitted provided that the following conditions
  24 * are met:
  25 *
  26 *  - Redistributions of source code must retain the above copyright
  27 *    notice, this list of conditions and the following disclaimer.
  28 *  - Redistributions in binary form must reproduce the above copyright
  29 *    notice, this list of conditions and the following disclaimer in
  30 *    the documentation and/or other materials provided with the
  31 *    distribution.
  32 *  - Neither the name of Intel Corporation nor the names of its
  33 *    contributors may be used to endorse or promote products derived
  34 *    from this software without specific prior written permission.
  35 *
  36 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  37 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  38 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  39 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  40 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  41 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  42 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  43 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  44 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  45 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  46 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  47 *
  48 */
  49
  50#include <linux/list.h>
  51#include <linux/workqueue.h>
  52#include <linux/sched.h>
  53
  54#include "sdma_txreq.h"
  55
/*
 * typedef restart_t - restart callback run from the iowait workqueue
 * @work: pointer to the embedded work structure (wait->iowork)
 */
typedef void (*restart_t)(struct work_struct *work);

/* forward declarations; a pointer is all this header needs */
struct sdma_txreq;
struct sdma_engine;
/**
 * struct iowait - linkage for delayed progress/waiting
 * @list: used to add/insert into QP/PQ wait lists
 * @tx_head: overflow list of sdma_txreq's
 * @sleep: no space callback
 * @wakeup: space callback wakeup
 * @sdma_drained: sdma count drained
 * @lock: used to record the list head lock
 * @iowork: workqueue overhead
 * @wait_dma: wait for sdma_busy == 0
 * @wait_pio: wait for pio_busy == 0
 * @sdma_busy: # of packets in flight
 * @pio_busy: # of pio packets in flight
 * @count: total number of descriptors in tx_head'ed list
 * @tx_limit: limit for overflow queuing
 * @tx_count: number of tx entry's in tx_head'ed list
 *
 * This is to be embedded in user's state structure
 * (QP or PQ).
 *
 * The sleep and wakeup members are a
 * bit misnamed.   They do not strictly
 * speaking sleep or wake up, but they
 * are callbacks for the ULP to implement
 * what ever queuing/dequeuing of
 * the embedded iowait and its containing struct
 * when a resource shortage like SDMA ring space is seen.
 *
 * Both potentially have locks held
 * so sleeping is not allowed.
 *
 * NOTE(review): the original comment here was truncated
 * ("The wait_dma member along with the iow") -- presumably it
 * described how wait_dma cooperates with iowork in the drain
 * paths; confirm intended wording against the driver code.
 *
 * The lock field is used by waiters to record
 * the seqlock_t that guards the list head.
 * Waiters explicitly know that, but the destroy
 * code that unwaits QPs does not.
 */

struct iowait {
	struct list_head list;
	struct list_head tx_head;
	int (*sleep)(
		struct sdma_engine *sde,
		struct iowait *wait,
		struct sdma_txreq *tx,
		unsigned seq);
	void (*wakeup)(struct iowait *wait, int reason);
	void (*sdma_drained)(struct iowait *wait);
	seqlock_t *lock;
	struct work_struct iowork;
	wait_queue_head_t wait_dma;
	wait_queue_head_t wait_pio;
	atomic_t sdma_busy;
	atomic_t pio_busy;
	u32 count;
	u32 tx_limit;
	u32 tx_count;
};
 122
/* reason value passed to the ->wakeup() callback (see iowait.wakeup) */
#define SDMA_AVAIL_REASON 0
 124
 125/**
 126 * iowait_init() - initialize wait structure
 127 * @wait: wait struct to initialize
 128 * @tx_limit: limit for overflow queuing
 129 * @func: restart function for workqueue
 130 * @sleep: sleep function for no space
 131 * @resume: wakeup function for no space
 132 *
 133 * This function initializes the iowait
 134 * structure embedded in the QP or PQ.
 135 *
 136 */
 137
 138static inline void iowait_init(
 139        struct iowait *wait,
 140        u32 tx_limit,
 141        void (*func)(struct work_struct *work),
 142        int (*sleep)(
 143                struct sdma_engine *sde,
 144                struct iowait *wait,
 145                struct sdma_txreq *tx,
 146                unsigned seq),
 147        void (*wakeup)(struct iowait *wait, int reason),
 148        void (*sdma_drained)(struct iowait *wait))
 149{
 150        wait->count = 0;
 151        wait->lock = NULL;
 152        INIT_LIST_HEAD(&wait->list);
 153        INIT_LIST_HEAD(&wait->tx_head);
 154        INIT_WORK(&wait->iowork, func);
 155        init_waitqueue_head(&wait->wait_dma);
 156        init_waitqueue_head(&wait->wait_pio);
 157        atomic_set(&wait->sdma_busy, 0);
 158        atomic_set(&wait->pio_busy, 0);
 159        wait->tx_limit = tx_limit;
 160        wait->sleep = sleep;
 161        wait->wakeup = wakeup;
 162        wait->sdma_drained = sdma_drained;
 163}
 164
/**
 * iowait_schedule() - schedule the iowait's work item on a workqueue
 * @wait: wait struct whose iowork is queued
 * @wq: workqueue to queue the work on
 * @cpu: cpu on which to run the work
 */
static inline void iowait_schedule(
	struct iowait *wait,
	struct workqueue_struct *wq,
	int cpu)
{
	queue_work_on(cpu, wq, &wait->iowork);
}
 178
/**
 * iowait_sdma_drain() - wait for DMAs to drain
 *
 * @wait: iowait structure
 *
 * This will delay until the iowait sdmas have
 * completed.
 *
 * Sleeps (uninterruptibly, via wait_event()) until sdma_busy reaches
 * zero, so it must only be called from a context that may sleep.
 */
static inline void iowait_sdma_drain(struct iowait *wait)
{
	wait_event(wait->wait_dma, !atomic_read(&wait->sdma_busy));
}
 191
/**
 * iowait_sdma_pending() - return sdma pending count
 *
 * @wait: iowait structure
 *
 * Return: current value of sdma_busy (# of sdma packets in flight).
 */
static inline int iowait_sdma_pending(struct iowait *wait)
{
	return atomic_read(&wait->sdma_busy);
}
 202
/**
 * iowait_sdma_inc - note sdma io pending
 * @wait: iowait structure
 *
 * Bumps sdma_busy by one; paired with iowait_sdma_dec() on completion.
 */
static inline void iowait_sdma_inc(struct iowait *wait)
{
	atomic_inc(&wait->sdma_busy);
}
 211
/**
 * iowait_sdma_add - add count to pending
 * @wait: iowait structure
 * @count: number of pending sdma ios to add to sdma_busy
 */
static inline void iowait_sdma_add(struct iowait *wait, int count)
{
	atomic_add(count, &wait->sdma_busy);
}
 220
/**
 * iowait_sdma_dec - note sdma complete
 * @wait: iowait structure
 *
 * Return: non-zero when this decrement brings sdma_busy to zero,
 * i.e. the last outstanding sdma has completed.
 */
static inline int iowait_sdma_dec(struct iowait *wait)
{
	return atomic_dec_and_test(&wait->sdma_busy);
}
 229
/**
 * iowait_pio_drain() - wait for pios to drain
 *
 * @wait: iowait structure
 *
 * This will delay until the iowait pios have
 * completed.
 *
 * Unlike iowait_sdma_drain(), the wait is bounded: it gives up
 * after HZ jiffies (~1 second) even if pio_busy is still non-zero.
 */
static inline void iowait_pio_drain(struct iowait *wait)
{
	wait_event_timeout(wait->wait_pio,
			   !atomic_read(&wait->pio_busy),
			   HZ);
}
 244
/**
 * iowait_pio_pending() - return pio pending count
 *
 * @wait: iowait structure
 *
 * Return: current value of pio_busy (# of pio packets in flight).
 */
static inline int iowait_pio_pending(struct iowait *wait)
{
	return atomic_read(&wait->pio_busy);
}
 255
/**
 * iowait_pio_inc - note pio pending
 * @wait: iowait structure
 *
 * Bumps pio_busy by one; paired with iowait_pio_dec() on completion.
 */
static inline void iowait_pio_inc(struct iowait *wait)
{
	atomic_inc(&wait->pio_busy);
}
 264
/**
 * iowait_pio_dec - note pio complete
 * @wait: iowait structure
 *
 * Return: non-zero when this decrement brings pio_busy to zero,
 * i.e. the last outstanding pio has completed.
 */
static inline int iowait_pio_dec(struct iowait *wait)
{
	return atomic_dec_and_test(&wait->pio_busy);
}
 273
/**
 * iowait_drain_wakeup() - trigger iowait_drain() waiter
 *
 * @wait: iowait structure
 *
 * This will trigger any waiters.
 *
 * Wakes both the sdma and pio drain waitqueues, then invokes the
 * optional sdma_drained callback if one was registered.
 */
static inline void iowait_drain_wakeup(struct iowait *wait)
{
	wake_up(&wait->wait_dma);
	wake_up(&wait->wait_pio);
	if (wait->sdma_drained)
		wait->sdma_drained(wait);
}
 288
 289/**
 290 * iowait_get_txhead() - get packet off of iowait list
 291 *
 292 * @wait wait struture
 293 */
 294static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait)
 295{
 296        struct sdma_txreq *tx = NULL;
 297
 298        if (!list_empty(&wait->tx_head)) {
 299                tx = list_first_entry(
 300                        &wait->tx_head,
 301                        struct sdma_txreq,
 302                        list);
 303                list_del_init(&tx->list);
 304        }
 305        return tx;
 306}
 307
 308#endif
 309