/*
 * linux/drivers/s390/cio/qdio.h
 *
 * Copyright 2000,2009 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *            Jan Glauber <jang@linux.vnet.ibm.com>
 */
#ifndef _CIO_QDIO_H
#define _CIO_QDIO_H

#include <asm/page.h>
#include <asm/schid.h>
#include <asm/debug.h>
#include "chsc.h"

#define QDIO_BUSY_BIT_PATIENCE          (100 << 12)     /* 100 microseconds */
#define QDIO_INPUT_THRESHOLD            (500 << 12)     /* 500 microseconds */

/*
 * If an asynchronous HiperSockets queue runs full, the 10 second timer wait
 * until the next initiative to give transmitted skbs back to the stack is too
 * long. Therefore polling is started once the multicast queue is filled by
 * more than 50 percent.
 */
#define QDIO_IQDIO_POLL_LVL             65      /* HS multicast queue */

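/*
 * Illustrative note (an editor's sketch, not from the original source): with
 * QDIO_MAX_BUFFERS_PER_Q being 128, a fill level above QDIO_IQDIO_POLL_LVL
 * roughly corresponds to the "more than 50 percent" condition above, e.g. a
 * check along the lines of:
 *
 *      if (atomic_read(&q->nr_buf_used) > QDIO_IQDIO_POLL_LVL)
 *              ... start polling the multicast output queue ...
 */
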
enum qdio_irq_states {
        QDIO_IRQ_STATE_INACTIVE,
        QDIO_IRQ_STATE_ESTABLISHED,
        QDIO_IRQ_STATE_ACTIVE,
        QDIO_IRQ_STATE_STOPPED,
        QDIO_IRQ_STATE_CLEANUP,
        QDIO_IRQ_STATE_ERR,
        NR_QDIO_IRQ_STATES,
};

/* used as intparm in do_IO */
#define QDIO_DOING_ESTABLISH    1
#define QDIO_DOING_ACTIVATE     2
#define QDIO_DOING_CLEANUP      3

#define SLSB_STATE_NOT_INIT     0x0
#define SLSB_STATE_EMPTY        0x1
#define SLSB_STATE_PRIMED       0x2
#define SLSB_STATE_HALTED       0xe
#define SLSB_STATE_ERROR        0xf
#define SLSB_TYPE_INPUT         0x0
#define SLSB_TYPE_OUTPUT        0x20
#define SLSB_OWNER_PROG         0x80
#define SLSB_OWNER_CU           0x40

#define SLSB_P_INPUT_NOT_INIT   \
        (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT)  /* 0x80 */
#define SLSB_P_INPUT_ACK        \
        (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY)     /* 0x81 */
#define SLSB_CU_INPUT_EMPTY     \
        (SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY)       /* 0x41 */
#define SLSB_P_INPUT_PRIMED     \
        (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED)    /* 0x82 */
#define SLSB_P_INPUT_HALTED     \
        (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED)    /* 0x8e */
#define SLSB_P_INPUT_ERROR      \
        (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR)     /* 0x8f */
#define SLSB_P_OUTPUT_NOT_INIT  \
        (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
#define SLSB_P_OUTPUT_EMPTY     \
        (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY)    /* 0xa1 */
#define SLSB_CU_OUTPUT_PRIMED   \
        (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED)     /* 0x62 */
#define SLSB_P_OUTPUT_HALTED    \
        (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED)   /* 0xae */
#define SLSB_P_OUTPUT_ERROR     \
        (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR)    /* 0xaf */

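/*
 * Illustrative sketch (not part of the original header): an SLSB entry is a
 * single byte composed of owner, type and state bits, so the combined values
 * above can be taken apart again with simple masks, e.g. for
 * SLSB_CU_OUTPUT_PRIMED (0x62):
 *
 *      unsigned char val = SLSB_CU_OUTPUT_PRIMED;
 *      int cu_owned  = (val & (SLSB_OWNER_PROG | SLSB_OWNER_CU)) == SLSB_OWNER_CU;
 *      int is_output = (val & SLSB_TYPE_OUTPUT) == SLSB_TYPE_OUTPUT;
 *      int state     = val & 0xf;      yields SLSB_STATE_PRIMED
 *
 * The mask 0xf for the state part is an assumption for this example, derived
 * from the state values defined above.
 */
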
#define SLSB_ERROR_DURING_LOOKUP  0xff

/* additional CIWs returned by extended Sense-ID */
#define CIW_TYPE_EQUEUE                 0x3 /* establish QDIO queues */
#define CIW_TYPE_AQUEUE                 0x4 /* activate QDIO queues */

/* flags for st qdio sch data */
#define CHSC_FLAG_QDIO_CAPABILITY       0x80
#define CHSC_FLAG_VALIDITY              0x40

/* qdio adapter-characteristics-1 flag */
#define AC1_SIGA_INPUT_NEEDED           0x40    /* process input queues */
#define AC1_SIGA_OUTPUT_NEEDED          0x20    /* process output queues */
#define AC1_SIGA_SYNC_NEEDED            0x10    /* ask hypervisor to sync */
#define AC1_AUTOMATIC_SYNC_ON_THININT   0x08    /* set by hypervisor */
#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI   0x04    /* set by hypervisor */
#define AC1_SC_QEBSM_AVAILABLE          0x02    /* available for subchannel */
#define AC1_SC_QEBSM_ENABLED            0x01    /* enabled for subchannel */

/* SIGA flags */
#define QDIO_SIGA_WRITE         0x00
#define QDIO_SIGA_READ          0x01
#define QDIO_SIGA_SYNC          0x02
#define QDIO_SIGA_QEBSM_FLAG    0x80

#ifdef CONFIG_64BIT
/* set the state of one or more SBALs via the QEBSM SQBS instruction */
static inline int do_sqbs(u64 token, unsigned char state, int queue,
                          int *start, int *count)
{
        register unsigned long _ccq asm ("0") = *count;
        register unsigned long _token asm ("1") = token;
        unsigned long _queuestart = ((unsigned long)queue << 32) | *start;

        asm volatile(
                "       .insn   rsy,0xeb000000008A,%1,0,0(%2)"
                : "+d" (_ccq), "+d" (_queuestart)
                : "d" ((unsigned long)state), "d" (_token)
                : "memory", "cc");
        *count = _ccq & 0xff;
        *start = _queuestart & 0xff;

        return (_ccq >> 32) & 0xff;
}

/* extract the states of one or more SBALs via the QEBSM EQBS instruction */
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
                          int *start, int *count, int ack)
{
        register unsigned long _ccq asm ("0") = *count;
        register unsigned long _token asm ("1") = token;
        unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
        unsigned long _state = (unsigned long)ack << 63;

        asm volatile(
                "       .insn   rrf,0xB99c0000,%1,%2,0,0"
                : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
                : "d" (_token)
                : "memory", "cc");
        *count = _ccq & 0xff;
        *start = _queuestart & 0xff;
        *state = _state & 0xff;

        return (_ccq >> 32) & 0xff;
}
#else
static inline int do_sqbs(u64 token, unsigned char state, int queue,
                          int *start, int *count) { return 0; }
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
                          int *start, int *count, int ack) { return 0; }
#endif /* CONFIG_64BIT */

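/*
 * Usage sketch (editor's illustration, not from the original source): the
 * condition code returned by do_sqbs()/do_eqbs() may indicate that only part
 * of the requested buffers was processed, in which case start/count have been
 * updated and the caller can simply retry, roughly like this (the helper
 * ccq_needs_retry() is a placeholder for whatever CCQ evaluation the caller
 * does):
 *
 *      int start = bufnr;
 *      int tmp_count = count;
 *      unsigned int ccq;
 *
 *      do {
 *              ccq = do_sqbs(q->irq_ptr->sch_token, state, q->nr,
 *                            &start, &tmp_count);
 *      } while (ccq_needs_retry(ccq) && tmp_count);
 */
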
struct qdio_irq;

struct siga_flag {
        u8 input:1;
        u8 output:1;
        u8 sync:1;
        u8 sync_after_ai:1;
        u8 sync_out_after_pci:1;
        u8:3;
} __attribute__ ((packed));

struct chsc_ssqd_area {
        struct chsc_header request;
        u16:10;
        u8 ssid:2;
        u8 fmt:4;
        u16 first_sch;
        u16:16;
        u16 last_sch;
        u32:32;
        struct chsc_header response;
        u32:32;
        struct qdio_ssqd_desc qdio_ssqd;
} __attribute__ ((packed));

struct scssc_area {
        struct chsc_header request;
        u16 operation_code;
        u16:16;
        u32:32;
        u32:32;
        u64 summary_indicator_addr;
        u64 subchannel_indicator_addr;
        u32 ks:4;
        u32 kc:4;
        u32:21;
        u32 isc:3;
        u32 word_with_d_bit;
        u32:32;
        struct subchannel_id schid;
        u32 reserved[1004];
        struct chsc_header response;
        u32:32;
} __attribute__ ((packed));

struct qdio_dev_perf_stat {
        unsigned int adapter_int;
        unsigned int qdio_int;
        unsigned int pci_request_int;

        unsigned int tasklet_inbound;
        unsigned int tasklet_inbound_resched;
        unsigned int tasklet_inbound_resched2;
        unsigned int tasklet_outbound;

        unsigned int siga_read;
        unsigned int siga_write;
        unsigned int siga_sync;

        unsigned int inbound_call;
        unsigned int inbound_handler;
        unsigned int stop_polling;
        unsigned int inbound_queue_full;
        unsigned int outbound_call;
        unsigned int outbound_handler;
        unsigned int outbound_queue_full;
        unsigned int fast_requeue;
        unsigned int target_full;
        unsigned int eqbs;
        unsigned int eqbs_partial;
        unsigned int sqbs;
        unsigned int sqbs_partial;
        unsigned int int_discarded;
} ____cacheline_aligned;

struct qdio_queue_perf_stat {
        /*
         * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
         * Since at most 127 SBALs are scanned, the entry for 128 is reused
         * for "queue full", i.e. 127 SBALs found.
         */
        unsigned int nr_sbals[8];
        unsigned int nr_sbal_error;
        unsigned int nr_sbal_nop;
        unsigned int nr_sbal_total;
};

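/*
 * Bucketing sketch (editor's illustration, not from the original source):
 * a scan result of 'count' SBALs falls into bucket ilog2(count), e.g. counts
 * 4..7 all increment nr_sbals[2], while the "queue full" case of 127 SBALs is
 * accounted in the last entry:
 *
 *      static inline void account_sbals_example(struct qdio_q *q, int count)
 *      {
 *              q->q_stats.nr_sbal_total += count;
 *              if (count == QDIO_MAX_BUFFERS_MASK)
 *                      q->q_stats.nr_sbals[7]++;
 *              else
 *                      q->q_stats.nr_sbals[ilog2(count)]++;
 *      }
 */
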
enum qdio_queue_irq_states {
        QDIO_QUEUE_IRQS_DISABLED,
};

struct qdio_input_q {
        /* input buffer acknowledgement flag */
        int polling;
        /* first ACK'ed buffer */
        int ack_start;
        /* how many SBALs are acknowledged with QEBSM */
        int ack_count;
        /* last time of noticing incoming data */
        u64 timestamp;
        /* upper-layer polling flag */
        unsigned long queue_irq_state;
        /* callback to start upper-layer polling */
        void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
};
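
/*
 * Usage sketch (editor's illustration, not from the original source): an
 * upper-layer driver that wants to poll instead of taking interrupts can set
 * queue_start_poll when establishing the queues; roughly speaking, qdio then
 * marks the queue's interrupts as disabled (QDIO_QUEUE_IRQS_DISABLED in
 * queue_irq_state) and invokes the callback so the driver can schedule its
 * own polling context:
 *
 *      static void my_queue_start_poll(struct ccw_device *cdev, int queue,
 *                                      unsigned long driver_data)
 *      {
 *              ... schedule the driver's poller (e.g. NAPI) ...
 *      }
 *
 * my_queue_start_poll() and driver_data are example names, not part of qdio.
 */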

struct qdio_output_q {
        /* PCIs are enabled for the queue */
        int pci_out_enabled;
        /* timer to check for more outbound work */
        struct timer_list timer;
        /* used SBALs before tasklet schedule */
        int scan_threshold;
};

/*
 * Note on cache alignment: grouped slsb and write-mostly data at the
 * beginning; sbal[] is read-only and starts on a new cacheline, followed by
 * read-mostly data.
 */
struct qdio_q {
        struct slsb slsb;

        union {
                struct qdio_input_q in;
                struct qdio_output_q out;
        } u;

        /*
         * inbound: next buffer the program should check for
         * outbound: next buffer to check if adapter processed it
         */
        int first_to_check;

        /* first_to_check of the last time */
        int last_move;

        /* beginning position for calling the program */
        int first_to_kick;

        /* number of buffers in use by the adapter */
        atomic_t nr_buf_used;

        /* error condition during a data transfer */
        unsigned int qdio_error;

        struct tasklet_struct tasklet;
        struct qdio_queue_perf_stat q_stats;

        struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;

        /* queue number */
        int nr;

        /* bitmask of queue number */
        int mask;

        /* input or output queue */
        int is_input_q;

        /* list of thinint input queues */
        struct list_head entry;

        /* upper-layer program handler */
        qdio_handler_t (*handler);

        struct dentry *debugfs_q;
        struct qdio_irq *irq_ptr;
        struct sl *sl;
        /*
         * A page is allocated under this pointer and used for slib and sl.
         * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
         */
        struct slib *slib;
} __attribute__ ((aligned(256)));

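/*
 * Layout sketch (editor's illustration, not from the original source): the
 * comment above implies that slib and sl share one page, with sl starting in
 * the second half, so the sl pointer can be derived from the slib pointer:
 *
 *      q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
 */
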
struct qdio_irq {
        struct qib qib;
        u32 *dsci;              /* address of device state change indicator */
        struct ccw_device *cdev;
        struct dentry *debugfs_dev;
        struct dentry *debugfs_perf;

        unsigned long int_parm;
        struct subchannel_id schid;
        unsigned long sch_token;        /* QEBSM facility */

        enum qdio_irq_states state;

        struct siga_flag siga_flag;     /* siga sync information from qdioac */

        int nr_input_qs;
        int nr_output_qs;

        struct ccw1 ccw;
        struct ciw equeue;
        struct ciw aqueue;

        struct qdio_ssqd_desc ssqd_desc;
        void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);

        int perf_stat_enabled;

        struct qdr *qdr;
        unsigned long chsc_page;

        struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
        struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];

        debug_info_t *debug_area;
        struct mutex setup_mutex;
        struct qdio_dev_perf_stat perf_stat;
};

/* helper functions */
#define queue_type(q)   q->irq_ptr->qib.qfmt
#define SCH_NO(q)       (q->irq_ptr->schid.sch_no)

#define is_thinint_irq(irq) \
        (irq->qib.qfmt == QDIO_IQDIO_QFMT || \
         css_general_characteristics.aif_osa)

#define qperf(__qdev, __attr)   ((__qdev)->perf_stat.__attr)

#define qperf_inc(__q, __attr)                                          \
({                                                                      \
        struct qdio_irq *qdev = (__q)->irq_ptr;                         \
        if (qdev->perf_stat_enabled)                                    \
                (qdev->perf_stat.__attr)++;                             \
})

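/*
 * Usage sketch (editor's illustration, not from the original source): the
 * per-device counters are bumped from the fast path only when statistics are
 * enabled, e.g.:
 *
 *      qperf_inc(q, siga_write);
 *
 * which expands to an increment of q->irq_ptr->perf_stat.siga_write guarded
 * by perf_stat_enabled.
 */
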
static inline void account_sbals_error(struct qdio_q *q, int count)
{
        q->q_stats.nr_sbal_error += count;
        q->q_stats.nr_sbal_total += count;
}

/* the highest iqdio queue is used for multicast */
static inline int multicast_outbound(struct qdio_q *q)
{
        return (q->irq_ptr->nr_output_qs > 1) &&
               (q->nr == q->irq_ptr->nr_output_qs - 1);
}

#define pci_out_supported(q) \
        (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q)                     (q->irq_ptr->sch_token != 0)

#define need_siga_in(q)                 (q->irq_ptr->siga_flag.input)
#define need_siga_out(q)                (q->irq_ptr->siga_flag.output)
#define need_siga_sync(q)               (unlikely(q->irq_ptr->siga_flag.sync))
#define need_siga_sync_after_ai(q)      \
        (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
#define need_siga_sync_out_after_pci(q) \
        (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))

#define for_each_input_queue(irq_ptr, q, i)     \
        for (i = 0, q = irq_ptr->input_qs[0];   \
                i < irq_ptr->nr_input_qs;       \
                q = irq_ptr->input_qs[++i])
#define for_each_output_queue(irq_ptr, q, i)    \
        for (i = 0, q = irq_ptr->output_qs[0];  \
                i < irq_ptr->nr_output_qs;      \
                q = irq_ptr->output_qs[++i])

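/*
 * Usage sketch (editor's illustration, not from the original source):
 *
 *      struct qdio_q *q;
 *      int i;
 *
 *      for_each_input_queue(irq_ptr, q, i)
 *              tasklet_schedule(&q->tasklet);
 */
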
#define prev_buf(bufnr) \
        ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
#define next_buf(bufnr) \
        ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
#define add_buf(bufnr, inc) \
        ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
#define sub_buf(bufnr, dec) \
        ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)

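/*
 * Example (editor's illustration, not from the original source): with
 * QDIO_MAX_BUFFERS_MASK being 127, the buffer index arithmetic above wraps
 * around modulo 128:
 *
 *      next_buf(127)    == 0
 *      prev_buf(0)      == 127
 *      add_buf(120, 10) == 2
 */
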
#define queue_irqs_enabled(q)                   \
        (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
#define queue_irqs_disabled(q)                  \
        (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)

#define TIQDIO_SHARED_IND               63

/* device state change indicators */
struct indicator_t {
        u32 ind;        /* u32 because of compare-and-swap performance */
        atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};

extern struct indicator_t *q_indicators;

static inline int shared_ind(u32 *dsci)
{
        return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

/* prototypes for thin interrupt */
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
void tiqdio_inbound_processing(unsigned long q);
int tiqdio_allocate_memory(void);
void tiqdio_free_memory(void);
int tiqdio_register_thinints(void);
void tiqdio_unregister_thinints(void);

/* prototypes for setup */
void qdio_inbound_processing(unsigned long data);
void qdio_outbound_processing(unsigned long data);
void qdio_outbound_timer(unsigned long data);
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb);
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
                     int nr_output_qs);
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
                        struct subchannel_id *schid,
                        struct qdio_ssqd_desc *data);
int qdio_setup_irq(struct qdio_initialize *init_data);
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
                                struct ccw_device *cdev);
void qdio_release_memory(struct qdio_irq *irq_ptr);
int qdio_setup_create_sysfs(struct ccw_device *cdev);
void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
int qdio_setup_init(void);
void qdio_setup_exit(void);

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
                        unsigned char *state);
#endif /* _CIO_QDIO_H */