linux/drivers/s390/cio/qdio_main.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *            Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
        "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
                               unsigned int out_mask, unsigned int in_mask,
                               unsigned int fc)
{
        register unsigned long __fc asm ("0") = fc;
        register unsigned long __schid asm ("1") = schid;
        register unsigned long out asm ("2") = out_mask;
        register unsigned long in asm ("3") = in_mask;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
        return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
                                unsigned int fc)
{
        register unsigned long __fc asm ("0") = fc;
        register unsigned long __schid asm ("1") = schid;
        register unsigned long __mask asm ("2") = mask;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
        return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
                                 unsigned int *bb, unsigned int fc,
                                 unsigned long aob)
{
        register unsigned long __fc asm("0") = fc;
        register unsigned long __schid asm("1") = schid;
        register unsigned long __mask asm("2") = mask;
        register unsigned long __aob asm("3") = aob;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc), "+d" (__fc), "+d" (__aob)
                : "d" (__schid), "d" (__mask)
                : "cc");
        *bb = __fc >> 31;
        return cc;
}
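
/*
 * Editor's note, not part of the original source: a hedged sketch of how
 * the callers in this file interpret the SIGA results (mirroring
 * qdio_siga_output() and qdio_kick_outbound_q() below):
 *
 *      cc = do_siga_output(schid, mask, &busy_bit, fc, laob);
 *      switch (cc) {
 *      case 0:         // request accepted
 *              break;
 *      case 2:         // device busy; busy_bit (extracted above via
 *              break;  // "__fc >> 31") decides between retry and -ENOBUFS
 *      case 1:
 *      case 3:         // subchannel error, mapped to -EIO by the callers
 *              break;
 *      }
 */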

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
        /* all done or next buffer state different */
        if (ccq == 0 || ccq == 32)
                return 0;
        /* no buffer processed */
        if (ccq == 97)
                return 1;
        /* not all buffers processed */
        if (ccq == 96)
                return 2;
        /* notify devices immediately */
        DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
        return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
                        int start, int count, int auto_ack)
{
        int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
        unsigned int ccq = 0;

        qperf_inc(q, eqbs);

        if (!q->is_input_q)
                nr += q->irq_ptr->nr_input_qs;
again:
        ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
                      auto_ack);
        rc = qdio_check_ccq(q, ccq);
        if (!rc)
                return count - tmp_count;

        if (rc == 1) {
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
                goto again;
        }

        if (rc == 2) {
                qperf_inc(q, eqbs_partial);
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
                        tmp_count);
                /*
                 * Retry once, if that fails bail out and process the
                 * extracted buffers before trying again.
                 */
                if (!retried++)
                        goto again;
                else
                        return count - tmp_count;
        }

        DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
        DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
                   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
        return 0;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
                        int count)
{
        unsigned int ccq = 0;
        int tmp_count = count, tmp_start = start;
        int nr = q->nr;
        int rc;

        if (!count)
                return 0;
        qperf_inc(q, sqbs);

        if (!q->is_input_q)
                nr += q->irq_ptr->nr_input_qs;
again:
        ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
        rc = qdio_check_ccq(q, ccq);
        if (!rc) {
                WARN_ON_ONCE(tmp_count);
                return count - tmp_count;
        }

        if (rc == 1 || rc == 2) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
                qperf_inc(q, sqbs_partial);
                goto again;
        }

        DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
        DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
                   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
        return 0;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
                                 unsigned char *state, unsigned int count,
                                 int auto_ack, int merge_pending)
{
        unsigned char __state = 0;
        int i;

        if (is_qebsm(q))
                return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

        for (i = 0; i < count; i++) {
                if (!__state) {
                        __state = q->slsb.val[bufnr];
                        if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
                                __state = SLSB_P_OUTPUT_EMPTY;
                } else if (merge_pending) {
                        if ((q->slsb.val[bufnr] & __state) != __state)
                                break;
                } else if (q->slsb.val[bufnr] != __state)
                        break;
                bufnr = next_buf(bufnr);
        }
        *state = __state;
        return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
                                unsigned char *state, int auto_ack)
{
        return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
                                 unsigned char state, int count)
{
        int i;

        if (is_qebsm(q))
                return qdio_do_sqbs(q, state, bufnr, count);

        for (i = 0; i < count; i++) {
                xchg(&q->slsb.val[bufnr], state);
                bufnr = next_buf(bufnr);
        }
        return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
                                unsigned char state)
{
        return set_buf_states(q, bufnr, state, 1);
}
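
/*
 * Editor's note, not part of the original source: the SLSB is a ring of
 * QDIO_MAX_BUFFERS_PER_Q (128) entries, and the next_buf()/add_buf()/
 * sub_buf() helpers (assumed to come from qdio.h) wrap buffer numbers by
 * masking with QDIO_MAX_BUFFERS_MASK (0x7f).  A minimal worked sketch of
 * why set_buf_states() above is wrap-around safe:
 *
 *      next_buf(127)    == 0   // (127 + 1) & 0x7f, wraps to the start
 *      add_buf(120, 16) == 8   // (120 + 16) & 0x7f
 *      sub_buf(8, 120)  == 16  // distance from 120 to 8 across the wrap
 */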

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;
        int i;

        for_each_input_queue(irq_ptr, q, i)
                set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
                               QDIO_MAX_BUFFERS_PER_Q);
        for_each_output_queue(irq_ptr, q, i)
                set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
                               QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
                          unsigned int input)
{
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_SYNC;
        int cc;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
        qperf_inc(q, siga_sync);

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }

        cc = do_siga_sync(schid, output, input, fc);
        if (unlikely(cc))
                DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
        return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
        if (q->is_input_q)
                return qdio_siga_sync(q, 0, q->mask);
        else
                return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
        unsigned long aob)
{
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_WRITE;
        u64 start_time = 0;
        int retries = 0, cc;
        unsigned long laob = 0;

        WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) ||
                             !q->u.out.use_cq));
        if (q->u.out.use_cq && aob != 0) {
                fc = QDIO_SIGA_WRITEQ;
                laob = aob;
        }

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }
again:
        cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

        /* HiperSockets busy condition */
        if (unlikely(*busy_bit)) {
                retries++;

                if (!start_time) {
                        start_time = get_tod_clock_fast();
                        goto again;
                }
                if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
                        goto again;
        }
        if (retries) {
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
                              "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
        }
        return cc;
}
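
/*
 * Editor's note, not part of the original source: the busy-bit loop above
 * bounds its retries by elapsed time rather than by iteration count.
 * QDIO_BUSY_BIT_PATIENCE is defined in qdio.h and is assumed to be
 * expressed in TOD-clock units, the same unit that get_tod_clock_fast()
 * returns, which is why the plain difference of two TOD values can be
 * compared against it directly.
 */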

static inline int qdio_siga_input(struct qdio_q *q)
{
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_READ;
        int cc;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
        qperf_inc(q, siga_read);

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }

        cc = do_siga_input(schid, q->mask, fc);
        if (unlikely(cc))
                DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
        return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
        /* PCI capable outbound queues will also be scanned so sync them too */
        if (pci_out_supported(q))
                qdio_siga_sync_all(q);
        else
                qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
                        unsigned char *state)
{
        if (need_siga_sync(q))
                qdio_siga_sync_q(q);
        return get_buf_states(q, bufnr, state, 1, 0, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
        if (!q->u.in.polling)
                return;

        q->u.in.polling = 0;
        qperf_inc(q, stop_polling);

        /* show the card that we are not polling anymore */
        if (is_qebsm(q)) {
                set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
                               q->u.in.ack_count);
                q->u.in.ack_count = 0;
        } else
                set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
        int pos;

        q->q_stats.nr_sbal_total += count;
        if (count == QDIO_MAX_BUFFERS_MASK) {
                q->q_stats.nr_sbals[7]++;
                return;
        }
        pos = ilog2(count);
        q->q_stats.nr_sbals[pos]++;
}

static void process_buffer_error(struct qdio_q *q, int count)
{
        unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
                                        SLSB_P_OUTPUT_NOT_INIT;

        q->qdio_error = QDIO_ERROR_SLSB_STATE;

        /* special handling for no target buffer empty */
        if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
            q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
                qperf_inc(q, target_full);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
                              q->first_to_check);
                goto set;
        }

        DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
        DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
        DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
        DBF_ERROR("F14:%2x F15:%2x",
                  q->sbal[q->first_to_check]->element[14].sflags,
                  q->sbal[q->first_to_check]->element[15].sflags);

set:
        /*
         * Interrupts may be avoided as long as the error is present
         * so change the buffer state immediately to avoid starvation.
         */
        set_buf_states(q, q->first_to_check, state, count);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
        int new;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);

        /* for QEBSM the ACK was already set by EQBS */
        if (is_qebsm(q)) {
                if (!q->u.in.polling) {
                        q->u.in.polling = 1;
                        q->u.in.ack_count = count;
                        q->u.in.ack_start = q->first_to_check;
                        return;
                }

                /* delete the previous ACKs */
                set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
                               q->u.in.ack_count);
                q->u.in.ack_count = count;
                q->u.in.ack_start = q->first_to_check;
                return;
        }

        /*
         * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
         * or by the next inbound run.
         */
        new = add_buf(q->first_to_check, count - 1);
        if (q->u.in.polling) {
                /* reset the previous ACK but first set the new one */
                set_buf_state(q, new, SLSB_P_INPUT_ACK);
                set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
        } else {
                q->u.in.polling = 1;
                set_buf_state(q, new, SLSB_P_INPUT_ACK);
        }

        q->u.in.ack_start = new;
        count--;
        if (!count)
                return;
        /* need to change ALL buffers to get more interrupts */
        set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
        int count, stop;
        unsigned char state = 0;

        q->timestamp = get_tod_clock_fast();

        /*
         * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
         * would return 0.
         */
        count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
        stop = add_buf(q->first_to_check, count);

        if (q->first_to_check == stop)
                goto out;

        /*
         * No siga-sync here: the queues were already synced, either by a
         * PCI interrupt or by us after a thin interrupt.
         */
        count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
        if (!count)
                goto out;

        switch (state) {
        case SLSB_P_INPUT_PRIMED:
                inbound_primed(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
                if (atomic_sub_return(count, &q->nr_buf_used) == 0)
                        qperf_inc(q, inbound_queue_full);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals(q, count);
                break;
        case SLSB_P_INPUT_ERROR:
                process_buffer_error(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
                if (atomic_sub_return(count, &q->nr_buf_used) == 0)
                        qperf_inc(q, inbound_queue_full);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals_error(q, count);
                break;
        case SLSB_CU_INPUT_EMPTY:
        case SLSB_P_INPUT_NOT_INIT:
        case SLSB_P_INPUT_ACK:
                if (q->irq_ptr->perf_stat_enabled)
                        q->q_stats.nr_sbal_nop++;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
                        q->nr, q->first_to_check);
                break;
        default:
                WARN_ON_ONCE(1);
        }
out:
        return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
        int bufnr;

        bufnr = get_inbound_buffer_frontier(q);

        if (bufnr != q->last_move) {
                q->last_move = bufnr;
                if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
                        q->u.in.timestamp = get_tod_clock();
                return 1;
        } else
                return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
        unsigned char state = 0;

        if (!atomic_read(&q->nr_buf_used))
                return 1;

        if (need_siga_sync(q))
                qdio_siga_sync_q(q);
        get_buf_state(q, q->first_to_check, &state, 0);

        if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
                /* more work coming */
                return 0;

        if (is_thinint_irq(q->irq_ptr))
                return 1;

        /* don't poll under z/VM */
        if (MACHINE_IS_VM)
                return 1;

        /*
         * At this point we know that the inbound first_to_check
         * has (probably) not moved (see qdio_inbound_processing).
         */
        if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
                              q->first_to_check);
                return 1;
        } else
                return 0;
}

static inline int contains_aobs(struct qdio_q *q)
{
        return !q->is_input_q && q->u.out.use_cq;
}

static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
        unsigned char state = 0;
        int j, b = start;

        if (!contains_aobs(q))
                return;

        for (j = 0; j < count; ++j) {
                get_buf_state(q, b, &state, 0);
                if (state == SLSB_P_OUTPUT_PENDING) {
                        struct qaob *aob = q->u.out.aobs[b];
                        if (aob == NULL)
                                continue;

                        q->u.out.sbal_state[b].flags |=
                                QDIO_OUTBUF_STATE_FLAG_PENDING;
                        q->u.out.aobs[b] = NULL;
                } else if (state == SLSB_P_OUTPUT_EMPTY) {
                        q->u.out.sbal_state[b].aob = NULL;
                }
                b = next_buf(b);
        }
}

static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
                                        int bufnr)
{
        unsigned long phys_aob = 0;

        if (!q->use_cq)
                goto out;

        if (!q->aobs[bufnr]) {
                struct qaob *aob = qdio_allocate_aob();
                q->aobs[bufnr] = aob;
        }
        if (q->aobs[bufnr]) {
                q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
                q->sbal_state[bufnr].aob = q->aobs[bufnr];
                q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
                phys_aob = virt_to_phys(q->aobs[bufnr]);
                WARN_ON_ONCE(phys_aob & 0xFF);
        }

out:
        return phys_aob;
}

static void qdio_kick_handler(struct qdio_q *q)
{
        int start = q->first_to_kick;
        int end = q->first_to_check;
        int count;

        if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;

        count = sub_buf(end, start);

        if (q->is_input_q) {
                qperf_inc(q, inbound_handler);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
        } else {
                qperf_inc(q, outbound_handler);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
                              start, count);
        }

        qdio_handle_aobs(q, start, count);

        q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
                   q->irq_ptr->int_parm);

        /* for the next time */
        q->first_to_kick = end;
        q->qdio_error = 0;
}

static inline int qdio_tasklet_schedule(struct qdio_q *q)
{
        if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
                tasklet_schedule(&q->tasklet);
                return 0;
        }
        return -EPERM;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
        qperf_inc(q, tasklet_inbound);

        if (!qdio_inbound_q_moved(q))
                return;

        qdio_kick_handler(q);

        if (!qdio_inbound_q_done(q)) {
                /* means poll time is not yet over */
                qperf_inc(q, tasklet_inbound_resched);
                if (!qdio_tasklet_schedule(q))
                        return;
        }

        qdio_stop_polling(q);
        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (!qdio_inbound_q_done(q)) {
                qperf_inc(q, tasklet_inbound_resched2);
                qdio_tasklet_schedule(q);
        }
}

void qdio_inbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
        int count, stop;
        unsigned char state = 0;

        q->timestamp = get_tod_clock_fast();

        if (need_siga_sync(q))
                if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
                    !pci_out_supported(q)) ||
                    (queue_type(q) == QDIO_IQDIO_QFMT &&
                    multicast_outbound(q)))
                        qdio_siga_sync_q(q);

        /*
         * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
         * would return 0.
         */
        count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
        stop = add_buf(q->first_to_check, count);
        if (q->first_to_check == stop)
                goto out;

        count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
        if (!count)
                goto out;

        switch (state) {
        case SLSB_P_OUTPUT_EMPTY:
                /* the adapter got it */
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
                        "out empty:%1d %02x", q->nr, count);

                atomic_sub(count, &q->nr_buf_used);
                q->first_to_check = add_buf(q->first_to_check, count);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals(q, count);

                break;
        case SLSB_P_OUTPUT_ERROR:
                process_buffer_error(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
                atomic_sub(count, &q->nr_buf_used);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals_error(q, count);
                break;
        case SLSB_CU_OUTPUT_PRIMED:
                /* the adapter has not fetched the output yet */
                if (q->irq_ptr->perf_stat_enabled)
                        q->q_stats.nr_sbal_nop++;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
                              q->nr);
                break;
        case SLSB_P_OUTPUT_NOT_INIT:
        case SLSB_P_OUTPUT_HALTED:
                break;
        default:
                WARN_ON_ONCE(1);
        }

out:
        return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
        return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
        int bufnr;

        bufnr = get_outbound_buffer_frontier(q);

        if (bufnr != q->last_move) {
                q->last_move = bufnr;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
                return 1;
        } else
                return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
        int retries = 0, cc;
        unsigned int busy_bit;

        if (!need_siga_out(q))
                return 0;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
        qperf_inc(q, siga_write);

        cc = qdio_siga_output(q, &busy_bit, aob);
        switch (cc) {
        case 0:
                break;
        case 2:
                if (busy_bit) {
                        while (++retries < QDIO_BUSY_BIT_RETRIES) {
                                mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
                                goto retry;
                        }
                        DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
                        cc = -EBUSY;
                } else {
                        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
                        cc = -ENOBUFS;
                }
                break;
        case 1:
        case 3:
                DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
                cc = -EIO;
                break;
        }
        if (retries) {
                DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
                DBF_ERROR("count:%u", retries);
        }
        return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
        qperf_inc(q, tasklet_outbound);
        WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);

        if (qdio_outbound_q_moved(q))
                qdio_kick_handler(q);

        if (queue_type(q) == QDIO_ZFCP_QFMT)
                if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
                        goto sched;

        if (q->u.out.pci_out_enabled)
                return;

        /*
         * Now we know that queue type is either qeth without pci enabled
         * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
         * is noticed and outbound_handler is called after some time.
         */
        if (qdio_outbound_q_done(q))
                del_timer_sync(&q->u.out.timer);
        else
                if (!timer_pending(&q->u.out.timer) &&
                    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
                        mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
        return;

sched:
        qdio_tasklet_schedule(q);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __qdio_outbound_processing(q);
}

void qdio_outbound_timer(struct timer_list *t)
{
        struct qdio_q *q = from_timer(q, t, u.out.timer);

        qdio_tasklet_schedule(q);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
        struct qdio_q *out;
        int i;

        if (!pci_out_supported(q))
                return;

        for_each_output_queue(q->irq_ptr, out, i)
                if (!qdio_outbound_q_done(out))
                        qdio_tasklet_schedule(out);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
        qperf_inc(q, tasklet_inbound);
        if (need_siga_sync(q) && need_siga_sync_after_ai(q))
                qdio_sync_queues(q);

        /*
         * The interrupt could be caused by a PCI request. Check the
         * PCI capable outbound queues.
         */
        qdio_check_outbound_after_thinint(q);

        if (!qdio_inbound_q_moved(q))
                return;

        qdio_kick_handler(q);

        if (!qdio_inbound_q_done(q)) {
                qperf_inc(q, tasklet_inbound_resched);
                if (!qdio_tasklet_schedule(q))
                        return;
        }

        qdio_stop_polling(q);
        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (!qdio_inbound_q_done(q)) {
                qperf_inc(q, tasklet_inbound_resched2);
                qdio_tasklet_schedule(q);
        }
}

void tiqdio_inbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
                                  enum qdio_irq_states state)
{
        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

        irq_ptr->state = state;
        mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
        if (irb->esw.esw0.erw.cons) {
                DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
                DBF_ERROR_HEX(irb, 64);
                DBF_ERROR_HEX(irb->ecw, 64);
        }
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
        int i;
        struct qdio_q *q;

        if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;

        for_each_input_queue(irq_ptr, q, i) {
                if (q->u.in.queue_start_poll) {
                        /* skip if polling is enabled or already in work */
                        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
                                     &q->u.in.queue_irq_state)) {
                                qperf_inc(q, int_discarded);
                                continue;
                        }
                        q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
                                                 q->irq_ptr->int_parm);
                } else {
                        tasklet_schedule(&q->tasklet);
                }
        }

        if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
                return;

        for_each_output_queue(irq_ptr, q, i) {
                if (qdio_outbound_q_done(q))
                        continue;
                if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
                        qdio_siga_sync_q(q);
                qdio_tasklet_schedule(q);
        }
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
                                unsigned long intparm, int cstat, int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;
        int count;

        DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
        DBF_ERROR("intp :%lx", intparm);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

        if (irq_ptr->nr_input_qs) {
                q = irq_ptr->input_qs[0];
        } else if (irq_ptr->nr_output_qs) {
                q = irq_ptr->output_qs[0];
        } else {
                dump_stack();
                goto no_handler;
        }

        count = sub_buf(q->first_to_check, q->first_to_kick);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
                   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
        /*
         * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will
         * happen. Therefore we call the LGR detection function here.
         */
        lgr_info_log();
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
                                      int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

        if (cstat)
                goto error;
        if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
                goto error;
        if (!(dstat & DEV_STAT_DEV_END))
                goto error;
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
        return;

error:
        DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct subchannel_id schid;
        int cstat, dstat;

        if (!intparm || !irq_ptr) {
                ccw_device_get_schid(cdev, &schid);
                DBF_ERROR("qint:%4x", schid.sch_no);
                return;
        }

        if (irq_ptr->perf_stat_enabled)
                irq_ptr->perf_stat.qdio_int++;

        if (IS_ERR(irb)) {
                DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
                wake_up(&cdev->private->wait_q);
                return;
        }
        qdio_irq_check_sense(irq_ptr, irb);
        cstat = irb->scsw.cmd.cstat;
        dstat = irb->scsw.cmd.dstat;

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_INACTIVE:
                qdio_establish_handle_irq(cdev, cstat, dstat);
                break;
        case QDIO_IRQ_STATE_CLEANUP:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
                break;
        case QDIO_IRQ_STATE_ESTABLISHED:
        case QDIO_IRQ_STATE_ACTIVE:
                if (cstat & SCHN_STAT_PCI) {
                        qdio_int_handler_pci(irq_ptr);
                        return;
                }
                if (cstat || dstat)
                        qdio_handle_activate_check(cdev, intparm, cstat,
                                                   dstat);
                break;
        case QDIO_IRQ_STATE_STOPPED:
                break;
        default:
                WARN_ON_ONCE(1);
        }
        wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
                       struct qdio_ssqd_desc *data)
{
        struct subchannel_id schid;

        if (!cdev || !cdev->private)
                return -EINVAL;

        ccw_device_get_schid(cdev, &schid);
        DBF_EVENT("get ssqd:%4x", schid.sch_no);
        return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
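
/*
 * Editor's note, not part of the original source: a hedged usage sketch
 * for a hypothetical caller; drivers such as qeth query the SSQD to learn
 * the subchannel's capabilities before setting up their queues:
 *
 *      struct qdio_ssqd_desc ssqd;
 *
 *      if (qdio_get_ssqd_desc(cdev, &ssqd))
 *              return -ENODEV;         // hypothetical error handling
 *      // ssqd now describes the subchannel (queue format, qdioac flags...)
 */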

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;
        int i;

        for_each_input_queue(irq_ptr, q, i)
                tasklet_kill(&q->tasklet);

        for_each_output_queue(irq_ptr, q, i) {
                del_timer_sync(&q->u.out.timer);
                tasklet_kill(&q->tasklet);
        }
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct subchannel_id schid;
        int rc;

        if (!irq_ptr)
                return -ENODEV;

        WARN_ON_ONCE(irqs_disabled());
        ccw_device_get_schid(cdev, &schid);
        DBF_EVENT("qshutdown:%4x", schid.sch_no);

        mutex_lock(&irq_ptr->setup_mutex);
        /*
         * Subchannel was already shot down. We cannot prevent being called
         * twice since cio may trigger a shutdown asynchronously.
         */
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                mutex_unlock(&irq_ptr->setup_mutex);
                return 0;
        }

        /*
         * Indicate that the device is going down. Scheduling the queue
         * tasklets is forbidden from here on.
         */
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

        tiqdio_remove_input_queues(irq_ptr);
        qdio_shutdown_queues(cdev);
        qdio_shutdown_debug_entries(irq_ptr);

        /* cleanup subchannel */
        spin_lock_irq(get_ccwdev_lock(cdev));

        if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
                rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
        else
                /* default behaviour is halt */
                rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
        if (rc) {
                DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4d", rc);
                goto no_cleanup;
        }

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
        spin_unlock_irq(get_ccwdev_lock(cdev));
        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR,
                10 * HZ);
        spin_lock_irq(get_ccwdev_lock(cdev));

no_cleanup:
        qdio_shutdown_thinint(irq_ptr);

        /* restore interrupt handler */
        if ((void *)cdev->handler == (void *)qdio_int_handler)
                cdev->handler = irq_ptr->orig_handler;
        spin_unlock_irq(get_ccwdev_lock(cdev));

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        mutex_unlock(&irq_ptr->setup_mutex);
        if (rc)
                return rc;
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct subchannel_id schid;

        if (!irq_ptr)
                return -ENODEV;

        ccw_device_get_schid(cdev, &schid);
        DBF_EVENT("qfree:%4x", schid.sch_no);
        DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
        mutex_lock(&irq_ptr->setup_mutex);

        irq_ptr->debug_area = NULL;
        cdev->private->qdio_data = NULL;
        mutex_unlock(&irq_ptr->setup_mutex);

        qdio_release_memory(irq_ptr);
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
        struct subchannel_id schid;
        struct qdio_irq *irq_ptr;

        ccw_device_get_schid(init_data->cdev, &schid);
        DBF_EVENT("qallocate:%4x", schid.sch_no);

        if ((init_data->no_input_qs && !init_data->input_handler) ||
            (init_data->no_output_qs && !init_data->output_handler))
                return -EINVAL;

        if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
            (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
                return -EINVAL;

        if ((!init_data->input_sbal_addr_array) ||
            (!init_data->output_sbal_addr_array))
                return -EINVAL;

        /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
        irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!irq_ptr)
                goto out_err;

        mutex_init(&irq_ptr->setup_mutex);
        if (qdio_allocate_dbf(init_data, irq_ptr))
                goto out_rel;

        /*
         * Allocate a page for the chsc calls in qdio_establish.
         * Must be pre-allocated since a zfcp recovery will call
         * qdio_establish. In case of low memory and swap on a zfcp disk
         * we may not be able to allocate memory otherwise.
         */
        irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
        if (!irq_ptr->chsc_page)
                goto out_rel;

        /* qdr is used in ccw1.cda which is u32 */
        irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!irq_ptr->qdr)
                goto out_rel;

        if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
                             init_data->no_output_qs))
                goto out_rel;

        init_data->cdev->private->qdio_data = irq_ptr;
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        return 0;
out_rel:
        qdio_release_memory(irq_ptr);
out_err:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q = irq_ptr->input_qs[0];
        int i, use_cq = 0;

        if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
                use_cq = 1;

        for_each_output_queue(irq_ptr, q, i) {
                if (use_cq) {
                        if (qdio_enable_async_operation(&q->u.out) < 0) {
                                use_cq = 0;
                                continue;
                        }
                } else
                        qdio_disable_async_operation(&q->u.out);
        }
        DBF_EVENT("use_cq:%d", use_cq);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
        struct ccw_device *cdev = init_data->cdev;
        struct subchannel_id schid;
        struct qdio_irq *irq_ptr;
        int rc;

        ccw_device_get_schid(cdev, &schid);
        DBF_EVENT("qestablish:%4x", schid.sch_no);

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        mutex_lock(&irq_ptr->setup_mutex);
        qdio_setup_irq(init_data);

        rc = qdio_establish_thinint(irq_ptr);
        if (rc) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
        }

        /* establish q */
        irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->equeue.count;
        irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

        spin_lock_irq(get_ccwdev_lock(cdev));
        ccw_device_set_options_mask(cdev, 0);

        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
        spin_unlock_irq(get_ccwdev_lock(cdev));
        if (rc) {
                DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
        }

        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

        if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return -EIO;
        }

        qdio_setup_ssqd_info(irq_ptr);

        qdio_detect_hsicq(irq_ptr);

        /* QEBSM is now set up if available; initialize the buffer states */
        qdio_init_buf_states(irq_ptr);

        mutex_unlock(&irq_ptr->setup_mutex);
        qdio_print_subchannel_info(irq_ptr, cdev);
        qdio_setup_debug_entries(irq_ptr, cdev);
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
        struct subchannel_id schid;
        struct qdio_irq *irq_ptr;
        int rc;

        ccw_device_get_schid(cdev, &schid);
        DBF_EVENT("qactivate:%4x", schid.sch_no);

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        mutex_lock(&irq_ptr->setup_mutex);
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                rc = -EBUSY;
                goto out;
        }

        irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->aqueue.count;
        irq_ptr->ccw.cda = 0;

        spin_lock_irq(get_ccwdev_lock(cdev));
        ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
                              0, DOIO_DENY_PREFETCH);
        spin_unlock_irq(get_ccwdev_lock(cdev));
        if (rc) {
                DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
                goto out;
        }

        if (is_thinint_irq(irq_ptr))
                tiqdio_add_input_queues(irq_ptr);

        /* wait for subchannel to become active */
        msleep(5);

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_STOPPED:
        case QDIO_IRQ_STATE_ERR:
                rc = -EIO;
                break;
        default:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
                rc = 0;
        }
out:
        mutex_unlock(&irq_ptr->setup_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
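
/*
 * Editor's note, not part of the original source: a minimal sketch of the
 * bring-up and tear-down sequence this API expects, with hypothetical
 * error handling.  A driver fills a struct qdio_initialize (queue counts,
 * handlers, SBAL address arrays) and then calls, in order:
 *
 *      rc = qdio_allocate(&init_data);         // allocate qdio_irq + queues
 *      if (!rc)
 *              rc = qdio_establish(&init_data); // ESTABLISH ccw, SSQD, QEBSM
 *      if (!rc)
 *              rc = qdio_activate(cdev);       // ACTIVATE ccw, start queues
 *      ...
 *      qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *      qdio_free(cdev);
 */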
1472
1473static inline int buf_in_between(int bufnr, int start, int count)
1474{
1475        int end = add_buf(start, count);
1476
1477        if (end > start) {
1478                if (bufnr >= start && bufnr < end)
1479                        return 1;
1480                else
1481                        return 0;
1482        }
1483
1484        /* wrap-around case */
1485        if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1486            (bufnr < end))
1487                return 1;
1488        else
1489                return 0;
1490}
1491
1492/**
1493 * handle_inbound - reset processed input buffers
1494 * @q: queue containing the buffers
1495 * @callflags: flags
1496 * @bufnr: first buffer to process
1497 * @count: how many buffers are emptied
1498 */
1499static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1500                          int bufnr, int count)
1501{
1502        int diff;
1503
1504        qperf_inc(q, inbound_call);
1505
1506        if (!q->u.in.polling)
1507                goto set;
1508
1509        /* protect against stop polling setting an ACK for an emptied slsb */
1510        if (count == QDIO_MAX_BUFFERS_PER_Q) {
1511                /* overwriting everything, just delete polling status */
1512                q->u.in.polling = 0;
1513                q->u.in.ack_count = 0;
1514                goto set;
1515        } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
1516                if (is_qebsm(q)) {
1517                        /* partial overwrite, just update ack_start */
1518                        diff = add_buf(bufnr, count);
1519                        diff = sub_buf(diff, q->u.in.ack_start);
1520                        q->u.in.ack_count -= diff;
1521                        if (q->u.in.ack_count <= 0) {
1522                                q->u.in.polling = 0;
1523                                q->u.in.ack_count = 0;
1524                                goto set;
1525                        }
1526                        q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
1527                }
1528                else
1529                        /* the only ACK will be deleted, so stop polling */
1530                        q->u.in.polling = 0;
1531        }
1532
1533set:
1534        count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
1535        atomic_add(count, &q->nr_buf_used);
1536
1537        if (need_siga_in(q))
1538                return qdio_siga_input(q);
1539
1540        return 0;
1541}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags from do_QDIO
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
                           int bufnr, int count)
{
        unsigned char state = 0;
        int used, rc = 0;

        qperf_inc(q, outbound_call);

        count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
        used = atomic_add_return(count, &q->nr_buf_used);

        if (used == QDIO_MAX_BUFFERS_PER_Q)
                qperf_inc(q, outbound_queue_full);

        if (callflags & QDIO_FLAG_PCI_OUT) {
                q->u.out.pci_out_enabled = 1;
                qperf_inc(q, pci_request_int);
        } else {
                q->u.out.pci_out_enabled = 0;
        }

        if (queue_type(q) == QDIO_IQDIO_QFMT) {
                unsigned long phys_aob = 0;

                /* One SIGA-W per buffer required for unicast HSI */
                WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

                phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

                rc = qdio_kick_outbound_q(q, phys_aob);
        } else if (need_siga_sync(q)) {
                rc = qdio_siga_sync_q(q);
        } else {
                /* try to fast requeue buffers */
                get_buf_state(q, prev_buf(bufnr), &state, 0);
                if (state != SLSB_CU_OUTPUT_PRIMED)
                        rc = qdio_kick_outbound_q(q, 0);
                else
                        qperf_inc(q, fast_requeue);
        }

        /* in case of SIGA errors we must process the error immediately */
        if (used >= q->u.out.scan_threshold || rc)
                qdio_tasklet_schedule(q);
        else if (!timer_pending(&q->u.out.timer) &&
                 likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
                /* free the SBALs in case of no further traffic */
                mod_timer(&q->u.out.timer, jiffies + HZ);
        return rc;
}
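
/*
 * Why fast requeue is safe (sketch of the reasoning above): if the
 * buffer preceding @bufnr is still SLSB_CU_OUTPUT_PRIMED, the adapter
 * has not yet fetched the previous request and will pick up the newly
 * primed buffers on the same scan, so no additional SIGA-w is needed.
 */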

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
            int q_nr, unsigned int bufnr, unsigned int count)
{
        struct qdio_irq *irq_ptr;

        if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
                return -EINVAL;

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        DBF_DEV_EVENT(DBF_INFO, irq_ptr,
                      "do%02x b:%02x c:%02x", callflags, bufnr, count);

        if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
                return -EIO;
        if (!count)
                return 0;
        if (callflags & QDIO_FLAG_SYNC_INPUT)
                return handle_inbound(irq_ptr->input_qs[q_nr],
                                      callflags, bufnr, count);
        else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
                return handle_outbound(irq_ptr->output_qs[q_nr],
                                       callflags, bufnr, count);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
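
/*
 * Minimal usage sketch (hypothetical helper, not part of this file):
 * an upper-layer driver returns a range of emptied input buffers on
 * queue 0 to the adapter.  Only do_QDIO() and QDIO_FLAG_SYNC_INPUT
 * are the real qdio API here; the helper name and its parameters are
 * made up for illustration.
 */
#if 0	/* example only */
static int example_refill_input(struct ccw_device *cdev,
                                unsigned int bufnr, unsigned int count)
{
        /* hand the emptied SBALs back so the adapter can refill them */
        return do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
}
#endif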

/**
 * qdio_start_irq - enable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
        struct qdio_q *q;
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        if (!irq_ptr)
                return -ENODEV;
        q = irq_ptr->input_qs[nr];

        clear_nonshared_ind(irq_ptr);
        qdio_stop_polling(q);
        clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (test_nonshared_ind(irq_ptr))
                goto rescan;
        if (!qdio_inbound_q_done(q))
                goto rescan;
        return 0;

rescan:
        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
                             &q->u.in.queue_irq_state))
                return 0;

        return 1;
}
EXPORT_SYMBOL(qdio_start_irq);
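
/*
 * Reasoning behind the rescan path above (sketch): after
 * qdio_stop_polling() resets the ACK state, the adapter may already
 * have delivered new data without raising an interrupt, since the
 * initiative still lay with the driver.  Re-disabling interrupts and
 * returning 1 hands that initiative back to the caller's poll loop.
 */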

/**
 * qdio_get_next_buffers - scan an input queue for newly filled buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number will be stored at this address
 * @error: error state of the buffers will be stored at this address
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
                          int *error)
{
        struct qdio_q *q;
        int start, end;
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        if (!irq_ptr)
                return -ENODEV;
        q = irq_ptr->input_qs[nr];

        /*
         * Cannot rely on automatic sync after interrupt since queues may
         * also be examined without interrupt.
         */
        if (need_siga_sync(q))
                qdio_sync_queues(q);

        /* check the PCI capable outbound queues. */
        qdio_check_outbound_after_thinint(q);

        if (!qdio_inbound_q_moved(q))
                return 0;

        /* Note: upper-layer MUST stop processing immediately here ... */
        if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return -EIO;

        start = q->first_to_kick;
        end = q->first_to_check;
        *bufnr = start;
        *error = q->qdio_error;

        /* for the next time */
        q->first_to_kick = end;
        q->qdio_error = 0;
        return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);
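
/*
 * Sketch of one poll pass (hypothetical caller code; process_buffer()
 * is made up, the qdio calls and QDIO_MAX_BUFFERS_PER_Q are real):
 */
#if 0	/* example only */
static void example_poll_once(struct ccw_device *cdev)
{
        int bufnr, error, i, count;

        count = qdio_get_next_buffers(cdev, 0, &bufnr, &error);
        for (i = 0; i < count; i++)
                process_buffer((bufnr + i) % QDIO_MAX_BUFFERS_PER_Q, error);
        if (count > 0)
                /* return the emptied buffers to the adapter */
                do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
}
#endif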

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
        struct qdio_q *q;
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        if (!irq_ptr)
                return -ENODEV;
        q = irq_ptr->input_qs[nr];

        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
                             &q->u.in.queue_irq_state))
                return 0;

        return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);
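
/*
 * Putting the polling API together (sketch; consume_buffers() and
 * reschedule_poll() are hypothetical caller hooks, the qdio_* calls
 * are the real exported functions):
 */
#if 0	/* example only */
static void example_poll_pass(struct ccw_device *cdev)
{
        int bufnr, error, count;

        /* interrupt handler: switch the input queue to polled mode */
        qdio_stop_irq(cdev, 0);

        /* poll loop: drain newly filled buffers */
        while ((count = qdio_get_next_buffers(cdev, 0, &bufnr, &error)) > 0)
                consume_buffers(bufnr, count, error);

        /* re-arm interrupts; 1 means new data raced in, keep polling */
        if (qdio_start_irq(cdev, 0))
                reschedule_poll(cdev);
}
#endif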

/**
 * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
 * @schid:              Subchannel ID.
 * @cnc:                Boolean Change-Notification Control
 * @response:           Response code will be stored at this address
 * @cb:                 Callback function will be executed for each element
 *                      of the address list
 * @priv:               Pointer to pass to the callback function.
 *
 * Performs "Store-network-bridging-information list" operation and calls
 * the callback function for every entry in the list. If "change-
 * notification-control" is set, further changes in the address list
 * will be reported via the IPA command.
 */
int qdio_pnso_brinfo(struct subchannel_id schid,
                int cnc, u16 *response,
                void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
                                void *entry),
                void *priv)
{
        struct chsc_pnso_area *rr;
        int rc;
        u32 prev_instance = 0;
        int isfirstblock = 1;
        int i, size, elems;

        rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
        if (rr == NULL)
                return -ENOMEM;
        do {
                /* on the first iteration, naihdr.resume_token will be zero */
                rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
                if (rc != 0 && rc != -EBUSY)
                        goto out;
                if (rr->response.code != 1) {
                        rc = -EIO;
                        continue;
                }
                rc = 0;

                if (cb == NULL)
                        continue;

                size = rr->naihdr.naids;
                elems = (rr->response.length -
                         sizeof(struct chsc_header) -
                         sizeof(struct chsc_brinfo_naihdr)) / size;

                if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
                        /*
                         * Inform the caller that they need to scrap
                         * the data that was already reported via cb.
                         */
                        rc = -EAGAIN;
                        break;
                }
                isfirstblock = 0;
                prev_instance = rr->naihdr.instance;
                for (i = 0; i < elems; i++) {
                        switch (size) {
                        case sizeof(struct qdio_brinfo_entry_l3_ipv6):
                                (*cb)(priv, l3_ipv6_addr,
                                      &rr->entries.l3_ipv6[i]);
                                break;
                        case sizeof(struct qdio_brinfo_entry_l3_ipv4):
                                (*cb)(priv, l3_ipv4_addr,
                                      &rr->entries.l3_ipv4[i]);
                                break;
                        case sizeof(struct qdio_brinfo_entry_l2):
                                (*cb)(priv, l2_addr_lnid,
                                      &rr->entries.l2[i]);
                                break;
                        default:
                                WARN_ON_ONCE(1);
                                rc = -EIO;
                                goto out;
                        }
                }
        } while (rr->response.code == 0x0107 ||  /* channel busy */
                 (rr->response.code == 1 && /* list stored */
                  /* resume token is non-zero => list incomplete */
                  (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
        *response = rr->response.code;

out:
        free_page((unsigned long)rr);
        return rc;
}
EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
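
/*
 * Callback sketch for qdio_pnso_brinfo() (record_l2_address() is a
 * hypothetical consumer; the entry types and structures are the real
 * ones dispatched by the switch above):
 */
#if 0	/* example only */
static void example_brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
                              void *entry)
{
        /* @type selects which layout @entry points to */
        if (type == l2_addr_lnid)
                record_l2_address(priv, (struct qdio_brinfo_entry_l2 *)entry);
        /* l3_ipv4_addr and l3_ipv6_addr entries are handled similarly */
}
#endif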

static int __init init_QDIO(void)
{
        int rc;

        rc = qdio_debug_init();
        if (rc)
                return rc;
        rc = qdio_setup_init();
        if (rc)
                goto out_debug;
        rc = tiqdio_allocate_memory();
        if (rc)
                goto out_cache;
        rc = tiqdio_register_thinints();
        if (rc)
                goto out_ti;
        return 0;

out_ti:
        tiqdio_free_memory();
out_cache:
        qdio_setup_exit();
out_debug:
        qdio_debug_exit();
        return rc;
}

static void __exit exit_QDIO(void)
{
        tiqdio_unregister_thinints();
        tiqdio_free_memory();
        qdio_setup_exit();
        qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);