linux/drivers/s390/cio/qdio_main.c
/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *            Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"
              "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
                               unsigned int out_mask, unsigned int in_mask,
                               unsigned int fc)
{
        register unsigned long __fc asm ("0") = fc;
        register unsigned long __schid asm ("1") = schid;
        register unsigned long out asm ("2") = out_mask;
        register unsigned long in asm ("3") = in_mask;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
        return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
                                unsigned int fc)
{
        register unsigned long __fc asm ("0") = fc;
        register unsigned long __schid asm ("1") = schid;
        register unsigned long __mask asm ("2") = mask;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
        return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
                                 unsigned int *bb, unsigned int fc,
                                 unsigned long aob)
{
        register unsigned long __fc asm("0") = fc;
        register unsigned long __schid asm("1") = schid;
        register unsigned long __mask asm("2") = mask;
        register unsigned long __aob asm("3") = aob;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc), "+d" (__fc), "+d" (__aob)
                : "d" (__schid), "d" (__mask)
                : "cc");
        *bb = __fc >> 31;
        return cc;
}

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
        /* all done or next buffer state different */
        if (ccq == 0 || ccq == 32)
                return 0;
        /* no buffer processed */
        if (ccq == 97)
                return 1;
        /* not all buffers processed */
        if (ccq == 96)
                return 2;
        /* notify devices immediately */
        DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
        return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
                        int start, int count, int auto_ack)
{
        int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
        unsigned int ccq = 0;

        qperf_inc(q, eqbs);

        if (!q->is_input_q)
                nr += q->irq_ptr->nr_input_qs;
again:
        ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
                      auto_ack);
        rc = qdio_check_ccq(q, ccq);
        if (!rc)
                return count - tmp_count;

        if (rc == 1) {
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
                goto again;
        }

        if (rc == 2) {
                qperf_inc(q, eqbs_partial);
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
                        tmp_count);
                /*
                 * Retry once; if that fails, bail out and process the
                 * extracted buffers before trying again.
                 */
                if (!retried++)
                        goto again;
                else
                        return count - tmp_count;
        }

        DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
        DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
                   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
        return 0;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
                        int count)
{
        unsigned int ccq = 0;
        int tmp_count = count, tmp_start = start;
        int nr = q->nr;
        int rc;

        if (!count)
                return 0;
        qperf_inc(q, sqbs);

        if (!q->is_input_q)
                nr += q->irq_ptr->nr_input_qs;
again:
        ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
        rc = qdio_check_ccq(q, ccq);
        if (!rc) {
                WARN_ON_ONCE(tmp_count);
                return count - tmp_count;
        }

        if (rc == 1 || rc == 2) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
                qperf_inc(q, sqbs_partial);
                goto again;
        }

        DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
        DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
                   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
        return 0;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
                                 unsigned char *state, unsigned int count,
                                 int auto_ack, int merge_pending)
{
        unsigned char __state = 0;
        int i;

        if (is_qebsm(q))
                return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

        for (i = 0; i < count; i++) {
                if (!__state) {
                        __state = q->slsb.val[bufnr];
                        if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
                                __state = SLSB_P_OUTPUT_EMPTY;
                } else if (merge_pending) {
                        if ((q->slsb.val[bufnr] & __state) != __state)
                                break;
                } else if (q->slsb.val[bufnr] != __state)
                        break;
                bufnr = next_buf(bufnr);
        }
        *state = __state;
        return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
                                unsigned char *state, int auto_ack)
{
        return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
                                 unsigned char state, int count)
{
        int i;

        if (is_qebsm(q))
                return qdio_do_sqbs(q, state, bufnr, count);

        for (i = 0; i < count; i++) {
                xchg(&q->slsb.val[bufnr], state);
                bufnr = next_buf(bufnr);
        }
        return count;
}
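
/*
 * Illustrative worked example (not part of this file): buffer numbers form
 * a ring of QDIO_MAX_BUFFERS_PER_Q (128) entries, and the helpers from
 * qdio.h wrap modulo 128:
 *
 *      next_buf(127) == 0
 *      ((120 + 16) & QDIO_MAX_BUFFERS_MASK) == 8       (add_buf(120, 16))
 *      ((8 - 120) & QDIO_MAX_BUFFERS_MASK) == 16       (sub_buf(8, 120))
 *
 * This is why get_buf_states()/set_buf_states() above can walk count
 * buffers starting at any bufnr without an explicit wrap check.
 */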

static inline int set_buf_state(struct qdio_q *q, int bufnr,
                                unsigned char state)
{
        return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;
        int i;

        for_each_input_queue(irq_ptr, q, i)
                set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
                               QDIO_MAX_BUFFERS_PER_Q);
        for_each_output_queue(irq_ptr, q, i)
                set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
                               QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
                          unsigned int input)
{
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_SYNC;
        int cc;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
        qperf_inc(q, siga_sync);

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }

        cc = do_siga_sync(schid, output, input, fc);
        if (unlikely(cc))
                DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
        return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
        if (q->is_input_q)
                return qdio_siga_sync(q, 0, q->mask);
        else
                return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
        unsigned long aob)
{
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_WRITE;
        u64 start_time = 0;
        int retries = 0, cc;
        unsigned long laob = 0;

        if (q->u.out.use_cq && aob != 0) {
                fc = QDIO_SIGA_WRITEQ;
                laob = aob;
        }

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }
again:
        WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
                (aob && fc != QDIO_SIGA_WRITEQ));
        cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

        /* hipersocket busy condition */
        if (unlikely(*busy_bit)) {
                retries++;

                if (!start_time) {
                        start_time = get_tod_clock();
                        goto again;
                }
                if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
                        goto again;
        }
        if (retries) {
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
                              "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
        }
        return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_READ;
        int cc;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
        qperf_inc(q, siga_read);

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }

        cc = do_siga_input(schid, q->mask, fc);
        if (unlikely(cc))
                DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
        return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
        /* PCI capable outbound queues will also be scanned so sync them too */
        if (pci_out_supported(q))
                qdio_siga_sync_all(q);
        else
                qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
                        unsigned char *state)
{
        if (need_siga_sync(q))
                qdio_siga_sync_q(q);
        return get_buf_states(q, bufnr, state, 1, 0, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
        if (!q->u.in.polling)
                return;

        q->u.in.polling = 0;
        qperf_inc(q, stop_polling);

        /* show the card that we are not polling anymore */
        if (is_qebsm(q)) {
                set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
                               q->u.in.ack_count);
                q->u.in.ack_count = 0;
        } else
                set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, int count)
{
        int pos = 0;

        q->q_stats.nr_sbal_total += count;
        if (count == QDIO_MAX_BUFFERS_MASK) {
                q->q_stats.nr_sbals[7]++;
                return;
        }
        while (count >>= 1)
                pos++;
        q->q_stats.nr_sbals[pos]++;
}

static void process_buffer_error(struct qdio_q *q, int count)
{
        unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
                                        SLSB_P_OUTPUT_NOT_INIT;

        q->qdio_error = QDIO_ERROR_SLSB_STATE;

        /* special handling for no target buffer empty */
        if ((!q->is_input_q &&
            (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
                qperf_inc(q, target_full);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
                              q->first_to_check);
                goto set;
        }

        DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
        DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
        DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
        DBF_ERROR("F14:%2x F15:%2x",
                  q->sbal[q->first_to_check]->element[14].sflags,
                  q->sbal[q->first_to_check]->element[15].sflags);

set:
        /*
         * Interrupts may be avoided as long as the error is present
         * so change the buffer state immediately to avoid starvation.
         */
        set_buf_states(q, q->first_to_check, state, count);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
        int new;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

        /* for QEBSM the ACK was already set by EQBS */
        if (is_qebsm(q)) {
                if (!q->u.in.polling) {
                        q->u.in.polling = 1;
                        q->u.in.ack_count = count;
                        q->u.in.ack_start = q->first_to_check;
                        return;
                }

                /* delete the previous ACKs */
                set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
                               q->u.in.ack_count);
                q->u.in.ack_count = count;
                q->u.in.ack_start = q->first_to_check;
                return;
        }

        /*
         * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
         * or by the next inbound run.
         */
        new = add_buf(q->first_to_check, count - 1);
        if (q->u.in.polling) {
                /* reset the previous ACK but first set the new one */
                set_buf_state(q, new, SLSB_P_INPUT_ACK);
                set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
        } else {
                q->u.in.polling = 1;
                set_buf_state(q, new, SLSB_P_INPUT_ACK);
        }

        q->u.in.ack_start = new;
        count--;
        if (!count)
                return;
        /* need to change ALL buffers to get more interrupts */
        set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
        int count, stop;
        unsigned char state = 0;

        q->timestamp = get_tod_clock();

        /*
         * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
         * would return 0.
         */
        count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
        stop = add_buf(q->first_to_check, count);

        if (q->first_to_check == stop)
                goto out;

        /*
         * No siga-sync needed here; either a PCI interrupt or the
         * thin-interrupt handler has already synced the queues.
         */
        count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
        if (!count)
                goto out;

        switch (state) {
        case SLSB_P_INPUT_PRIMED:
                inbound_primed(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
                if (atomic_sub_return(count, &q->nr_buf_used) == 0)
                        qperf_inc(q, inbound_queue_full);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals(q, count);
                break;
        case SLSB_P_INPUT_ERROR:
                process_buffer_error(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
                atomic_sub(count, &q->nr_buf_used);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals_error(q, count);
                break;
        case SLSB_CU_INPUT_EMPTY:
        case SLSB_P_INPUT_NOT_INIT:
        case SLSB_P_INPUT_ACK:
                if (q->irq_ptr->perf_stat_enabled)
                        q->q_stats.nr_sbal_nop++;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
                break;
        default:
                WARN_ON_ONCE(1);
        }
out:
        return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
        int bufnr;

        bufnr = get_inbound_buffer_frontier(q);

        if (bufnr != q->last_move) {
                q->last_move = bufnr;
                if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
                        q->u.in.timestamp = get_tod_clock();
                return 1;
        } else
                return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
        unsigned char state = 0;

        if (!atomic_read(&q->nr_buf_used))
                return 1;

        if (need_siga_sync(q))
                qdio_siga_sync_q(q);
        get_buf_state(q, q->first_to_check, &state, 0);

        if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
                /* more work coming */
                return 0;

        if (is_thinint_irq(q->irq_ptr))
                return 1;

        /* don't poll under z/VM */
        if (MACHINE_IS_VM)
                return 1;

        /*
         * At this point we know that inbound first_to_check
         * has (probably) not moved (see qdio_inbound_processing).
         */
        if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
                              q->first_to_check);
                return 1;
        } else
                return 0;
}

static inline int contains_aobs(struct qdio_q *q)
{
        return !q->is_input_q && q->u.out.use_cq;
}

static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
        unsigned char state = 0;
        int j, b = start;

        if (!contains_aobs(q))
                return;

        for (j = 0; j < count; ++j) {
                get_buf_state(q, b, &state, 0);
                if (state == SLSB_P_OUTPUT_PENDING) {
                        struct qaob *aob = q->u.out.aobs[b];
                        if (aob == NULL)
                                continue;

                        q->u.out.sbal_state[b].flags |=
                                QDIO_OUTBUF_STATE_FLAG_PENDING;
                        q->u.out.aobs[b] = NULL;
                } else if (state == SLSB_P_OUTPUT_EMPTY) {
                        q->u.out.sbal_state[b].aob = NULL;
                }
                b = next_buf(b);
        }
}

static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
                                        int bufnr)
{
        unsigned long phys_aob = 0;

        if (!q->use_cq)
                goto out;

        if (!q->aobs[bufnr]) {
                struct qaob *aob = qdio_allocate_aob();
                q->aobs[bufnr] = aob;
        }
        if (q->aobs[bufnr]) {
                q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
                q->sbal_state[bufnr].aob = q->aobs[bufnr];
                q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
                phys_aob = virt_to_phys(q->aobs[bufnr]);
                WARN_ON_ONCE(phys_aob & 0xFF);
        }

out:
        return phys_aob;
}

static void qdio_kick_handler(struct qdio_q *q)
{
        int start = q->first_to_kick;
        int end = q->first_to_check;
        int count;

        if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;

        count = sub_buf(end, start);

        if (q->is_input_q) {
                qperf_inc(q, inbound_handler);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start,
                              count);
        } else {
                qperf_inc(q, outbound_handler);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
                              start, count);
        }

        qdio_handle_aobs(q, start, count);

        q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
                   q->irq_ptr->int_parm);

        /* for the next time */
        q->first_to_kick = end;
        q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
        qperf_inc(q, tasklet_inbound);

        if (!qdio_inbound_q_moved(q))
                return;

        qdio_kick_handler(q);

        if (!qdio_inbound_q_done(q)) {
                /* means poll time is not yet over */
                qperf_inc(q, tasklet_inbound_resched);
                if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
                        tasklet_schedule(&q->tasklet);
                        return;
                }
        }

        qdio_stop_polling(q);
        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (!qdio_inbound_q_done(q)) {
                qperf_inc(q, tasklet_inbound_resched2);
                if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
                        tasklet_schedule(&q->tasklet);
        }
}

void qdio_inbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
        int count, stop;
        unsigned char state = 0;

        q->timestamp = get_tod_clock();

        if (need_siga_sync(q))
                if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
                    !pci_out_supported(q)) ||
                    (queue_type(q) == QDIO_IQDIO_QFMT &&
                    multicast_outbound(q)))
                        qdio_siga_sync_q(q);

        /*
         * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
         * would return 0.
         */
        count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
        stop = add_buf(q->first_to_check, count);
        if (q->first_to_check == stop)
                goto out;

        count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
        if (!count)
                goto out;

        switch (state) {
        case SLSB_P_OUTPUT_EMPTY:
                /* the adapter got it */
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
                        "out empty:%1d %02x", q->nr, count);

                atomic_sub(count, &q->nr_buf_used);
                q->first_to_check = add_buf(q->first_to_check, count);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals(q, count);

                break;
        case SLSB_P_OUTPUT_ERROR:
                process_buffer_error(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
                atomic_sub(count, &q->nr_buf_used);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals_error(q, count);
                break;
        case SLSB_CU_OUTPUT_PRIMED:
                /* the adapter has not fetched the output yet */
                if (q->irq_ptr->perf_stat_enabled)
                        q->q_stats.nr_sbal_nop++;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
                              q->nr);
                break;
        case SLSB_P_OUTPUT_NOT_INIT:
        case SLSB_P_OUTPUT_HALTED:
                break;
        default:
                WARN_ON_ONCE(1);
        }

out:
        return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
        return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
        int bufnr;

        bufnr = get_outbound_buffer_frontier(q);

        if (bufnr != q->last_move) {
                q->last_move = bufnr;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
                return 1;
        } else
                return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
        int retries = 0, cc;
        unsigned int busy_bit;

        if (!need_siga_out(q))
                return 0;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
        qperf_inc(q, siga_write);

        cc = qdio_siga_output(q, &busy_bit, aob);
        switch (cc) {
        case 0:
                break;
        case 2:
                if (busy_bit) {
                        while (++retries < QDIO_BUSY_BIT_RETRIES) {
                                mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
                                goto retry;
                        }
                        DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
                        cc = -EBUSY;
                } else {
                        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
                        cc = -ENOBUFS;
                }
                break;
        case 1:
        case 3:
                DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
                cc = -EIO;
                break;
        }
        if (retries) {
                DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
                DBF_ERROR("count:%u", retries);
        }
        return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
        qperf_inc(q, tasklet_outbound);
        WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);

        if (qdio_outbound_q_moved(q))
                qdio_kick_handler(q);

        if (queue_type(q) == QDIO_ZFCP_QFMT)
                if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
                        goto sched;

        if (q->u.out.pci_out_enabled)
                return;

        /*
         * Now we know that the queue type is either qeth without PCI enabled
         * or HiperSockets. Make sure the buffer switch from PRIMED to EMPTY
         * is noticed and outbound_handler is called after some time.
         */
        if (qdio_outbound_q_done(q))
                del_timer(&q->u.out.timer);
        else
                if (!timer_pending(&q->u.out.timer))
                        mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
        return;

sched:
        if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
                return;
        tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;

        if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
                return;
        tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
        struct qdio_q *out;
        int i;

        if (!pci_out_supported(q))
                return;

        for_each_output_queue(q->irq_ptr, out, i)
                if (!qdio_outbound_q_done(out))
                        tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
        qperf_inc(q, tasklet_inbound);
        if (need_siga_sync(q) && need_siga_sync_after_ai(q))
                qdio_sync_queues(q);

        /*
         * The interrupt could be caused by a PCI request. Check the
         * PCI capable outbound queues.
         */
        qdio_check_outbound_after_thinint(q);

        if (!qdio_inbound_q_moved(q))
                return;

        qdio_kick_handler(q);

        if (!qdio_inbound_q_done(q)) {
                qperf_inc(q, tasklet_inbound_resched);
                if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
                        tasklet_schedule(&q->tasklet);
                        return;
                }
        }

        qdio_stop_polling(q);
        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (!qdio_inbound_q_done(q)) {
                qperf_inc(q, tasklet_inbound_resched2);
                if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
                        tasklet_schedule(&q->tasklet);
        }
}

void tiqdio_inbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
                                  enum qdio_irq_states state)
{
        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

        irq_ptr->state = state;
        mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
        if (irb->esw.esw0.erw.cons) {
                DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
                DBF_ERROR_HEX(irb, 64);
                DBF_ERROR_HEX(irb->ecw, 64);
        }
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
        int i;
        struct qdio_q *q;

        if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
                return;

        for_each_input_queue(irq_ptr, q, i) {
                if (q->u.in.queue_start_poll) {
                        /* skip if polling is enabled or already in work */
                        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
                                     &q->u.in.queue_irq_state)) {
                                qperf_inc(q, int_discarded);
                                continue;
                        }
                        q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
                                                 q->irq_ptr->int_parm);
                } else {
                        tasklet_schedule(&q->tasklet);
                }
        }

        if (!pci_out_supported(q))
                return;

        for_each_output_queue(irq_ptr, q, i) {
                if (qdio_outbound_q_done(q))
                        continue;
                if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
                        qdio_siga_sync_q(q);
                tasklet_schedule(&q->tasklet);
        }
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
                                unsigned long intparm, int cstat, int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;
        int count;

        DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
        DBF_ERROR("intp :%lx", intparm);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

        if (irq_ptr->nr_input_qs) {
                q = irq_ptr->input_qs[0];
        } else if (irq_ptr->nr_output_qs) {
                q = irq_ptr->output_qs[0];
        } else {
                dump_stack();
                goto no_handler;
        }

        count = sub_buf(q->first_to_check, q->first_to_kick);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
                   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
        /*
         * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will
         * happen. Therefore we call the LGR detection function here.
         */
        lgr_info_log();
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
                                      int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

        if (cstat)
                goto error;
        if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
                goto error;
        if (!(dstat & DEV_STAT_DEV_END))
                goto error;
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
        return;

error:
        DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        int cstat, dstat;

        if (!intparm || !irq_ptr) {
                DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
                return;
        }

        if (irq_ptr->perf_stat_enabled)
                irq_ptr->perf_stat.qdio_int++;

        if (IS_ERR(irb)) {
                DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
                wake_up(&cdev->private->wait_q);
                return;
        }
        qdio_irq_check_sense(irq_ptr, irb);
        cstat = irb->scsw.cmd.cstat;
        dstat = irb->scsw.cmd.dstat;

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_INACTIVE:
                qdio_establish_handle_irq(cdev, cstat, dstat);
                break;
        case QDIO_IRQ_STATE_CLEANUP:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
                break;
        case QDIO_IRQ_STATE_ESTABLISHED:
        case QDIO_IRQ_STATE_ACTIVE:
                if (cstat & SCHN_STAT_PCI) {
                        qdio_int_handler_pci(irq_ptr);
                        return;
                }
                if (cstat || dstat)
                        qdio_handle_activate_check(cdev, intparm, cstat,
                                                   dstat);
                break;
        case QDIO_IRQ_STATE_STOPPED:
                break;
        default:
                WARN_ON_ONCE(1);
        }
        wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
                       struct qdio_ssqd_desc *data)
{
        if (!cdev || !cdev->private)
                return -EINVAL;

        DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
        return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
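
/*
 * Illustrative sketch (not part of this file): how an upper-layer driver
 * might use qdio_get_ssqd_desc() to probe an adapter capability. The
 * function name is hypothetical; CHSC_AC2_DATA_DIV_AVAILABLE is one of
 * the qdioac2 flags from <asm/qdio.h>.
 */
static int __maybe_unused example_data_div_available(struct ccw_device *cdev)
{
        struct qdio_ssqd_desc ssqd;
        int rc;

        rc = qdio_get_ssqd_desc(cdev, &ssqd);
        if (rc)
                return rc;
        return (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_AVAILABLE) ? 1 : 0;
}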

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;
        int i;

        for_each_input_queue(irq_ptr, q, i)
                tasklet_kill(&q->tasklet);

        for_each_output_queue(irq_ptr, q, i) {
                del_timer(&q->u.out.timer);
                tasklet_kill(&q->tasklet);
        }
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        int rc;
        unsigned long flags;

        if (!irq_ptr)
                return -ENODEV;

        WARN_ON_ONCE(irqs_disabled());
        DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

        mutex_lock(&irq_ptr->setup_mutex);
        /*
         * Subchannel was already shot down. We cannot prevent being called
         * twice since cio may trigger a shutdown asynchronously.
         */
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                mutex_unlock(&irq_ptr->setup_mutex);
                return 0;
        }

        /*
         * Indicate that the device is going down. Scheduling the queue
         * tasklets is forbidden from here on.
         */
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

        tiqdio_remove_input_queues(irq_ptr);
        qdio_shutdown_queues(cdev);
        qdio_shutdown_debug_entries(irq_ptr);

        /* cleanup subchannel */
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

        if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
                rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
        else
                /* default behaviour is halt */
                rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
        if (rc) {
                DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4d", rc);
                goto no_cleanup;
        }

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR,
                10 * HZ);
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
        qdio_shutdown_thinint(irq_ptr);

        /* restore interrupt handler */
        if ((void *)cdev->handler == (void *)qdio_int_handler)
                cdev->handler = irq_ptr->orig_handler;
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        mutex_unlock(&irq_ptr->setup_mutex);
        if (rc)
                return rc;
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        if (!irq_ptr)
                return -ENODEV;

        DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
        mutex_lock(&irq_ptr->setup_mutex);

        if (irq_ptr->debug_area != NULL) {
                debug_unregister(irq_ptr->debug_area);
                irq_ptr->debug_area = NULL;
        }
        cdev->private->qdio_data = NULL;
        mutex_unlock(&irq_ptr->setup_mutex);

        qdio_release_memory(irq_ptr);
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
        struct qdio_irq *irq_ptr;

        DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

        if ((init_data->no_input_qs && !init_data->input_handler) ||
            (init_data->no_output_qs && !init_data->output_handler))
                return -EINVAL;

        if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
            (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
                return -EINVAL;

        if ((!init_data->input_sbal_addr_array) ||
            (!init_data->output_sbal_addr_array))
                return -EINVAL;

        /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
        irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!irq_ptr)
                goto out_err;

        mutex_init(&irq_ptr->setup_mutex);
        qdio_allocate_dbf(init_data, irq_ptr);

        /*
         * Allocate a page for the chsc calls in qdio_establish.
         * Must be pre-allocated since a zfcp recovery will call
         * qdio_establish. In case of low memory and swap on a zfcp disk
         * we may not be able to allocate memory otherwise.
         */
        irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
        if (!irq_ptr->chsc_page)
                goto out_rel;

        /* qdr is used in ccw1.cda which is u32 */
        irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!irq_ptr->qdr)
                goto out_rel;

        if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
                             init_data->no_output_qs))
                goto out_rel;

        init_data->cdev->private->qdio_data = irq_ptr;
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        return 0;
out_rel:
        qdio_release_memory(irq_ptr);
out_err:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q = irq_ptr->input_qs[0];
        int i, use_cq = 0;

        if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
                use_cq = 1;

        for_each_output_queue(irq_ptr, q, i) {
                if (use_cq) {
                        if (qdio_enable_async_operation(&q->u.out) < 0) {
                                use_cq = 0;
                                continue;
                        }
                } else
                        qdio_disable_async_operation(&q->u.out);
        }
        DBF_EVENT("use_cq:%d", use_cq);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
        struct qdio_irq *irq_ptr;
        struct ccw_device *cdev = init_data->cdev;
        unsigned long saveflags;
        int rc;

        DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EINVAL;

        mutex_lock(&irq_ptr->setup_mutex);
        qdio_setup_irq(init_data);

        rc = qdio_establish_thinint(irq_ptr);
        if (rc) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
        }

        /* establish q */
        irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->equeue.count;
        irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

        spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
        ccw_device_set_options_mask(cdev, 0);

        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
        if (rc) {
                DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
        }
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

        if (rc) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
        }

        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

        if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return -EIO;
        }

        qdio_setup_ssqd_info(irq_ptr);

        qdio_detect_hsicq(irq_ptr);

        /* qebsm is now setup if available, initialize buffer states */
        qdio_init_buf_states(irq_ptr);

        mutex_unlock(&irq_ptr->setup_mutex);
        qdio_print_subchannel_info(irq_ptr, cdev);
        qdio_setup_debug_entries(irq_ptr, cdev);
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr;
        int rc;
        unsigned long saveflags;

        DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EINVAL;

        mutex_lock(&irq_ptr->setup_mutex);
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                rc = -EBUSY;
                goto out;
        }

        irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->aqueue.count;
        irq_ptr->ccw.cda = 0;

        spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
        ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
                              0, DOIO_DENY_PREFETCH);
        if (rc) {
                DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
        }
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

        if (rc)
                goto out;

        if (is_thinint_irq(irq_ptr))
                tiqdio_add_input_queues(irq_ptr);

        /* wait for subchannel to become active */
        msleep(5);

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_STOPPED:
        case QDIO_IRQ_STATE_ERR:
                rc = -EIO;
                break;
        default:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
                rc = 0;
        }
out:
        mutex_unlock(&irq_ptr->setup_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
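
/*
 * Illustrative sketch (not part of this file): the canonical bring-up and
 * teardown order for the subchannel API above. The function name and the
 * error-handling policy are hypothetical; a real driver (e.g. qeth, zfcp)
 * also fills struct qdio_initialize with its handlers and buffer arrays.
 */
static int __maybe_unused example_bring_up(struct qdio_initialize *init_data)
{
        struct ccw_device *cdev = init_data->cdev;
        int rc;

        rc = qdio_allocate(init_data);          /* queues, qdr, chsc page */
        if (rc)
                return rc;
        rc = qdio_establish(init_data);         /* ESTABLISH ccw + thinint */
        if (rc)
                goto out_free;
        rc = qdio_activate(cdev);               /* ACTIVATE ccw, go online */
        if (rc)
                goto out_shutdown;
        return 0;

out_shutdown:
        qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
out_free:
        qdio_free(cdev);
        return rc;
}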

static inline int buf_in_between(int bufnr, int start, int count)
{
        int end = add_buf(start, count);

        if (end > start) {
                if (bufnr >= start && bufnr < end)
                        return 1;
                else
                        return 0;
        }

        /* wrap-around case */
        if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
            (bufnr < end))
                return 1;
        else
                return 0;
}
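
/*
 * Illustrative worked example (not part of this file): with start = 120 and
 * count = 16, end = add_buf(120, 16) = 8 and the range wraps, so
 * buf_in_between() accepts both bufnr 125 (>= start) and bufnr 3 (< end).
 */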

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
                          int bufnr, int count)
{
        int diff;

        qperf_inc(q, inbound_call);

        if (!q->u.in.polling)
                goto set;

        /* protect against stop polling setting an ACK for an emptied slsb */
        if (count == QDIO_MAX_BUFFERS_PER_Q) {
                /* overwriting everything, just delete polling status */
                q->u.in.polling = 0;
                q->u.in.ack_count = 0;
                goto set;
        } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
                if (is_qebsm(q)) {
                        /* partial overwrite, just update ack_start */
                        diff = add_buf(bufnr, count);
                        diff = sub_buf(diff, q->u.in.ack_start);
                        q->u.in.ack_count -= diff;
                        if (q->u.in.ack_count <= 0) {
                                q->u.in.polling = 0;
                                q->u.in.ack_count = 0;
                                goto set;
                        }
                        q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
                } else
                        /* the only ACK will be deleted, so stop polling */
                        q->u.in.polling = 0;
        }

set:
        count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
        atomic_add(count, &q->nr_buf_used);

        if (need_siga_in(q))
                return qdio_siga_input(q);

        return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
                           int bufnr, int count)
{
        unsigned char state = 0;
        int used, rc = 0;

        qperf_inc(q, outbound_call);

        count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
        used = atomic_add_return(count, &q->nr_buf_used);

        if (used == QDIO_MAX_BUFFERS_PER_Q)
                qperf_inc(q, outbound_queue_full);

        if (callflags & QDIO_FLAG_PCI_OUT) {
                q->u.out.pci_out_enabled = 1;
                qperf_inc(q, pci_request_int);
        } else
                q->u.out.pci_out_enabled = 0;

        if (queue_type(q) == QDIO_IQDIO_QFMT) {
                unsigned long phys_aob = 0;

                /* One SIGA-W per buffer required for unicast HSI */
                WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

                phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

                rc = qdio_kick_outbound_q(q, phys_aob);
        } else if (need_siga_sync(q)) {
                rc = qdio_siga_sync_q(q);
        } else {
                /* try to fast requeue buffers */
                get_buf_state(q, prev_buf(bufnr), &state, 0);
                if (state != SLSB_CU_OUTPUT_PRIMED)
                        rc = qdio_kick_outbound_q(q, 0);
                else
                        qperf_inc(q, fast_requeue);
        }

        /* in case of SIGA errors we must process the error immediately */
        if (used >= q->u.out.scan_threshold || rc)
                tasklet_schedule(&q->tasklet);
        else
                /* free the SBALs in case of no further traffic */
                if (!timer_pending(&q->u.out.timer))
                        mod_timer(&q->u.out.timer, jiffies + HZ);
        return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
            int q_nr, unsigned int bufnr, unsigned int count)
{
        struct qdio_irq *irq_ptr;

        if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
                return -EINVAL;

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        DBF_DEV_EVENT(DBF_INFO, irq_ptr,
                      "do%02x b:%02x c:%02x", callflags, bufnr, count);

        if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
                return -EIO;
        if (!count)
                return 0;
        if (callflags & QDIO_FLAG_SYNC_INPUT)
                return handle_inbound(irq_ptr->input_qs[q_nr],
                                      callflags, bufnr, count);
        else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
                return handle_outbound(irq_ptr->output_qs[q_nr],
                                       callflags, bufnr, count);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
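
/*
 * Illustrative sketch (not part of this file): a driver returning an
 * emptied input buffer and submitting a filled output buffer through
 * do_QDIO(). Queue and buffer numbers are hypothetical.
 */
static int __maybe_unused example_do_qdio(struct ccw_device *cdev,
                                          unsigned int bufnr)
{
        int rc;

        /* hand a processed buffer on input queue 0 back to the adapter */
        rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
        if (rc)
                return rc;

        /*
         * Submit a filled SBAL on output queue 0 and request a PCI
         * interrupt once the adapter has fetched it.
         */
        return do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
                       0, bufnr, 1);
}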

/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
        struct qdio_q *q;
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        if (!irq_ptr)
                return -ENODEV;
        q = irq_ptr->input_qs[nr];

        clear_nonshared_ind(irq_ptr);
        qdio_stop_polling(q);
        clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (test_nonshared_ind(irq_ptr))
                goto rescan;
        if (!qdio_inbound_q_done(q))
                goto rescan;
        return 0;

rescan:
        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
                             &q->u.in.queue_irq_state))
                return 0;
        else
                return 1;
}
EXPORT_SYMBOL(qdio_start_irq);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
                          int *error)
{
        struct qdio_q *q;
        int start, end;
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        if (!irq_ptr)
                return -ENODEV;
        q = irq_ptr->input_qs[nr];

        /*
         * Cannot rely on automatic sync after interrupt since queues may
         * also be examined without interrupt.
         */
        if (need_siga_sync(q))
                qdio_sync_queues(q);

        /* check the PCI capable outbound queues. */
        qdio_check_outbound_after_thinint(q);

        if (!qdio_inbound_q_moved(q))
                return 0;

        /* Note: upper-layer MUST stop processing immediately here ... */
        if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return -EIO;

        start = q->first_to_kick;
        end = q->first_to_check;
        *bufnr = start;
        *error = q->qdio_error;

        /* for the next time */
        q->first_to_kick = end;
        q->qdio_error = 0;
        return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
        struct qdio_q *q;
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        if (!irq_ptr)
                return -ENODEV;
        q = irq_ptr->input_qs[nr];

        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
                             &q->u.in.queue_irq_state))
                return 0;
        else
                return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);
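
/*
 * Illustrative sketch (not part of this file): the polling discipline that
 * qdio_start_irq()/qdio_get_next_buffers()/qdio_stop_irq() enable, roughly
 * as a NAPI-style driver would use it. The function name is hypothetical.
 */
static int __maybe_unused example_poll_pass(struct ccw_device *cdev, int nr)
{
        int start, error, count;

        count = qdio_get_next_buffers(cdev, nr, &start, &error);
        if (count < 0)
                return count;

        /* ... process count buffers beginning at start, honoring error ... */

        /*
         * Re-enable the qdio interrupt; a return of 1 means new data
         * arrived in the meantime and another poll pass is required.
         */
        return qdio_start_irq(cdev, nr);
}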

static int __init init_QDIO(void)
{
        int rc;

        rc = qdio_debug_init();
        if (rc)
                return rc;
        rc = qdio_setup_init();
        if (rc)
                goto out_debug;
        rc = tiqdio_allocate_memory();
        if (rc)
                goto out_cache;
        rc = tiqdio_register_thinints();
        if (rc)
                goto out_ti;
        return 0;

out_ti:
        tiqdio_free_memory();
out_cache:
        qdio_setup_exit();
out_debug:
        qdio_debug_exit();
        return rc;
}

static void __exit exit_QDIO(void)
{
        tiqdio_unregister_thinints();
        tiqdio_free_memory();
        qdio_setup_exit();
        qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);