linux/drivers/s390/cio/qdio_main.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>, "
	      "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns condition code.
 * Note: For IQD unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;
	return cc;
}

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* no buffer processed */
	if (ccq == 97)
		return 1;
	/* not all buffers processed */
	if (ccq == 96)
		return 2;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);
	if (!rc)
		return count - tmp_count;

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc == 2) {
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		/*
		 * Retry once, if that fails bail out and process the
		 * extracted buffers before trying again.
		 */
		if (!retried++)
			goto again;
		else
			return count - tmp_count;
	}

	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}
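
/*
 * Worked example (illustrative only): if input buffers 5..9 are PRIMED
 * and buffer 10 is EMPTY, qdio_do_eqbs(q, &state, 5, 10, 0) stops at the
 * state change and returns 5 with *state == SLSB_P_INPUT_PRIMED. A CCQ
 * of 96 (partial completion) is retried once before the already
 * extracted buffers are handed back to the caller.
 */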

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (!rc) {
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	}

	if (rc == 1 || rc == 2) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}

	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i;

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state) {
			__state = q->slsb.val[bufnr];
			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
				__state = SLSB_P_OUTPUT_EMPTY;
		} else if (merge_pending) {
			if ((q->slsb.val[bufnr] & __state) != __state)
				break;
		} else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}
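
/*
 * Example (illustrative only): with merge_pending set, a mixed run of
 * OUTPUT_PENDING and OUTPUT_EMPTY buffers is reported as one block of
 * OUTPUT_EMPTY, so plain completions and completions with an AOB can be
 * swept up in a single call:
 *
 *	unsigned char state;
 *	int n = get_buf_states(q, q->first_to_check, &state,
 *			       QDIO_MAX_BUFFERS_MASK, 0, 1);
 */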

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
	unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;
	unsigned long laob = 0;

	WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) ||
			     !q->u.out.use_cq));
	if (q->u.out.use_cq && aob != 0) {
		fc = QDIO_SIGA_WRITEQ;
		laob = aob;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		retries++;

		if (!start_time) {
			start_time = get_tod_clock_fast();
			goto again;
		}
		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
	int pos;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	pos = ilog2(count);
	q->q_stats.nr_sbals[pos]++;
}
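
/*
 * Worked example: the histogram buckets are powers of two, so a count
 * of 32 lands in nr_sbals[ilog2(32)] == nr_sbals[5], while the maximum
 * count of QDIO_MAX_BUFFERS_MASK (127) is special-cased into bucket 7.
 */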

static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
	    q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		goto set;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].sflags,
		  q->sbal[q->first_to_check]->element[15].sflags);

set:
	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACKs */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
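
/*
 * Worked example for the non-QEBSM path: with first_to_check == 10 and
 * count == 4, buffer 13 is ACKed, ack_start becomes 13, and buffers
 * 10..12 are set back to NOT_INIT so the adapter keeps generating
 * interrupts for new work.
 */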

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_tod_clock_fast();

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga-sync needed here: a PCI interrupt or the thin-interrupt
	 * handler has already synced the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
			q->nr, q->first_to_check);
		break;
	default:
		WARN_ON_ONCE(1);
	}
out:
	return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if (bufnr != q->last_move) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_tod_clock();
		return 1;
	} else
		return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}

static inline int contains_aobs(struct qdio_q *q)
{
	return !q->is_input_q && q->u.out.use_cq;
}

static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
	unsigned char state = 0;
	int j, b = start;

	if (!contains_aobs(q))
		return;

	for (j = 0; j < count; ++j) {
		get_buf_state(q, b, &state, 0);
		if (state == SLSB_P_OUTPUT_PENDING) {
			struct qaob *aob = q->u.out.aobs[b];

			/* advance to the next buffer even without an AOB */
			if (aob) {
				q->u.out.sbal_state[b].flags |=
					QDIO_OUTBUF_STATE_FLAG_PENDING;
				q->u.out.aobs[b] = NULL;
			}
		} else if (state == SLSB_P_OUTPUT_EMPTY) {
			q->u.out.sbal_state[b].aob = NULL;
		}
		b = next_buf(b);
	}
}

static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
					int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->use_cq)
		goto out;

	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
		q->sbal_state[bufnr].aob = q->aobs[bufnr];
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		WARN_ON_ONCE(phys_aob & 0xFF);
	}

out:
	return phys_aob;
}

static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	qdio_handle_aobs(q, start, count);

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

static inline int qdio_tasklet_schedule(struct qdio_q *q)
{
	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
		tasklet_schedule(&q->tasklet);
		return 0;
	}
	return -EPERM;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (!qdio_tasklet_schedule(q))
			return;
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		qdio_tasklet_schedule(q);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_tod_clock_fast();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);

		break;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		WARN_ON_ONCE(1);
	}

out:
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if (bufnr != q->last_move) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer_sync(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	qdio_tasklet_schedule(q);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(struct timer_list *t)
{
	struct qdio_q *q = from_timer(q, t, u.out.timer);

	qdio_tasklet_schedule(q);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			qdio_tasklet_schedule(out);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (!qdio_tasklet_schedule(q))
			return;
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		qdio_tasklet_schedule(q);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			tasklet_schedule(&q->tasklet);
		}
	}

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		qdio_tasklet_schedule(q);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int count;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	count = sub_buf(q->first_to_check, q->first_to_kick);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will
	 * happen. Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		ccw_device_get_schid(cdev, &schid);
		DBF_ERROR("qint:%4x", schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		wake_up(&cdev->private->wait_q);
		return;
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	struct subchannel_id schid;

	if (!cdev || !cdev->private)
		return -EINVAL;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("get ssqd:%4x", schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
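
/*
 * A minimal usage sketch (not part of this driver; the ccw device and
 * error handling are assumed to exist in the caller):
 *
 *	struct qdio_ssqd_desc ssqd;
 *
 *	if (qdio_get_ssqd_desc(cdev, &ssqd) == 0)
 *		pr_info("qdio queue format: %u\n", ssqd.qfmt);
 */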

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer_sync(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	WARN_ON_ONCE(irqs_disabled());
	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qshutdown:%4x", schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr);

	/* cleanup subchannel */
	spin_lock_irq(get_ccwdev_lock(cdev));

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irq(get_ccwdev_lock(cdev));

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;

	if (!irq_ptr)
		return -ENODEV;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qfree:%4x", schid.sch_no);
	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
	mutex_lock(&irq_ptr->setup_mutex);

	irq_ptr->debug_area = NULL;
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;

	ccw_device_get_schid(init_data->cdev, &schid);
	DBF_EVENT("qallocate:%4x", schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	if (qdio_allocate_dbf(init_data, irq_ptr))
		goto out_rel;

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
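
/*
 * A minimal lifecycle sketch as seen from a device driver such as qeth
 * or zfcp (illustrative only; the handlers, SBAL arrays, and "my_card"
 * cookie are assumed to be set up by the caller):
 *
 *	struct qdio_initialize init_data = {
 *		.cdev			= cdev,
 *		.q_format		= QDIO_QETH_QFMT,
 *		.no_input_qs		= 1,
 *		.no_output_qs		= 1,
 *		.input_handler		= my_in_handler,
 *		.output_handler		= my_out_handler,
 *		.int_parm		= (unsigned long) my_card,
 *		.input_sbal_addr_array	= in_sbals,
 *		.output_sbal_addr_array	= out_sbals,
 *	};
 *	int rc;
 *
 *	rc = qdio_allocate(&init_data);
 *	if (!rc)
 *		rc = qdio_establish(&init_data);
 *	if (!rc)
 *		rc = qdio_activate(cdev);
 *
 * Teardown runs in reverse: qdio_shutdown() followed by qdio_free().
 */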

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q = irq_ptr->input_qs[0];
	int i, use_cq = 0;

	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
		use_cq = 1;

	for_each_output_queue(irq_ptr, q, i) {
		if (use_cq) {
			if (qdio_enable_async_operation(&q->u.out) < 0) {
				use_cq = 0;
				continue;
			}
		} else
			qdio_disable_async_operation(&q->u.out);
	}
	DBF_EVENT("use_cq:%d", use_cq);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct ccw_device *cdev = init_data->cdev;
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qestablish:%4x", schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qactivate:%4x", schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto out;
	}

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
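
/*
 * Worked example for the wrap-around case: with start == 120 and
 * count == 16, end is add_buf(120, 16) == 8, so buffer numbers 120..127
 * and 0..7 are considered "in between".
 */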

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	atomic_add(count, &q->nr_buf_used);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}
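
/*
 * Worked example for the QEBSM branch above: with ack_start == 5 and
 * ack_count == 4, a caller returning bufnr == 4 and count == 3
 * overwrites buffers 4..6. diff becomes sub_buf(add_buf(4, 3), 5) == 2,
 * leaving ack_count == 2 and ack_start == 7, i.e. only buffers 7 and 8
 * remain ACKed.
 */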
1541
1542/**
1543 * handle_outbound - process filled outbound buffers
1544 * @q: queue containing the buffers
1545 * @callflags: flags
1546 * @bufnr: first buffer to process
1547 * @count: how many buffers are filled
1548 */
1549static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1550                           int bufnr, int count)
1551{
1552        unsigned char state = 0;
1553        int used, rc = 0;
1554
1555        qperf_inc(q, outbound_call);
1556
1557        count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1558        used = atomic_add_return(count, &q->nr_buf_used);
1559
1560        if (used == QDIO_MAX_BUFFERS_PER_Q)
1561                qperf_inc(q, outbound_queue_full);
1562
1563        if (callflags & QDIO_FLAG_PCI_OUT) {
1564                q->u.out.pci_out_enabled = 1;
1565                qperf_inc(q, pci_request_int);
1566        } else
1567                q->u.out.pci_out_enabled = 0;
1568
1569        if (queue_type(q) == QDIO_IQDIO_QFMT) {
1570                unsigned long phys_aob = 0;
1571
1572                /* One SIGA-W per buffer required for unicast HSI */
1573                WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
1574
1575                phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
1576
1577                rc = qdio_kick_outbound_q(q, phys_aob);
1578        } else if (need_siga_sync(q)) {
1579                rc = qdio_siga_sync_q(q);
1580        } else {
1581                /* try to fast requeue buffers */
1582                get_buf_state(q, prev_buf(bufnr), &state, 0);
1583                if (state != SLSB_CU_OUTPUT_PRIMED)
1584                        rc = qdio_kick_outbound_q(q, 0);
1585                else
1586                        qperf_inc(q, fast_requeue);
1587        }
1588
1589        /* in case of SIGA errors we must process the error immediately */
1590        if (used >= q->u.out.scan_threshold || rc)
1591                qdio_tasklet_schedule(q);
1592        else
1593                /* free the SBALs in case of no further traffic */
1594                if (!timer_pending(&q->u.out.timer) &&
1595                    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
1596                        mod_timer(&q->u.out.timer, jiffies + HZ);
1597        return rc;
1598}
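
/*
 * Editor's note on the fast-requeue branch above (an inference from the
 * code, not original author commentary): if the buffer preceding the
 * first newly primed buffer is still SLSB_CU_OUTPUT_PRIMED, the adapter
 * has not yet finished its previous pass over the ring and will pick up
 * the fresh buffers on that same pass, so the extra SIGA-w is skipped.
 */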
1599
1600/**
1601 * do_QDIO - process input or output buffers
1602 * @cdev: associated ccw_device for the qdio subchannel
1603 * @callflags: input or output and special flags from the program
1604 * @q_nr: queue number
1605 * @bufnr: buffer number
1606 * @count: how many buffers to process
1607 */
1608int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1609            int q_nr, unsigned int bufnr, unsigned int count)
1610{
1611        struct qdio_irq *irq_ptr;
1612
1613        if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
1614                return -EINVAL;
1615
1616        irq_ptr = cdev->private->qdio_data;
1617        if (!irq_ptr)
1618                return -ENODEV;
1619
1620        DBF_DEV_EVENT(DBF_INFO, irq_ptr,
1621                      "do%02x b:%02x c:%02x", callflags, bufnr, count);
1622
1623        if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1624                return -EIO;
1625        if (!count)
1626                return 0;
1627        if (callflags & QDIO_FLAG_SYNC_INPUT)
1628                return handle_inbound(irq_ptr->input_qs[q_nr],
1629                                      callflags, bufnr, count);
1630        else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1631                return handle_outbound(irq_ptr->output_qs[q_nr],
1632                                       callflags, bufnr, count);
1633        return -EINVAL;
1634}
1635EXPORT_SYMBOL_GPL(do_QDIO);
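
/*
 * Editor's sketch of a typical do_QDIO() call (not part of this file;
 * cdev, bufnr and count stand for the caller's own state): returning a
 * batch of emptied buffers on input queue 0.
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
 *	if (rc)
 *		return rc;
 *
 * Filled outbound buffers go the same way with QDIO_FLAG_SYNC_OUTPUT,
 * optionally ORed with QDIO_FLAG_PCI_OUT to request a completion
 * interrupt.
 */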
1636
1637/**
1638 * qdio_start_irq - enable interrupt processing for the device
1639 * @cdev: associated ccw_device for the qdio subchannel
1640 * @nr: input queue number
1641 *
1642 * Return codes
1643 *   0 - success
1644 *   1 - irqs not started since new data is available
1645 */
1646int qdio_start_irq(struct ccw_device *cdev, int nr)
1647{
1648        struct qdio_q *q;
1649        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1650
1651        if (!irq_ptr)
1652                return -ENODEV;
1653        q = irq_ptr->input_qs[nr];
1654
1655        clear_nonshared_ind(irq_ptr);
1656        qdio_stop_polling(q);
1657        clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1658
1659        /*
1660         * We need to check again to not lose initiative after
1661         * resetting the ACK state.
1662         */
1663        if (test_nonshared_ind(irq_ptr))
1664                goto rescan;
1665        if (!qdio_inbound_q_done(q))
1666                goto rescan;
1667        return 0;
1668
1669rescan:
1670        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1671                             &q->u.in.queue_irq_state))
1672                return 0;
1673        else
1674                return 1;
1676}
1677EXPORT_SYMBOL(qdio_start_irq);
1678
1679/**
1680 * qdio_get_next_buffers - return the next batch of processed input buffers
1681 * @cdev: associated ccw_device for the qdio subchannel
1682 * @nr: input queue number
1683 * @bufnr: first filled buffer number
1684 * @error: the error state of the returned buffers is stored here
1685 *
1686 * Return codes
1687 *   < 0 - error
1688 *   = 0 - no new buffers found
1689 *   > 0 - number of processed buffers
1690 */
1691int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1692                          int *error)
1693{
1694        struct qdio_q *q;
1695        int start, end;
1696        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1697
1698        if (!irq_ptr)
1699                return -ENODEV;
1700        q = irq_ptr->input_qs[nr];
1701
1702        /*
1703         * Cannot rely on automatic sync after interrupt since queues may
1704         * also be examined without interrupt.
1705         */
1706        if (need_siga_sync(q))
1707                qdio_sync_queues(q);
1708
1709        /* check the PCI capable outbound queues. */
1710        qdio_check_outbound_after_thinint(q);
1711
1712        if (!qdio_inbound_q_moved(q))
1713                return 0;
1714
1715        /* Note: upper-layer MUST stop processing immediately here ... */
1716        if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
1717                return -EIO;
1718
1719        start = q->first_to_kick;
1720        end = q->first_to_check;
1721        *bufnr = start;
1722        *error = q->qdio_error;
1723
1724        /* for the next time */
1725        q->first_to_kick = end;
1726        q->qdio_error = 0;
1727        return sub_buf(end, start);
1728}
1729EXPORT_SYMBOL(qdio_get_next_buffers);
1730
1731/**
1732 * qdio_stop_irq - disable interrupt processing for the device
1733 * @cdev: associated ccw_device for the qdio subchannel
1734 * @nr: input queue number
1735 *
1736 * Return codes
1737 *   0 - interrupts were already disabled
1738 *   1 - interrupts successfully disabled
1739 */
1740int qdio_stop_irq(struct ccw_device *cdev, int nr)
1741{
1742        struct qdio_q *q;
1743        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1744
1745        if (!irq_ptr)
1746                return -ENODEV;
1747        q = irq_ptr->input_qs[nr];
1748
1749        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1750                             &q->u.in.queue_irq_state))
1751                return 0;
1752        else
1753                return 1;
1754}
1755EXPORT_SYMBOL(qdio_stop_irq);
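
/*
 * Editor's sketch of the polling cycle implied by the return-code
 * contracts of qdio_start_irq(), qdio_get_next_buffers() and
 * qdio_stop_irq() above (hypothetical consume() helper, queue 0):
 *
 *	if (qdio_stop_irq(cdev, 0)) {
 *		do {
 *			while ((n = qdio_get_next_buffers(cdev, 0, &bufnr,
 *							  &error)) > 0)
 *				consume(bufnr, n, error);
 *		} while (n >= 0 && qdio_start_irq(cdev, 0) == 1);
 *	}
 */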
1756
1757/**
1758 * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
1759 * @schid:              Subchannel ID.
1760 * @cnc:                Boolean Change-Notification Control
1761 * @response:           Response code will be stored at this address
1762 * @cb:                 Callback function will be executed for each element
1763 *                      of the address list
1764 * @priv:               Opaque pointer, passed unchanged to every @cb
1765 *                      invocation. The callback also receives the entry
1766 *                      type and a pointer to the entry containing an
1767 *                      address of that type.
1768 *
1769 * Performs "Store-network-bridging-information list" operation and calls
1770 * the callback function for every entry in the list. If "change-
1771 * notification-control" is set, further changes in the address list
1772 * will be reported via the IPA command.
1773 */
1774int qdio_pnso_brinfo(struct subchannel_id schid,
1775                int cnc, u16 *response,
1776                void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
1777                                void *entry),
1778                void *priv)
1779{
1780        struct chsc_pnso_area *rr;
1781        int rc;
1782        u32 prev_instance = 0;
1783        int isfirstblock = 1;
1784        int i, size, elems;
1785
1786        rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
1787        if (rr == NULL)
1788                return -ENOMEM;
1789        do {
1790                /* on the first iteration, naihdr.resume_token will be zero */
1791                rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
1792                if (rc != 0 && rc != -EBUSY)
1793                        goto out;
1794                if (rr->response.code != 1) {
1795                        rc = -EIO;
1796                        continue;
1797                } else
1798                        rc = 0;
1799
1800                if (cb == NULL)
1801                        continue;
1802
1803                size = rr->naihdr.naids;
1804                elems = (rr->response.length -
1805                                sizeof(struct chsc_header) -
1806                                sizeof(struct chsc_brinfo_naihdr)) /
1807                                size;
1808
1809                if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
1810                        /* Inform the caller that they need to scrap
1811                         * the data that was already reported via cb */
1812                        rc = -EAGAIN;
1813                        break;
1814                }
1815                isfirstblock = 0;
1816                prev_instance = rr->naihdr.instance;
1817                for (i = 0; i < elems; i++)
1818                        switch (size) {
1819                        case sizeof(struct qdio_brinfo_entry_l3_ipv6):
1820                                (*cb)(priv, l3_ipv6_addr,
1821                                                &rr->entries.l3_ipv6[i]);
1822                                break;
1823                        case sizeof(struct qdio_brinfo_entry_l3_ipv4):
1824                                (*cb)(priv, l3_ipv4_addr,
1825                                                &rr->entries.l3_ipv4[i]);
1826                                break;
1827                        case sizeof(struct qdio_brinfo_entry_l2):
1828                                (*cb)(priv, l2_addr_lnid,
1829                                                &rr->entries.l2[i]);
1830                                break;
1831                        default:
1832                                WARN_ON_ONCE(1);
1833                                rc = -EIO;
1834                                goto out;
1835                        }
1836        } while (rr->response.code == 0x0107 ||  /* channel busy */
1837                  (rr->response.code == 1 && /* list stored */
1838                   /* resume token is non-zero => list incomplete */
1839                   (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
1840        (*response) = rr->response.code;
1841
1842out:
1843        free_page((unsigned long)rr);
1844        return rc;
1845}
1846EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
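
/*
 * Editor's sketch of a qdio_pnso_brinfo() caller (hypothetical names,
 * callback signature taken from the declaration above):
 *
 *	static void brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
 *			      void *entry)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		if (type == l2_addr_lnid)
 *			my_record_l2(ctx, entry);
 *	}
 *
 *	rc = qdio_pnso_brinfo(schid, 1, &response, brinfo_cb, ctx);
 *	if (rc == -EAGAIN)
 *		rc = my_retry_from_scratch(ctx);
 *
 * -EAGAIN signals that the instance changed mid-list, so everything
 * already reported through the callback must be discarded.
 */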
1847
1848static int __init init_QDIO(void)
1849{
1850        int rc;
1851
1852        rc = qdio_debug_init();
1853        if (rc)
1854                return rc;
1855        rc = qdio_setup_init();
1856        if (rc)
1857                goto out_debug;
1858        rc = tiqdio_allocate_memory();
1859        if (rc)
1860                goto out_cache;
1861        rc = tiqdio_register_thinints();
1862        if (rc)
1863                goto out_ti;
1864        return 0;
1865
1866out_ti:
1867        tiqdio_free_memory();
1868out_cache:
1869        qdio_setup_exit();
1870out_debug:
1871        qdio_debug_exit();
1872        return rc;
1873}
1874
1875static void __exit exit_QDIO(void)
1876{
1877        tiqdio_unregister_thinints();
1878        tiqdio_free_memory();
1879        qdio_setup_exit();
1880        qdio_debug_exit();
1881}
1882
1883module_init(init_QDIO);
1884module_exit(exit_QDIO);
1885