linux/drivers/s390/cio/qdio_main.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *            Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
        "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
                               unsigned int out_mask, unsigned int in_mask,
                               unsigned int fc)
{
        register unsigned long __fc asm ("0") = fc;
        register unsigned long __schid asm ("1") = schid;
        register unsigned long out asm ("2") = out_mask;
        register unsigned long in asm ("3") = in_mask;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
        return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
                                unsigned int fc)
{
        register unsigned long __fc asm ("0") = fc;
        register unsigned long __schid asm ("1") = schid;
        register unsigned long __mask asm ("2") = mask;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
        return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQD (HiperSockets) unicast queues only the highest priority
 * queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
                                 unsigned int *bb, unsigned int fc,
                                 unsigned long aob)
{
        register unsigned long __fc asm("0") = fc;
        register unsigned long __schid asm("1") = schid;
        register unsigned long __mask asm("2") = mask;
        register unsigned long __aob asm("3") = aob;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc), "+d" (__fc), "+d" (__aob)
                : "d" (__schid), "d" (__mask)
                : "cc");
        *bb = __fc >> 31;
        return cc;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state differs from the previous buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
                        int start, int count, int auto_ack)
{
        int tmp_count = count, tmp_start = start, nr = q->nr;
        unsigned int ccq = 0;

        qperf_inc(q, eqbs);

        if (!q->is_input_q)
                nr += q->irq_ptr->nr_input_qs;
again:
        ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
                      auto_ack);

        switch (ccq) {
        case 0:
        case 32:
                /* all done, or next buffer state different */
                return count - tmp_count;
        case 96:
                /* not all buffers processed */
                qperf_inc(q, eqbs_partial);
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
                        tmp_count);
                return count - tmp_count;
        case 97:
                /* no buffer processed */
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
                goto again;
        default:
                DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
                DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
                DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
                q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
                           q->first_to_kick, count, q->irq_ptr->int_parm);
                return 0;
        }
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
                        int count)
{
        unsigned int ccq = 0;
        int tmp_count = count, tmp_start = start;
        int nr = q->nr;

        if (!count)
                return 0;
        qperf_inc(q, sqbs);

        if (!q->is_input_q)
                nr += q->irq_ptr->nr_input_qs;
again:
        ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);

        switch (ccq) {
        case 0:
        case 32:
                /* all done, or active buffer adapter-owned */
                WARN_ON_ONCE(tmp_count);
                return count - tmp_count;
        case 96:
                /* not all buffers processed */
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
                qperf_inc(q, sqbs_partial);
                goto again;
        default:
                DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
                DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
                DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
                q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
                           q->first_to_kick, count, q->irq_ptr->int_parm);
                return 0;
        }
}

/*
 * Returns number of examined buffers and their common state in *state.
 * Requested number of buffers-to-examine must be > 0.
 */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
                                 unsigned char *state, unsigned int count,
                                 int auto_ack, int merge_pending)
{
        unsigned char __state = 0;
        int i = 1;

        if (is_qebsm(q))
                return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

        /* get initial state: */
        __state = q->slsb.val[bufnr];

        /* Bail out early if there is no work on the queue: */
        if (__state & SLSB_OWNER_CU)
                goto out;

        if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
                __state = SLSB_P_OUTPUT_EMPTY;

        for (; i < count; i++) {
                bufnr = next_buf(bufnr);

                /* merge PENDING into EMPTY: */
                if (merge_pending &&
                    q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
                    __state == SLSB_P_OUTPUT_EMPTY)
                        continue;

                /* stop if next state differs from initial state: */
                if (q->slsb.val[bufnr] != __state)
                        break;
        }

out:
        *state = __state;
        return i;
}
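
/*
 * Worked example for get_buf_states(): with bufnr = 5, count = 4 and the
 * SLSB holding { PRIMED, PRIMED, EMPTY, PRIMED } for buffers 5..8, the scan
 * breaks at buffer 7 and returns 2 with *state == SLSB_P_INPUT_PRIMED.
 * With merge_pending set, OUTPUT_PENDING buffers are reported as
 * OUTPUT_EMPTY, so a run of mixed PENDING/EMPTY buffers still counts as one
 * contiguous batch.
 */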

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
                                unsigned char *state, int auto_ack)
{
        return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
                                 unsigned char state, int count)
{
        int i;

        if (is_qebsm(q))
                return qdio_do_sqbs(q, state, bufnr, count);

        for (i = 0; i < count; i++) {
                xchg(&q->slsb.val[bufnr], state);
                bufnr = next_buf(bufnr);
        }
        return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
                                unsigned char state)
{
        return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;
        int i;

        for_each_input_queue(irq_ptr, q, i)
                set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
                               QDIO_MAX_BUFFERS_PER_Q);
        for_each_output_queue(irq_ptr, q, i)
                set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
                               QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
                          unsigned int input)
{
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_SYNC;
        int cc;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
        qperf_inc(q, siga_sync);

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }

        cc = do_siga_sync(schid, output, input, fc);
        if (unlikely(cc))
                DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
        return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
        if (q->is_input_q)
                return qdio_siga_sync(q, 0, q->mask);
        else
                return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
        unsigned long aob)
{
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_WRITE;
        u64 start_time = 0;
        int retries = 0, cc;
        unsigned long laob = 0;

        if (aob) {
                fc = QDIO_SIGA_WRITEQ;
                laob = aob;
        }

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }
again:
        cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

        /* HiperSockets busy condition */
        if (unlikely(*busy_bit)) {
                retries++;

                if (!start_time) {
                        start_time = get_tod_clock_fast();
                        goto again;
                }
                if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
                        goto again;
        }
        if (retries) {
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
                              "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
        }
        return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_READ;
        int cc;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
        qperf_inc(q, siga_read);

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }

        cc = do_siga_input(schid, q->mask, fc);
        if (unlikely(cc))
                DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
        return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
        /* PCI capable outbound queues will also be scanned so sync them too */
        if (pci_out_supported(q->irq_ptr))
                qdio_siga_sync_all(q);
        else
                qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
                        unsigned char *state)
{
        if (need_siga_sync(q))
                qdio_siga_sync_q(q);
        return get_buf_state(q, bufnr, state, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
        if (!q->u.in.polling)
                return;

        q->u.in.polling = 0;
        qperf_inc(q, stop_polling);

        /* show the card that we are not polling anymore */
        if (is_qebsm(q)) {
                set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
                               q->u.in.ack_count);
                q->u.in.ack_count = 0;
        } else
                set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
        int pos;

        q->q_stats.nr_sbal_total += count;
        if (count == QDIO_MAX_BUFFERS_MASK) {
                q->q_stats.nr_sbals[7]++;
                return;
        }
        pos = ilog2(count);
        q->q_stats.nr_sbals[pos]++;
}

static void process_buffer_error(struct qdio_q *q, unsigned int start,
                                 int count)
{
        unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
                                        SLSB_P_OUTPUT_NOT_INIT;

        q->qdio_error = QDIO_ERROR_SLSB_STATE;

        /* special handling for no target buffer empty */
        if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
            q->sbal[start]->element[15].sflags == 0x10) {
                qperf_inc(q, target_full);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
                goto set;
        }

        DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
        DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
        DBF_ERROR("FTC:%3d C:%3d", start, count);
        DBF_ERROR("F14:%2x F15:%2x",
                  q->sbal[start]->element[14].sflags,
                  q->sbal[start]->element[15].sflags);

set:
        /*
         * Interrupts may be avoided as long as the error is present
         * so change the buffer state immediately to avoid starvation.
         */
        set_buf_states(q, start, state, count);
}

static inline void inbound_primed(struct qdio_q *q, unsigned int start,
                                  int count)
{
        int new;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);

        /* for QEBSM the ACK was already set by EQBS */
        if (is_qebsm(q)) {
                if (!q->u.in.polling) {
                        q->u.in.polling = 1;
                        q->u.in.ack_count = count;
                        q->u.in.ack_start = start;
                        return;
                }

                /* delete the previous ACKs */
                set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
                               q->u.in.ack_count);
                q->u.in.ack_count = count;
                q->u.in.ack_start = start;
                return;
        }

        /*
         * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
         * or by the next inbound run.
         */
        new = add_buf(start, count - 1);
        if (q->u.in.polling) {
                /* reset the previous ACK but first set the new one */
                set_buf_state(q, new, SLSB_P_INPUT_ACK);
                set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
        } else {
                q->u.in.polling = 1;
                set_buf_state(q, new, SLSB_P_INPUT_ACK);
        }

        q->u.in.ack_start = new;
        count--;
        if (!count)
                return;
        /* need to change ALL buffers to get more interrupts */
        set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
        unsigned char state = 0;
        int count;

        q->timestamp = get_tod_clock_fast();

        /*
         * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
         * would return 0.
         */
        count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
        if (!count)
                return 0;

        /*
         * No siga-sync needed here: either a PCI interrupt or the
         * thin-interrupt handler has already synced the queues.
         */
        count = get_buf_states(q, start, &state, count, 1, 0);
        if (!count)
                return 0;

        switch (state) {
        case SLSB_P_INPUT_PRIMED:
                inbound_primed(q, start, count);
                if (atomic_sub_return(count, &q->nr_buf_used) == 0)
                        qperf_inc(q, inbound_queue_full);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals(q, count);
                return count;
        case SLSB_P_INPUT_ERROR:
                process_buffer_error(q, start, count);
                if (atomic_sub_return(count, &q->nr_buf_used) == 0)
                        qperf_inc(q, inbound_queue_full);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals_error(q, count);
                return count;
        case SLSB_CU_INPUT_EMPTY:
        case SLSB_P_INPUT_NOT_INIT:
        case SLSB_P_INPUT_ACK:
                if (q->irq_ptr->perf_stat_enabled)
                        q->q_stats.nr_sbal_nop++;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
                              q->nr, start);
                return 0;
        default:
                WARN_ON_ONCE(1);
                return 0;
        }
}

static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
{
        int count;

        count = get_inbound_buffer_frontier(q, start);

        if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
                q->u.in.timestamp = get_tod_clock();

        return count;
}

static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
        unsigned char state = 0;

        if (!atomic_read(&q->nr_buf_used))
                return 1;

        if (need_siga_sync(q))
                qdio_siga_sync_q(q);
        get_buf_state(q, start, &state, 0);

        if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
                /* more work coming */
                return 0;

        if (is_thinint_irq(q->irq_ptr))
                return 1;

        /* don't poll under z/VM */
        if (MACHINE_IS_VM)
                return 1;

        /*
         * At this point we know that inbound first_to_check
         * has (probably) not moved (see qdio_inbound_processing).
         */
        if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start);
                return 1;
        } else
                return 0;
}

static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
        unsigned char state = 0;
        int j, b = start;

        for (j = 0; j < count; ++j) {
                get_buf_state(q, b, &state, 0);
                if (state == SLSB_P_OUTPUT_PENDING) {
                        struct qaob *aob = q->u.out.aobs[b];
                        if (aob == NULL)
                                continue;

                        q->u.out.sbal_state[b].flags |=
                                QDIO_OUTBUF_STATE_FLAG_PENDING;
                        q->u.out.aobs[b] = NULL;
                }
                b = next_buf(b);
        }
}

static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
                                        int bufnr)
{
        unsigned long phys_aob = 0;

        if (!q->aobs[bufnr]) {
                struct qaob *aob = qdio_allocate_aob();
                q->aobs[bufnr] = aob;
        }
        if (q->aobs[bufnr]) {
                q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
                phys_aob = virt_to_phys(q->aobs[bufnr]);
                WARN_ON_ONCE(phys_aob & 0xFF);
        }

        q->sbal_state[bufnr].flags = 0;
        return phys_aob;
}

static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
{
        int start = q->first_to_kick;

        if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;

        if (q->is_input_q) {
                qperf_inc(q, inbound_handler);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
        } else {
                qperf_inc(q, outbound_handler);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
                              start, count);
                if (q->u.out.use_cq)
                        qdio_handle_aobs(q, start, count);
        }

        q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
                   q->irq_ptr->int_parm);

        /* for the next time */
        q->first_to_kick = add_buf(start, count);
        q->qdio_error = 0;
}

static inline int qdio_tasklet_schedule(struct qdio_q *q)
{
        if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
                tasklet_schedule(&q->tasklet);
                return 0;
        }
        return -EPERM;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
        unsigned int start = q->first_to_check;
        int count;

        qperf_inc(q, tasklet_inbound);

        count = qdio_inbound_q_moved(q, start);
        if (count == 0)
                return;

        start = add_buf(start, count);
        q->first_to_check = start;
        qdio_kick_handler(q, count);

        if (!qdio_inbound_q_done(q, start)) {
                /* means poll time is not yet over */
                qperf_inc(q, tasklet_inbound_resched);
                if (!qdio_tasklet_schedule(q))
                        return;
        }

        qdio_stop_polling(q);
        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (!qdio_inbound_q_done(q, start)) {
                qperf_inc(q, tasklet_inbound_resched2);
                qdio_tasklet_schedule(q);
        }
}

void qdio_inbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
        unsigned char state = 0;
        int count;

        q->timestamp = get_tod_clock_fast();

        if (need_siga_sync(q))
                if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
                    !pci_out_supported(q->irq_ptr)) ||
                    (queue_type(q) == QDIO_IQDIO_QFMT &&
                    multicast_outbound(q)))
                        qdio_siga_sync_q(q);

        count = atomic_read(&q->nr_buf_used);
        if (!count)
                return 0;

        count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
        if (!count)
                return 0;

        switch (state) {
        case SLSB_P_OUTPUT_EMPTY:
        case SLSB_P_OUTPUT_PENDING:
                /* the adapter got it */
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
                        "out empty:%1d %02x", q->nr, count);

                atomic_sub(count, &q->nr_buf_used);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals(q, count);
                return count;
        case SLSB_P_OUTPUT_ERROR:
                process_buffer_error(q, start, count);
                atomic_sub(count, &q->nr_buf_used);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals_error(q, count);
                return count;
        case SLSB_CU_OUTPUT_PRIMED:
                /* the adapter has not fetched the output yet */
                if (q->irq_ptr->perf_stat_enabled)
                        q->q_stats.nr_sbal_nop++;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
                              q->nr);
                return 0;
        case SLSB_P_OUTPUT_NOT_INIT:
        case SLSB_P_OUTPUT_HALTED:
                return 0;
        default:
                WARN_ON_ONCE(1);
                return 0;
        }
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
        return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
{
        int count;

        count = get_outbound_buffer_frontier(q, start);

        if (count)
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);

        return count;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
        int retries = 0, cc;
        unsigned int busy_bit;

        if (!need_siga_out(q))
                return 0;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
        qperf_inc(q, siga_write);

        cc = qdio_siga_output(q, &busy_bit, aob);
        switch (cc) {
        case 0:
                break;
        case 2:
                if (busy_bit) {
                        while (++retries < QDIO_BUSY_BIT_RETRIES) {
                                mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
                                goto retry;
                        }
                        DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
                        cc = -EBUSY;
                } else {
                        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
                        cc = -ENOBUFS;
                }
                break;
        case 1:
        case 3:
                DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
                cc = -EIO;
                break;
        }
        if (retries) {
                DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
                DBF_ERROR("count:%u", retries);
        }
        return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
        unsigned int start = q->first_to_check;
        int count;

        qperf_inc(q, tasklet_outbound);
        WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);

        count = qdio_outbound_q_moved(q, start);
        if (count) {
                q->first_to_check = add_buf(start, count);
                qdio_kick_handler(q, count);
        }

        if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
            !qdio_outbound_q_done(q))
                goto sched;

        if (q->u.out.pci_out_enabled)
                return;

        /*
         * Now we know that queue type is either qeth without pci enabled
         * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
         * is noticed and outbound_handler is called after some time.
         */
        if (qdio_outbound_q_done(q))
                del_timer_sync(&q->u.out.timer);
        else
                if (!timer_pending(&q->u.out.timer) &&
                    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
                        mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
        return;

sched:
        qdio_tasklet_schedule(q);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __qdio_outbound_processing(q);
}

void qdio_outbound_timer(struct timer_list *t)
{
        struct qdio_q *q = from_timer(q, t, u.out.timer);

        qdio_tasklet_schedule(q);
}

static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
        struct qdio_q *out;
        int i;

        if (!pci_out_supported(irq))
                return;

        for_each_output_queue(irq, out, i)
                if (!qdio_outbound_q_done(out))
                        qdio_tasklet_schedule(out);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
        unsigned int start = q->first_to_check;
        int count;

        qperf_inc(q, tasklet_inbound);
        if (need_siga_sync(q) && need_siga_sync_after_ai(q))
                qdio_sync_queues(q);

        /* The interrupt could be caused by a PCI request: */
        qdio_check_outbound_pci_queues(q->irq_ptr);

        count = qdio_inbound_q_moved(q, start);
        if (count == 0)
                return;

        start = add_buf(start, count);
        q->first_to_check = start;
        qdio_kick_handler(q, count);

        if (!qdio_inbound_q_done(q, start)) {
                qperf_inc(q, tasklet_inbound_resched);
                if (!qdio_tasklet_schedule(q))
                        return;
        }

        qdio_stop_polling(q);
        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (!qdio_inbound_q_done(q, start)) {
                qperf_inc(q, tasklet_inbound_resched2);
                qdio_tasklet_schedule(q);
        }
}

void tiqdio_inbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
                                  enum qdio_irq_states state)
{
        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

        irq_ptr->state = state;
        mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
        if (irb->esw.esw0.erw.cons) {
                DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
                DBF_ERROR_HEX(irb, 64);
                DBF_ERROR_HEX(irb->ecw, 64);
        }
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
        int i;
        struct qdio_q *q;

        if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;

        for_each_input_queue(irq_ptr, q, i) {
                if (q->u.in.queue_start_poll) {
                        /* skip if polling is enabled or already in work */
                        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
                                     &q->u.in.queue_irq_state)) {
                                qperf_inc(q, int_discarded);
                                continue;
                        }
                        q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
                                                 q->irq_ptr->int_parm);
                } else {
                        tasklet_schedule(&q->tasklet);
                }
        }

        if (!pci_out_supported(irq_ptr))
                return;

        for_each_output_queue(irq_ptr, q, i) {
                if (qdio_outbound_q_done(q))
                        continue;
                if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
                        qdio_siga_sync_q(q);
                qdio_tasklet_schedule(q);
        }
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
                                unsigned long intparm, int cstat, int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;
        int count;

        DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
        DBF_ERROR("intp :%lx", intparm);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

        if (irq_ptr->nr_input_qs) {
                q = irq_ptr->input_qs[0];
        } else if (irq_ptr->nr_output_qs) {
                q = irq_ptr->output_qs[0];
        } else {
                dump_stack();
                goto no_handler;
        }

        count = sub_buf(q->first_to_check, q->first_to_kick);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
                   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
        /*
         * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will
         * happen. Therefore we call the LGR detection function here.
         */
        lgr_info_log();
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
                                      int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

        if (cstat)
                goto error;
        if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
                goto error;
        if (!(dstat & DEV_STAT_DEV_END))
                goto error;
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
        return;

error:
        DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct subchannel_id schid;
        int cstat, dstat;

        if (!intparm || !irq_ptr) {
                ccw_device_get_schid(cdev, &schid);
                DBF_ERROR("qint:%4x", schid.sch_no);
                return;
        }

        if (irq_ptr->perf_stat_enabled)
                irq_ptr->perf_stat.qdio_int++;

        if (IS_ERR(irb)) {
                DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
                wake_up(&cdev->private->wait_q);
                return;
        }
        qdio_irq_check_sense(irq_ptr, irb);
        cstat = irb->scsw.cmd.cstat;
        dstat = irb->scsw.cmd.dstat;

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_INACTIVE:
                qdio_establish_handle_irq(cdev, cstat, dstat);
                break;
        case QDIO_IRQ_STATE_CLEANUP:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
                break;
        case QDIO_IRQ_STATE_ESTABLISHED:
        case QDIO_IRQ_STATE_ACTIVE:
                if (cstat & SCHN_STAT_PCI) {
                        qdio_int_handler_pci(irq_ptr);
                        return;
                }
                if (cstat || dstat)
                        qdio_handle_activate_check(cdev, intparm, cstat,
                                                   dstat);
                break;
        case QDIO_IRQ_STATE_STOPPED:
                break;
        default:
                WARN_ON_ONCE(1);
        }
        wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
                       struct qdio_ssqd_desc *data)
{
        struct subchannel_id schid;

        if (!cdev || !cdev->private)
                return -EINVAL;

        ccw_device_get_schid(cdev, &schid);
        DBF_EVENT("get ssqd:%4x", schid.sch_no);
        return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
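
/*
 * Example (illustrative sketch, names are hypothetical): a driver that
 * wants to inspect the subchannel's QDIO capabilities can query the SSQD
 * for an already-probed ccw device:
 *
 *    struct qdio_ssqd_desc ssqd;
 *    int rc;
 *
 *    rc = qdio_get_ssqd_desc(my_cdev, &ssqd);
 *    if (rc)
 *            return rc;
 *    // ssqd now holds the chsc results for my_cdev's subchannel
 */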

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;
        int i;

        for_each_input_queue(irq_ptr, q, i)
                tasklet_kill(&q->tasklet);

        for_each_output_queue(irq_ptr, q, i) {
                del_timer_sync(&q->u.out.timer);
                tasklet_kill(&q->tasklet);
        }
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct subchannel_id schid;
        int rc;

        if (!irq_ptr)
                return -ENODEV;

        WARN_ON_ONCE(irqs_disabled());
        ccw_device_get_schid(cdev, &schid);
        DBF_EVENT("qshutdown:%4x", schid.sch_no);

        mutex_lock(&irq_ptr->setup_mutex);
        /*
         * Subchannel was already shot down. We cannot prevent being called
         * twice since cio may trigger a shutdown asynchronously.
         */
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                mutex_unlock(&irq_ptr->setup_mutex);
                return 0;
        }

        /*
         * Indicate that the device is going down. Scheduling the queue
         * tasklets is forbidden from here on.
         */
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

        tiqdio_remove_input_queues(irq_ptr);
        qdio_shutdown_queues(cdev);
        qdio_shutdown_debug_entries(irq_ptr);

        /* cleanup subchannel */
        spin_lock_irq(get_ccwdev_lock(cdev));

        if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
                rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
        else
                /* default behaviour is halt */
                rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
        if (rc) {
                DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4d", rc);
                goto no_cleanup;
        }

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
        spin_unlock_irq(get_ccwdev_lock(cdev));
        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR,
                10 * HZ);
        spin_lock_irq(get_ccwdev_lock(cdev));

no_cleanup:
        qdio_shutdown_thinint(irq_ptr);

        /* restore interrupt handler */
        if ((void *)cdev->handler == (void *)qdio_int_handler) {
                cdev->handler = irq_ptr->orig_handler;
                cdev->private->intparm = 0;
        }
        spin_unlock_irq(get_ccwdev_lock(cdev));

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        mutex_unlock(&irq_ptr->setup_mutex);
        if (rc)
                return rc;
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct subchannel_id schid;

        if (!irq_ptr)
                return -ENODEV;

        ccw_device_get_schid(cdev, &schid);
        DBF_EVENT("qfree:%4x", schid.sch_no);
        DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
        mutex_lock(&irq_ptr->setup_mutex);

        irq_ptr->debug_area = NULL;
        cdev->private->qdio_data = NULL;
        mutex_unlock(&irq_ptr->setup_mutex);

        qdio_release_memory(irq_ptr);
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
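
/*
 * Example (sketch): the teardown order used by qdio drivers. The subchannel
 * is shut down first, then the qdio data structures are released.
 * "my_cdev" is a hypothetical, previously activated ccw device.
 *
 *    qdio_shutdown(my_cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *    qdio_free(my_cdev);
 */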

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
        struct subchannel_id schid;
        struct qdio_irq *irq_ptr;

        ccw_device_get_schid(init_data->cdev, &schid);
        DBF_EVENT("qallocate:%4x", schid.sch_no);

        if ((init_data->no_input_qs && !init_data->input_handler) ||
            (init_data->no_output_qs && !init_data->output_handler))
                return -EINVAL;

        if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
            (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
                return -EINVAL;

        if ((!init_data->input_sbal_addr_array) ||
            (!init_data->output_sbal_addr_array))
                return -EINVAL;

        /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
        irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!irq_ptr)
                goto out_err;

        mutex_init(&irq_ptr->setup_mutex);
        if (qdio_allocate_dbf(init_data, irq_ptr))
                goto out_rel;

        /*
         * Allocate a page for the chsc calls in qdio_establish.
         * Must be pre-allocated since a zfcp recovery will call
         * qdio_establish. In case of low memory and swap on a zfcp disk
         * we may not be able to allocate memory otherwise.
         */
        irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
        if (!irq_ptr->chsc_page)
                goto out_rel;

        /* qdr is used in ccw1.cda which is u32 */
        irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!irq_ptr->qdr)
                goto out_rel;

        if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
                             init_data->no_output_qs))
                goto out_rel;

        init_data->cdev->private->qdio_data = irq_ptr;
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        return 0;
out_rel:
        qdio_release_memory(irq_ptr);
out_err:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
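
/*
 * Example (sketch): minimal initialization data for qdio_allocate(). Only
 * the fields checked in this file are shown; real drivers (qeth, zfcp)
 * fill in more. "my_cdev", "my_in_handler", "my_out_handler" and the SBAL
 * arrays are hypothetical; the handlers must match the signature used for
 * q->handler() above (cdev, qdio_error, queue nr, first buffer, count,
 * int_parm).
 *
 *    struct qdio_initialize init_data = {};
 *
 *    init_data.cdev = my_cdev;
 *    init_data.no_input_qs = 1;
 *    init_data.no_output_qs = 1;
 *    init_data.input_handler = my_in_handler;
 *    init_data.output_handler = my_out_handler;
 *    init_data.input_sbal_addr_array = my_in_sbals;
 *    init_data.output_sbal_addr_array = my_out_sbals;
 *
 *    if (qdio_allocate(&init_data))
 *            goto out_error;
 */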

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q = irq_ptr->input_qs[0];
        int i, use_cq = 0;

        if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
                use_cq = 1;

        for_each_output_queue(irq_ptr, q, i) {
                if (use_cq) {
                        if (multicast_outbound(q))
                                continue;
                        if (qdio_enable_async_operation(&q->u.out) < 0) {
                                use_cq = 0;
                                continue;
                        }
                } else
                        qdio_disable_async_operation(&q->u.out);
        }
        DBF_EVENT("use_cq:%d", use_cq);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
        struct ccw_device *cdev = init_data->cdev;
        struct subchannel_id schid;
        struct qdio_irq *irq_ptr;
        int rc;

        ccw_device_get_schid(cdev, &schid);
        DBF_EVENT("qestablish:%4x", schid.sch_no);

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        mutex_lock(&irq_ptr->setup_mutex);
        qdio_setup_irq(init_data);

        rc = qdio_establish_thinint(irq_ptr);
        if (rc) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
        }

        /* establish q */
        irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->equeue.count;
        irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

        spin_lock_irq(get_ccwdev_lock(cdev));
        ccw_device_set_options_mask(cdev, 0);

        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
        spin_unlock_irq(get_ccwdev_lock(cdev));
        if (rc) {
                DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
        }

        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

        if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return -EIO;
        }

        qdio_setup_ssqd_info(irq_ptr);

        qdio_detect_hsicq(irq_ptr);

        /* qebsm is now setup if available, initialize buffer states */
        qdio_init_buf_states(irq_ptr);

        mutex_unlock(&irq_ptr->setup_mutex);
        qdio_print_subchannel_info(irq_ptr, cdev);
        qdio_setup_debug_entries(irq_ptr, cdev);
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
        struct subchannel_id schid;
        struct qdio_irq *irq_ptr;
        int rc;

        ccw_device_get_schid(cdev, &schid);
        DBF_EVENT("qactivate:%4x", schid.sch_no);

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        mutex_lock(&irq_ptr->setup_mutex);
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                rc = -EBUSY;
                goto out;
        }

        irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->aqueue.count;
        irq_ptr->ccw.cda = 0;

        spin_lock_irq(get_ccwdev_lock(cdev));
        ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
                              0, DOIO_DENY_PREFETCH);
        spin_unlock_irq(get_ccwdev_lock(cdev));
        if (rc) {
                DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
                goto out;
        }

        if (is_thinint_irq(irq_ptr))
                tiqdio_add_input_queues(irq_ptr);

        /* wait for subchannel to become active */
        msleep(5);

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_STOPPED:
        case QDIO_IRQ_STATE_ERR:
                rc = -EIO;
                break;
        default:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
                rc = 0;
        }
out:
        mutex_unlock(&irq_ptr->setup_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
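
/*
 * Example (sketch): the bring-up sequence implemented by this file is
 * allocate -> establish -> activate, using the init_data from the
 * qdio_allocate() sketch above. Note that qdio_establish() already shuts
 * the subchannel down itself on failure, so only qdio_free() remains for
 * the caller in that case.
 *
 *    rc = qdio_allocate(&init_data);
 *    if (rc)
 *            return rc;
 *    rc = qdio_establish(&init_data);
 *    if (rc)
 *            goto out_free;
 *    rc = qdio_activate(init_data.cdev);
 *    if (rc)
 *            goto out_shutdown;
 */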

static inline int buf_in_between(int bufnr, int start, int count)
{
        int end = add_buf(start, count);

        if (end > start) {
                if (bufnr >= start && bufnr < end)
                        return 1;
                else
                        return 0;
        }

        /* wrap-around case */
        if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
            (bufnr < end))
                return 1;
        else
                return 0;
}
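
/*
 * Worked example: with QDIO_MAX_BUFFERS_PER_Q == 128, start = 120 and
 * count = 16, end = add_buf(120, 16) = 8, so the range wraps and covers
 * buffers 120..127 and 0..7. Thus buf_in_between(3, 120, 16) == 1 while
 * buf_in_between(10, 120, 16) == 0.
 */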
1468
1469/**
1470 * handle_inbound - reset processed input buffers
1471 * @q: queue containing the buffers
1472 * @callflags: flags
1473 * @bufnr: first buffer to process
1474 * @count: how many buffers are emptied
1475 */
1476static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1477                          int bufnr, int count)
1478{
1479        int diff;
1480
1481        qperf_inc(q, inbound_call);
1482
1483        if (!q->u.in.polling)
1484                goto set;
1485
1486        /* protect against stop polling setting an ACK for an emptied slsb */
1487        if (count == QDIO_MAX_BUFFERS_PER_Q) {
1488                /* overwriting everything, just delete polling status */
1489                q->u.in.polling = 0;
1490                q->u.in.ack_count = 0;
1491                goto set;
1492        } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
1493                if (is_qebsm(q)) {
1494                        /* partial overwrite, just update ack_start */
1495                        diff = add_buf(bufnr, count);
1496                        diff = sub_buf(diff, q->u.in.ack_start);
1497                        q->u.in.ack_count -= diff;
1498                        if (q->u.in.ack_count <= 0) {
1499                                q->u.in.polling = 0;
1500                                q->u.in.ack_count = 0;
1501                                goto set;
1502                        }
1503                        q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
1504                }
1505                else
1506                        /* the only ACK will be deleted, so stop polling */
1507                        q->u.in.polling = 0;
1508        }
1509
1510set:
1511        count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
1512        atomic_add(count, &q->nr_buf_used);
1513
1514        if (need_siga_in(q))
1515                return qdio_siga_input(q);
1516
1517        return 0;
1518}
1519
1520/**
1521 * handle_outbound - process filled outbound buffers
1522 * @q: queue containing the buffers
1523 * @callflags: flags
1524 * @bufnr: first buffer to process
1525 * @count: how many buffers are filled
1526 */
1527static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1528                           int bufnr, int count)
1529{
1530        unsigned char state = 0;
1531        int used, rc = 0;
1532
1533        qperf_inc(q, outbound_call);
1534
1535        count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1536        used = atomic_add_return(count, &q->nr_buf_used);
1537
1538        if (used == QDIO_MAX_BUFFERS_PER_Q)
1539                qperf_inc(q, outbound_queue_full);
1540
1541        if (callflags & QDIO_FLAG_PCI_OUT) {
1542                q->u.out.pci_out_enabled = 1;
1543                qperf_inc(q, pci_request_int);
1544        } else
1545                q->u.out.pci_out_enabled = 0;
1546
1547        if (queue_type(q) == QDIO_IQDIO_QFMT) {
1548                unsigned long phys_aob = 0;
1549
1550                /* One SIGA-W per buffer required for unicast HSI */
1551                WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
1552
1553                if (q->u.out.use_cq)
1554                        phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
1555
1556                rc = qdio_kick_outbound_q(q, phys_aob);
1557        } else if (need_siga_sync(q)) {
1558                rc = qdio_siga_sync_q(q);
1559        } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
1560                   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
1561                   state == SLSB_CU_OUTPUT_PRIMED) {
1562                /* The previous buffer is not processed yet, tack on. */
1563                qperf_inc(q, fast_requeue);
1564        } else {
1565                rc = qdio_kick_outbound_q(q, 0);
1566        }
1567
1568        /* in case of SIGA errors we must process the error immediately */
1569        if (used >= q->u.out.scan_threshold || rc)
1570                qdio_tasklet_schedule(q);
1571        else
1572                /* free the SBALs in case of no further traffic */
1573                if (!timer_pending(&q->u.out.timer) &&
1574                    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
1575                        mod_timer(&q->u.out.timer, jiffies + HZ);
1576        return rc;
1577}
1578
1579/**
1580 * do_QDIO - process input or output buffers
1581 * @cdev: associated ccw_device for the qdio subchannel
1582 * @callflags: input or output and special flags from the program
1583 * @q_nr: queue number
1584 * @bufnr: buffer number
1585 * @count: how many buffers to process
1586 */
1587int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1588            int q_nr, unsigned int bufnr, unsigned int count)
1589{
1590        struct qdio_irq *irq_ptr;
1591
1592        if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
1593                return -EINVAL;
1594
1595        irq_ptr = cdev->private->qdio_data;
1596        if (!irq_ptr)
1597                return -ENODEV;
1598
1599        DBF_DEV_EVENT(DBF_INFO, irq_ptr,
1600                      "do%02x b:%02x c:%02x", callflags, bufnr, count);
1601
1602        if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1603                return -EIO;
1604        if (!count)
1605                return 0;
1606        if (callflags & QDIO_FLAG_SYNC_INPUT)
1607                return handle_inbound(irq_ptr->input_qs[q_nr],
1608                                      callflags, bufnr, count);
1609        else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1610                return handle_outbound(irq_ptr->output_qs[q_nr],
1611                                       callflags, bufnr, count);
1612        return -EINVAL;
1613}
1614EXPORT_SYMBOL_GPL(do_QDIO);
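
/*
 * Editor's note -- usage sketch only, kept out of the build with #if 0: how
 * an upper-layer driver such as qeth typically drives do_QDIO().  The queue
 * and buffer numbers below are made up for illustration.
 */
#if 0
static int example_do_qdio(struct ccw_device *cdev)
{
        int rc;

        /* return 16 emptied input buffers on input queue 0, starting at 0 */
        rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, 16);
        if (rc)
                return rc;

        /* submit one filled output buffer and ask for a PCI completion irq */
        return do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
                       0, 5, 1);
}
#endif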
1615
1616/**
1617 * qdio_start_irq - enable interrupt processing for the device
1618 * @cdev: associated ccw_device for the qdio subchannel
1619 * @nr: input queue number
1620 *
1621 * Return codes
1622 *   0 - success
1623 *   1 - irqs not started since new data is available
1624 */
1625int qdio_start_irq(struct ccw_device *cdev, int nr)
1626{
1627        struct qdio_q *q;
1628        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1629
1630        if (!irq_ptr)
1631                return -ENODEV;
1632        q = irq_ptr->input_qs[nr];
1633
1634        clear_nonshared_ind(irq_ptr);
1635        qdio_stop_polling(q);
1636        clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1637
1638        /*
1639         * Check again so that we do not lose the initiative after
1640         * resetting the ACK state.
1641         */
1642        if (test_nonshared_ind(irq_ptr))
1643                goto rescan;
1644        if (!qdio_inbound_q_done(q, q->first_to_check))
1645                goto rescan;
1646        return 0;
1647
1648rescan:
1649        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1650                             &q->u.in.queue_irq_state))
1651                return 0;
1652        else
1653                return 1;
1655}
1656EXPORT_SYMBOL(qdio_start_irq);
1657
1658/**
1659 * qdio_get_next_buffers - process input buffers
1660 * @cdev: associated ccw_device for the qdio subchannel
1661 * @nr: input queue number
1662 * @bufnr: first filled buffer number
1663 * @error: buffers are in error state
1664 *
1665 * Return codes
1666 *   < 0 - error
1667 *   = 0 - no new buffers found
1668 *   > 0 - number of processed buffers
1669 */
1670int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1671                          int *error)
1672{
1673        struct qdio_q *q;
1674        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1675        unsigned int start;
1676        int count;
1677
1678        if (!irq_ptr)
1679                return -ENODEV;
1680        q = irq_ptr->input_qs[nr];
1681        start = q->first_to_check;
1682
1683        /*
1684         * Cannot rely on automatic sync after interrupt since queues may
1685         * also be examined without interrupt.
1686         */
1687        if (need_siga_sync(q))
1688                qdio_sync_queues(q);
1689
1690        qdio_check_outbound_pci_queues(irq_ptr);
1691
1692        count = qdio_inbound_q_moved(q, start);
1693        if (count == 0)
1694                return 0;
1695
1696        start = add_buf(start, count);
1697        q->first_to_check = start;
1698
1699        /* Note: upper-layer MUST stop processing immediately here ... */
1700        if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
1701                return -EIO;
1702
1703        *bufnr = q->first_to_kick;
1704        *error = q->qdio_error;
1705
1706        /* for the next time */
1707        q->first_to_kick = add_buf(q->first_to_kick, count);
1708        q->qdio_error = 0;
1709
1710        return count;
1711}
1712EXPORT_SYMBOL(qdio_get_next_buffers);
1713
1714/**
1715 * qdio_stop_irq - disable interrupt processing for the device
1716 * @cdev: associated ccw_device for the qdio subchannel
1717 * @nr: input queue number
1718 *
1719 * Return codes
1720 *   0 - interrupts were already disabled
1721 *   1 - interrupts successfully disabled
1722 */
1723int qdio_stop_irq(struct ccw_device *cdev, int nr)
1724{
1725        struct qdio_q *q;
1726        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1727
1728        if (!irq_ptr)
1729                return -ENODEV;
1730        q = irq_ptr->input_qs[nr];
1731
1732        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1733                             &q->u.in.queue_irq_state))
1734                return 0;
1735        else
1736                return 1;
1737}
1738EXPORT_SYMBOL(qdio_stop_irq);
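
/*
 * Editor's note -- usage sketch only, kept out of the build with #if 0: the
 * intended interplay of the three polling helpers above, roughly as a
 * NAPI-style poll function would use them.  example_process() and "budget"
 * are hypothetical; queue interrupts are assumed to have been switched off
 * beforehand (via qdio_stop_irq() or the queue_start_poll callback).
 */
#if 0
static int example_poll(struct ccw_device *cdev, int budget)
{
        int bufnr, error, count, work = 0;

        while (work < budget) {
                count = qdio_get_next_buffers(cdev, 0, &bufnr, &error);
                if (count < 0)
                        return count;   /* device is no longer active */
                if (count > 0) {
                        work += example_process(bufnr, count, error);
                        continue;
                }
                /*
                 * Queue is drained: re-enable the interrupt.  A return of
                 * 1 means new data raced in, so keep polling instead.
                 */
                if (qdio_start_irq(cdev, 0) == 0)
                        break;
        }
        return work;
}
#endif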
1739
1740/**
1741 * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
1742 * @schid:              Subchannel ID.
1743 * @cnc:                Boolean Change-Notification Control
1744 * @response:           Response code will be stored at this address
1745 * @cb:                 Callback function will be executed for each element
1746 *                      of the address list
1747 * @priv:               Pointer to pass to the callback function.
1748 *
1749 * Performs "Store-network-bridging-information list" operation and calls
1750 * the callback function for every entry in the list. If "change-
1751 * notification-control" is set, further changes in the address list
1752 * will be reported via the IPA command.
1753 */
1754int qdio_pnso_brinfo(struct subchannel_id schid,
1755                int cnc, u16 *response,
1756                void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
1757                                void *entry),
1758                void *priv)
1759{
1760        struct chsc_pnso_area *rr;
1761        int rc;
1762        u32 prev_instance = 0;
1763        int isfirstblock = 1;
1764        int i, size, elems;
1765
1766        rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
1767        if (rr == NULL)
1768                return -ENOMEM;
1769        do {
1770                /* on the first iteration, naihdr.resume_token will be zero */
1771                rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
1772                if (rc != 0 && rc != -EBUSY)
1773                        goto out;
1774                if (rr->response.code != 1) {
1775                        rc = -EIO;
1776                        continue;
1777                }
1778                rc = 0;
1779
1780                if (cb == NULL)
1781                        continue;
1782
1783                size = rr->naihdr.naids;
1784                elems = (rr->response.length -
1785                                sizeof(struct chsc_header) -
1786                                sizeof(struct chsc_brinfo_naihdr)) /
1787                                size;
1788
1789                if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
1790                        /* Inform the caller that they need to scrap */
1791                        /* the data that was already reported via cb */
1792                        rc = -EAGAIN;
1793                        break;
1794                }
1795                isfirstblock = 0;
1796                prev_instance = rr->naihdr.instance;
1797                for (i = 0; i < elems; i++)
1798                        switch (size) {
1799                        case sizeof(struct qdio_brinfo_entry_l3_ipv6):
1800                                (*cb)(priv, l3_ipv6_addr,
1801                                                &rr->entries.l3_ipv6[i]);
1802                                break;
1803                        case sizeof(struct qdio_brinfo_entry_l3_ipv4):
1804                                (*cb)(priv, l3_ipv4_addr,
1805                                                &rr->entries.l3_ipv4[i]);
1806                                break;
1807                        case sizeof(struct qdio_brinfo_entry_l2):
1808                                (*cb)(priv, l2_addr_lnid,
1809                                                &rr->entries.l2[i]);
1810                                break;
1811                        default:
1812                                WARN_ON_ONCE(1);
1813                                rc = -EIO;
1814                                goto out;
1815                        }
1816        } while (rr->response.code == 0x0107 || /* channel busy */
1817                  (rr->response.code == 1 && /* list stored */
1818                   /* resume token is non-zero => list incomplete */
1819                   (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
1820        (*response) = rr->response.code;
1821
1822out:
1823        free_page((unsigned long)rr);
1824        return rc;
1825}
1826EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
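
/*
 * Editor's note -- usage sketch only, kept out of the build with #if 0:
 * walking the bridge-address list with qdio_pnso_brinfo().  The callback is
 * invoked once per entry; an -EAGAIN return means the list changed while it
 * was being read and everything reported so far must be discarded.  The
 * context struct and callback below are hypothetical.
 */
#if 0
struct example_brinfo_ctx {
        unsigned int l2_entries;
};

static void example_brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
                              void *entry)
{
        struct example_brinfo_ctx *ctx = priv;

        if (type == l2_addr_lnid)       /* entry is a qdio_brinfo_entry_l2 */
                ctx->l2_entries++;
}

static int example_count_l2_entries(struct subchannel_id schid)
{
        struct example_brinfo_ctx ctx = { 0 };
        u16 response;
        int rc;

        rc = qdio_pnso_brinfo(schid, 0, &response, example_brinfo_cb, &ctx);
        if (rc == -EAGAIN)
                ctx.l2_entries = 0;     /* partial data is stale */
        return rc;
}
#endif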
1827
1828static int __init init_QDIO(void)
1829{
1830        int rc;
1831
1832        rc = qdio_debug_init();
1833        if (rc)
1834                return rc;
1835        rc = qdio_setup_init();
1836        if (rc)
1837                goto out_debug;
1838        rc = tiqdio_allocate_memory();
1839        if (rc)
1840                goto out_cache;
1841        rc = tiqdio_register_thinints();
1842        if (rc)
1843                goto out_ti;
1844        return 0;
1845
1846out_ti:
1847        tiqdio_free_memory();
1848out_cache:
1849        qdio_setup_exit();
1850out_debug:
1851        qdio_debug_exit();
1852        return rc;
1853}
1854
1855static void __exit exit_QDIO(void)
1856{
1857        tiqdio_unregister_thinints();
1858        tiqdio_free_memory();
1859        qdio_setup_exit();
1860        qdio_debug_exit();
1861}
1862
1863module_init(init_QDIO);
1864module_exit(exit_QDIO);
1865