linux/drivers/s390/cio/qdio_main.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	      "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
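
/*
 * All of the SIGA wrappers below share one calling convention: the function
 * code goes into general register 0, the subchannel id (or, with QEBSM, the
 * subchannel token) into register 1, and the queue mask(s) into registers 2
 * and 3.  After the SIGA instruction, "ipm" inserts the PSW condition code
 * into the result register and "srl ,28" shifts it down, so the C-level
 * return value is the raw condition code 0-3.
 */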

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;
	return cc;
}
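
/*
 * Unlike the other SIGA variants, SIGA-w updates the function-code register:
 * the adapter reports the busy indication there, which is why __fc is an
 * in/out operand ("+d") and why the shift above extracts the busy bit for
 * the caller.
 */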

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state differs from the previous buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int tmp_count = count, tmp_start = start, nr = q->nr;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or next buffer state different */
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		return count - tmp_count;
	case 97:
		/* no buffer processed */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}
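
/*
 * EQBS decrements tmp_count for every buffer state it extracts, so the
 * remainder directly yields the result: e.g. a call with count = 8 that
 * stops after three equal states comes back with tmp_count = 5 and reports
 * 8 - 5 = 3 examined buffers to the caller.
 */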

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;

	if (!count)
		return 0;
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or active buffer adapter-owned */
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}

/*
 * Returns number of examined buffers and their common state in *state.
 * Requested number of buffers-to-examine must be > 0.
 */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i = 1;

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	/* get initial state: */
	__state = q->slsb.val[bufnr];

	/* Bail out early if there is no work on the queue: */
	if (__state & SLSB_OWNER_CU)
		goto out;

	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
		__state = SLSB_P_OUTPUT_EMPTY;

	for (; i < count; i++) {
		bufnr = next_buf(bufnr);

		/* merge PENDING into EMPTY: */
		if (merge_pending &&
		    q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
		    __state == SLSB_P_OUTPUT_EMPTY)
			continue;

		/* stop if next state differs from initial state: */
		if (q->slsb.val[bufnr] != __state)
			break;
	}

out:
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	/* Ensure that all preceding changes to the SBALs are visible: */
	mb();

	for (i = 0; i < count; i++) {
		WRITE_ONCE(q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}

	/* Make our SLSB changes visible: */
	mb();

	return count;
}
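
/*
 * The SLSB is the ownership handshake between driver and adapter, which is
 * why the two barriers matter: the first one orders all SBAL updates before
 * the state change that passes the buffers to the other side, the second one
 * makes the new states visible before a subsequent SIGA tells the adapter to
 * look at them.
 */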

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int count,
			    unsigned int *busy_bit, unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;

	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
		if (count > 1)
			fc = QDIO_SIGA_WRITEM;
		else if (aob)
			fc = QDIO_SIGA_WRITEQ;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		retries++;

		if (!start_time) {
			start_time = get_tod_clock_fast();
			goto again;
		}
		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}
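
/*
 * The busy-bit loop above spins with some patience: the first busy return
 * merely records a TOD-clock timestamp, and the SIGA is reissued until
 * QDIO_BUSY_BIT_PATIENCE clock units have passed since that first attempt.
 * Only a condition that outlasts the patience window is reported back, where
 * qdio_kick_outbound_q() adds its own mdelay()-based retries on top.
 */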

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q->irq_ptr))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_state(q, bufnr, state, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.batch_count)
		return;

	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
		       q->u.in.batch_count);
	q->u.in.batch_count = 0;
}

static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
	q->q_stats.nr_sbal_total += count;
	q->q_stats.nr_sbals[ilog2(count)]++;
}

static void process_buffer_error(struct qdio_q *q, unsigned int start,
				 int count)
{
	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for the "no target buffer empty" condition */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
	    q->sbal[start]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", start, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[start]->element[14].sflags,
		  q->sbal[start]->element[15].sflags);
}

static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
				       int count, bool auto_ack)
{
	/* ACK the newest SBAL: */
	if (!auto_ack)
		set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);

	if (!q->u.in.batch_count)
		q->u.in.batch_start = start;
	q->u.in.batch_count += count;
}

static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	/*
	 * No siga-sync needed here: either a PCI interrupt or the thin
	 * interrupt handler has already synced the queues.
	 */
	count = get_buf_states(q, start, &state, count, 1, 0);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
			      count);

		inbound_handle_work(q, start, count, is_qebsm(q));
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_INPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
			      count);

		process_buffer_error(q, start, count);
		inbound_handle_work(q, start, count, false);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_INPUT_EMPTY:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
			      q->nr, start);
		return 0;
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* We should never see this state, throw a WARN: */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}
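
/*
 * Inbound SLSB handshake in a nutshell: handle_inbound() hands empty SBALs
 * to the adapter as SLSB_CU_INPUT_EMPTY, the adapter fills them and flips
 * them to SLSB_P_INPUT_PRIMED (or SLSB_P_INPUT_ERROR), the scan above
 * consumes them into the current ACK batch, and qdio_stop_polling() finally
 * resets the batch to SLSB_P_INPUT_NOT_INIT.
 */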

static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
{
	return get_inbound_buffer_frontier(q, start);
}

static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, start, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	return 1;
}

static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
	unsigned char state = 0;
	int j, b = start;

	for (j = 0; j < count; ++j) {
		get_buf_state(q, b, &state, 0);
		if (state == SLSB_P_OUTPUT_PENDING) {
			struct qaob *aob = q->u.out.aobs[b];
			if (aob == NULL)
				continue;

			q->u.out.sbal_state[b].flags |=
				QDIO_OUTBUF_STATE_FLAG_PENDING;
			q->u.out.aobs[b] = NULL;
		}
		b = next_buf(b);
	}
}

static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
						int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		WARN_ON_ONCE(phys_aob & 0xFF);
	}

	q->sbal_state[bufnr].flags = 0;
	return phys_aob;
}
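
/*
 * The AOB address handed to SIGA-wq must have its low eight bits clear,
 * i.e. the qaob must be 256-byte aligned; the WARN_ON_ONCE() above checks
 * that the allocation from qdio_allocate_aob() honours this before the
 * address is passed to the adapter.
 */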

static void qdio_kick_handler(struct qdio_q *q, unsigned int start,
			      unsigned int count)
{
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x",
			      start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->qdio_error = 0;
}

static inline int qdio_tasklet_schedule(struct qdio_q *q)
{
	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
		tasklet_schedule(&q->tasklet);
		return 0;
	}
	return -EPERM;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	unsigned int start = q->first_to_check;
	int count;

	qperf_inc(q, tasklet_inbound);

	count = qdio_inbound_q_moved(q, start);
	if (count == 0)
		return;

	qdio_kick_handler(q, start, count);
	start = add_buf(start, count);
	q->first_to_check = start;

	if (!qdio_inbound_q_done(q, start)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (!qdio_tasklet_schedule(q))
			return;
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q, start)) {
		qperf_inc(q, tasklet_inbound_resched2);
		qdio_tasklet_schedule(q);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q->irq_ptr)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
	case SLSB_P_OUTPUT_PENDING:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, start, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		return 0;
	case SLSB_P_OUTPUT_HALTED:
		return 0;
	case SLSB_P_OUTPUT_NOT_INIT:
		/* We should never see this state, throw a WARN: */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
{
	int count;

	count = get_outbound_buffer_frontier(q, start);

	if (count) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		if (q->u.out.use_cq)
			qdio_handle_aobs(q, start, count);
	}

	return count;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
				unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, count, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}
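
/*
 * Condition-code to errno mapping used above: cc 0 is success; cc 2 with the
 * busy bit set is retried up to QDIO_BUSY_BIT_RETRIES times (with an
 * mdelay(QDIO_BUSY_BIT_RETRY_DELAY) pause per round) before giving up with
 * -EBUSY; cc 2 without the busy bit means the adapter has no buffers and
 * yields -ENOBUFS; cc 1 and cc 3 are hard errors mapped to -EIO.
 */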

static void __qdio_outbound_processing(struct qdio_q *q)
{
	unsigned int start = q->first_to_check;
	int count;

	qperf_inc(q, tasklet_outbound);
	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);

	count = qdio_outbound_q_moved(q, start);
	if (count) {
		q->first_to_check = add_buf(start, count);
		qdio_kick_handler(q, start, count);
	}

	if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
	    !qdio_outbound_q_done(q))
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer_sync(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	qdio_tasklet_schedule(q);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(struct timer_list *t)
{
	struct qdio_q *q = from_timer(q, t, u.out.timer);

	qdio_tasklet_schedule(q);
}

static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(irq) || !irq->scan_threshold)
		return;

	for_each_output_queue(irq, out, i)
		if (!qdio_outbound_q_done(out))
			qdio_tasklet_schedule(out);
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/* The interrupt could be caused by a PCI request: */
	qdio_check_outbound_pci_queues(q->irq_ptr);

	__qdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	if (irq_ptr->irq_poll) {
		if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
			irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
		else
			QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
	} else {
		for_each_input_queue(irq_ptr, q, i)
			tasklet_schedule(&q->tasklet);
	}

	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		qdio_tasklet_schedule(q);
	}
}

static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
				       unsigned long intparm, int cstat,
				       int dstat)
{
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_check, 0, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will
	 * happen. Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}

static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
				      int dstat)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		ccw_device_get_schid(cdev, &schid);
		DBF_ERROR("qint:%4x", schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		wake_up(&cdev->private->wait_q);
		return;
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(irq_ptr, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(irq_ptr, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	struct subchannel_id schid;

	if (!cdev || !cdev->private)
		return -EINVAL;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("get ssqd:%4x", schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer_sync(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	WARN_ON_ONCE(irqs_disabled());
	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qshutdown:%4x", schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_device(irq_ptr);
	qdio_shutdown_queues(irq_ptr);
	qdio_shutdown_debug_entries(irq_ptr);

	/* cleanup subchannel */
	spin_lock_irq(get_ccwdev_lock(cdev));
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);
	qdio_shutdown_irq(irq_ptr);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;

	if (!irq_ptr)
		return -ENODEV;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qfree:%4x", schid.sch_no);
	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
	mutex_lock(&irq_ptr->setup_mutex);

	irq_ptr->debug_area = NULL;
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_free_async_data(irq_ptr);
	qdio_free_queues(irq_ptr);
	free_page((unsigned long) irq_ptr->qdr);
	free_page(irq_ptr->chsc_page);
	free_page((unsigned long) irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @cdev: associated ccw device
 * @no_input_qs: allocate this number of Input Queues
 * @no_output_qs: allocate this number of Output Queues
 */
int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
		  unsigned int no_output_qs)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc = -ENOMEM;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qallocate:%4x", schid.sch_no);

	if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
	    no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		return -ENOMEM;

	irq_ptr->cdev = cdev;
	mutex_init(&irq_ptr->setup_mutex);
	if (qdio_allocate_dbf(irq_ptr))
		goto err_dbf;

	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
		      no_output_qs);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto err_chsc;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto err_qdr;

	rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
	if (rc)
		goto err_queues;

	INIT_LIST_HEAD(&irq_ptr->entry);
	cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;

err_queues:
	free_page((unsigned long) irq_ptr->qdr);
err_qdr:
	free_page(irq_ptr->chsc_page);
err_chsc:
err_dbf:
	free_page((unsigned long) irq_ptr);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q = irq_ptr->input_qs[0];
	int i, use_cq = 0;

	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
		use_cq = 1;

	for_each_output_queue(irq_ptr, q, i) {
		if (use_cq) {
			if (multicast_outbound(q))
				continue;
			if (qdio_enable_async_operation(&q->u.out) < 0) {
				use_cq = 0;
				continue;
			}
		} else
			qdio_disable_async_operation(&q->u.out);
	}
	DBF_EVENT("use_cq:%d", use_cq);
}

static void qdio_trace_init_data(struct qdio_irq *irq,
				 struct qdio_initialize *data)
{
	DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
	DBF_DEV_HEX(irq, data->adapter_name, 8, DBF_ERR);
	DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
	DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
	DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
		      data->no_output_qs);
	DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
		    DBF_ERR);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @cdev: associated ccw device
 * @init_data: initialization data
 */
int qdio_establish(struct ccw_device *cdev,
		   struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qestablish:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	if (init_data->no_input_qs > irq_ptr->max_input_qs ||
	    init_data->no_output_qs > irq_ptr->max_output_qs)
		return -EINVAL;

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if (!init_data->input_sbal_addr_array ||
	    !init_data->output_sbal_addr_array)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_trace_init_data(irq_ptr, init_data);
	qdio_setup_irq(irq_ptr, init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		qdio_shutdown_irq(irq_ptr);
		mutex_unlock(&irq_ptr->setup_mutex);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		qdio_shutdown_thinint(irq_ptr);
		qdio_shutdown_irq(irq_ptr);
		mutex_unlock(&irq_ptr->setup_mutex);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr);
	qdio_setup_debug_entries(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qactivate:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto out;
	}

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_device(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
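
/*
 * For orientation, a rough sketch of how a typical upper-layer driver (such
 * as qeth or zfcp) walks through this API; error handling is omitted and the
 * argument names are placeholders:
 *
 *	qdio_allocate(cdev, num_in_qs, num_out_qs);
 *	qdio_establish(cdev, &init_data);	// handlers, SBAL arrays, ...
 *	qdio_activate(cdev);
 *	...
 *	do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT / _OUTPUT, ...);	// data transfer
 *	...
 *	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(cdev);
 */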

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int overlap;

	qperf_inc(q, inbound_call);

	/* If any processed SBALs are returned to HW, adjust our tracking: */
	overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
			     q->u.in.batch_count);
	if (overlap > 0) {
		q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
		q->u.in.batch_count -= overlap;
	}

	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	atomic_add(count, &q->nr_buf_used);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   unsigned int bufnr, unsigned int count)
{
	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		if (q->u.out.use_cq && count == 1)
			phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, count, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
		   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
		   state == SLSB_CU_OUTPUT_PRIMED) {
		/* The previous buffer is not processed yet, tack on. */
		qperf_inc(q, fast_requeue);
	} else {
		rc = qdio_kick_outbound_q(q, count, 0);
	}

	/* Let drivers implement their own completion scanning: */
	if (!scan_threshold)
		return rc;

	/* in case of SIGA errors we must process the error immediately */
	if (used >= scan_threshold || rc)
		qdio_tasklet_schedule(q);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
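
/*
 * Illustrative only: returning 16 consumed buffers of input queue 0 to the
 * adapter, starting at index 32, would look like
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 32, 16);
 *
 * while queueing one filled outbound buffer with a PCI completion interrupt
 * requested would be
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
 *		     0, bufnr, 1);
 */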

/**
 * qdio_start_irq - enable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	unsigned int i;

	if (!irq_ptr)
		return -ENODEV;

	for_each_input_queue(irq_ptr, q, i)
		qdio_stop_polling(q);

	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;

	for_each_input_queue(irq_ptr, q, i) {
		if (!qdio_inbound_q_done(q, q->first_to_check))
			goto rescan;
	}

	return 0;

rescan:
	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_start_irq);
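
/*
 * A rough sketch of the polling discipline that qdio_start_irq() and
 * qdio_get_next_buffers() support, modelled on a NAPI-style consumer
 * (illustrative only; a real driver hangs this off its irq_poll callback):
 *
 *	while (polling) {
 *		n = qdio_get_next_buffers(cdev, 0, &bufnr, &error);
 *		// ... process n buffers ...
 *		if (n <= 0 && qdio_start_irq(cdev) == 0)
 *			break;	// interrupts re-armed, no data slipped in
 *		// qdio_start_irq() returned 1: new data arrived, keep polling
 *	}
 */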

static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
				unsigned int *error)
{
	unsigned int start = q->first_to_check;
	int count;

	count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
				qdio_outbound_q_moved(q, start);
	if (count == 0)
		return 0;

	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_check = add_buf(start, count);
	q->qdio_error = 0;

	return count;
}

int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
		       unsigned int *bufnr, unsigned int *error)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	if (!irq_ptr)
		return -ENODEV;
	q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);

	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL_GPL(qdio_inspect_queue);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	qdio_check_outbound_pci_queues(irq_ptr);

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = qdio_thinint_init();
	if (rc)
		goto out_cache;
	return 0;

out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	qdio_thinint_exit();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);