/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *            Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"
        "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

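/*
 * do_siga_sync - issue SIGA-s for a subchannel (function code 2): sync the
 * SLSB buffer states of the masked output/input queues with the adapter.
 */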
static inline int do_siga_sync(struct subchannel_id schid,
                               unsigned int out_mask, unsigned int in_mask)
{
        register unsigned long __fc asm ("0") = 2;
        register struct subchannel_id __schid asm ("1") = schid;
        register unsigned long out asm ("2") = out_mask;
        register unsigned long in asm ("3") = in_mask;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
        return cc;
}

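/*
 * do_siga_input - issue SIGA-r for a subchannel (function code 1), signalling
 * the adapter that buffers on the masked input queues have been made
 * available again (see the caller in handle_inbound below).
 */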
static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
        register unsigned long __fc asm ("0") = 1;
        register struct subchannel_id __schid asm ("1") = schid;
        register unsigned long __mask asm ("2") = mask;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
        return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
                                 unsigned int *bb, unsigned int fc)
{
        register unsigned long __fc asm("0") = fc;
        register unsigned long __schid asm("1") = schid;
        register unsigned long __mask asm("2") = mask;
        int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

        asm volatile(
                "       siga    0\n"
                "0:     ipm     %0\n"
                "       srl     %0,28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
                : : "cc", "memory");
        *bb = ((unsigned int) __fc) >> 31;
        return cc;
}

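/*
 * qdio_check_ccq - classify the condition-code qualifier returned by the
 * EQBS/SQBS instructions (see qdio_do_eqbs/qdio_do_sqbs below).
 */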
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
        /* all done or next buffer state different */
        if (ccq == 0 || ccq == 32)
                return 0;
        /* not all buffers processed */
        if (ccq == 96 || ccq == 97)
                return 1;
        /* notify devices immediately */
        DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
        return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state differs from the previous buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
                        int start, int count, int auto_ack)
{
        unsigned int ccq = 0;
        int tmp_count = count, tmp_start = start;
        int nr = q->nr;
        int rc;

        BUG_ON(!q->irq_ptr->sch_token);
        qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);

        if (!q->is_input_q)
                nr += q->irq_ptr->nr_input_qs;
again:
        ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
                      auto_ack);
        rc = qdio_check_ccq(q, ccq);

        /*
         * At least one buffer was processed, return and extract the remaining
         * buffers later.
         */
        if ((ccq == 96) && (count != tmp_count)) {
                qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
                return (count - tmp_count);
        }

        if (rc == 1) {
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
                goto again;
        }

        if (rc < 0) {
                DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
                DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
                q->handler(q->irq_ptr->cdev,
                           QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
                           0, -1, -1, q->irq_ptr->int_parm);
                return 0;
        }
        return count - tmp_count;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
                        int count)
{
        unsigned int ccq = 0;
        int tmp_count = count, tmp_start = start;
        int nr = q->nr;
        int rc;

        if (!count)
                return 0;

        BUG_ON(!q->irq_ptr->sch_token);
        qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);

        if (!q->is_input_q)
                nr += q->irq_ptr->nr_input_qs;
again:
        ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
        rc = qdio_check_ccq(q, ccq);
        if (rc == 1) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
                qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
                goto again;
        }
        if (rc < 0) {
                DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
                DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
                q->handler(q->irq_ptr->cdev,
                           QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
                           0, -1, -1, q->irq_ptr->int_parm);
                return 0;
        }
        WARN_ON(tmp_count);
        return count - tmp_count;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
                                 unsigned char *state, unsigned int count,
                                 int auto_ack)
{
        unsigned char __state = 0;
        int i;

        BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
        BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

        if (is_qebsm(q))
                return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

        for (i = 0; i < count; i++) {
                if (!__state)
                        __state = q->slsb.val[bufnr];
                else if (q->slsb.val[bufnr] != __state)
                        break;
                bufnr = next_buf(bufnr);
        }
        *state = __state;
        return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
                                unsigned char *state, int auto_ack)
{
        return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
                                 unsigned char state, int count)
{
        int i;

        BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
        BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

        if (is_qebsm(q))
                return qdio_do_sqbs(q, state, bufnr, count);

        for (i = 0; i < count; i++) {
                xchg(&q->slsb.val[bufnr], state);
                bufnr = next_buf(bufnr);
        }
        return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
                                unsigned char state)
{
        return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;
        int i;

        for_each_input_queue(irq_ptr, q, i)
                set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
                               QDIO_MAX_BUFFERS_PER_Q);
        for_each_output_queue(irq_ptr, q, i)
                set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
                               QDIO_MAX_BUFFERS_PER_Q);
}

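/*
 * Issue SIGA-s to synchronize the SLSB states of the masked output/input
 * queues with the adapter; a no-op if the device does not require sync.
 */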
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
                                 unsigned int input)
{
        int cc;

        if (!need_siga_sync(q))
                return 0;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
        qdio_perf_stat_inc(&perf_stats.siga_sync);

        cc = do_siga_sync(q->irq_ptr->schid, output, input);
        if (cc)
                DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
        return cc;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
        if (q->is_input_q)
                return qdio_siga_sync(q, 0, q->mask);
        else
                return qdio_siga_sync(q, q->mask, 0);
}

static inline int qdio_siga_sync_out(struct qdio_q *q)
{
        return qdio_siga_sync(q, ~0U, 0);
}

static inline int qdio_siga_sync_all(struct qdio_q *q)
{
        return qdio_siga_sync(q, ~0U, ~0U);
}

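/*
 * Issue SIGA-w/wt for the output queue. On a HiperSockets busy condition
 * (busy bit set together with cc 2) keep retrying for up to
 * QDIO_BUSY_BIT_PATIENCE microseconds before reporting cc to the caller.
 */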
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
        unsigned long schid;
        unsigned int fc = 0;
        u64 start_time = 0;
        int cc;

        if (q->u.out.use_enh_siga)
                fc = 3;

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= 0x80;
        } else
                schid = *((u32 *)&q->irq_ptr->schid);

again:
        cc = do_siga_output(schid, q->mask, busy_bit, fc);

        /* hipersocket busy condition */
        if (*busy_bit) {
                WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

                if (!start_time) {
                        start_time = get_usecs();
                        goto again;
                }
                if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
                        goto again;
        }
        return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
        int cc;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
        qdio_perf_stat_inc(&perf_stats.siga_in);

        cc = do_siga_input(q->irq_ptr->schid, q->mask);
        if (cc)
                DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
        return cc;
}

static inline void qdio_sync_after_thinint(struct qdio_q *q)
{
        if (pci_out_supported(q)) {
                if (need_siga_sync_thinint(q))
                        qdio_siga_sync_all(q);
                else if (need_siga_sync_out_thinint(q))
                        qdio_siga_sync_out(q);
        } else
                qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
                        unsigned char *state)
{
        qdio_siga_sync_q(q);
        return get_buf_states(q, bufnr, state, 1, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
        if (!q->u.in.polling)
                return;

        q->u.in.polling = 0;
        qdio_perf_stat_inc(&perf_stats.debug_stop_polling);

        /* show the card that we are not polling anymore */
        if (is_qebsm(q)) {
                set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
                               q->u.in.ack_count);
                q->u.in.ack_count = 0;
        } else
                set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static void announce_buffer_error(struct qdio_q *q, int count)
{
        q->qdio_error |= QDIO_ERROR_SLSB_STATE;

        /* special handling for "no target buffer empty" (outbound queue full) */
        if (!q->is_input_q &&
            (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10) {
                qdio_perf_stat_inc(&perf_stats.outbound_target_full);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
                              q->first_to_check);
                return;
        }

        DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
        DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
        DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
        DBF_ERROR("F14:%2x F15:%2x",
                  q->sbal[q->first_to_check]->element[14].flags & 0xff,
                  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}

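/*
 * inbound_primed - handle freshly primed input buffers: remember the newest
 * buffer as the ACK state (EQBS already set it for QEBSM) and reset the
 * remaining buffers to NOT_INIT so that further primed buffers raise new
 * interrupts.
 */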
static inline void inbound_primed(struct qdio_q *q, int count)
{
        int new;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

        /* for QEBSM the ACK was already set by EQBS */
        if (is_qebsm(q)) {
                if (!q->u.in.polling) {
                        q->u.in.polling = 1;
                        q->u.in.ack_count = count;
                        q->u.in.ack_start = q->first_to_check;
                        return;
                }

                /* delete the previous ACKs */
                set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
                               q->u.in.ack_count);
                q->u.in.ack_count = count;
                q->u.in.ack_start = q->first_to_check;
                return;
        }

        /*
         * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
         * or by the next inbound run.
         */
        new = add_buf(q->first_to_check, count - 1);
        if (q->u.in.polling) {
                /* reset the previous ACK but first set the new one */
                set_buf_state(q, new, SLSB_P_INPUT_ACK);
                set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
        } else {
                q->u.in.polling = 1;
                set_buf_state(q, new, SLSB_P_INPUT_ACK);
        }

        q->u.in.ack_start = new;
        count--;
        if (!count)
                return;
        /* need to change ALL buffers to get more interrupts */
        set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}

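/*
 * Scan the input queue for buffers the adapter has primed and advance
 * first_to_check past them; returns the new frontier buffer number.
 */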
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
        int count, stop;
        unsigned char state;

        /*
         * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
         * would return 0.
         */
        count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
        stop = add_buf(q->first_to_check, count);

        if (q->first_to_check == stop)
                goto out;

        /*
         * No siga-sync needed here: either a PCI interrupt or the
         * thin-interrupt handler has already synced the queues.
         */
        count = get_buf_states(q, q->first_to_check, &state, count, 1);
        if (!count)
                goto out;

        switch (state) {
        case SLSB_P_INPUT_PRIMED:
                inbound_primed(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
                atomic_sub(count, &q->nr_buf_used);
                break;
        case SLSB_P_INPUT_ERROR:
                announce_buffer_error(q, count);
                /* process the buffer, the upper layer will take care of it */
                q->first_to_check = add_buf(q->first_to_check, count);
                atomic_sub(count, &q->nr_buf_used);
                break;
        case SLSB_CU_INPUT_EMPTY:
        case SLSB_P_INPUT_NOT_INIT:
        case SLSB_P_INPUT_ACK:
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
                break;
        default:
                BUG();
        }
out:
        return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
        int bufnr;

        bufnr = get_inbound_buffer_frontier(q);

        if ((bufnr != q->last_move) || q->qdio_error) {
                q->last_move = bufnr;
                if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
                        q->u.in.timestamp = get_usecs();
                return 1;
        } else
                return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
        unsigned char state = 0;

        if (!atomic_read(&q->nr_buf_used))
                return 1;

        qdio_siga_sync_q(q);
        get_buf_state(q, q->first_to_check, &state, 0);

        if (state == SLSB_P_INPUT_PRIMED)
                /* more work coming */
                return 0;

        if (is_thinint_irq(q->irq_ptr))
                return 1;

        /* don't poll under z/VM */
        if (MACHINE_IS_VM)
                return 1;

        /*
         * At this point we know that inbound first_to_check
         * has (probably) not moved (see qdio_inbound_processing).
         */
        if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
                              q->first_to_check);
                return 1;
        } else
                return 0;
}

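/*
 * Report the buffers between first_to_kick and first_to_check to the
 * driver's registered handler, then reset the queue's error state.
 */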
static void qdio_kick_handler(struct qdio_q *q)
{
        int start = q->first_to_kick;
        int end = q->first_to_check;
        int count;

        if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;

        count = sub_buf(end, start);

        if (q->is_input_q) {
                qdio_perf_stat_inc(&perf_stats.inbound_handler);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x",
                              start, count);
        } else
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
                              start, count);

        q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
                   q->irq_ptr->int_parm);

        /* for the next time */
        q->first_to_kick = end;
        q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
        qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
again:
        if (!qdio_inbound_q_moved(q))
                return;

        qdio_kick_handler(q);

        if (!qdio_inbound_q_done(q))
                /* means poll time is not yet over */
                goto again;

        qdio_stop_polling(q);
        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (!qdio_inbound_q_done(q))
                goto again;
}

void qdio_inbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
        int count, stop;
        unsigned char state;

        if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
            (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
                qdio_siga_sync_q(q);

        /*
         * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
         * would return 0.
         */
        count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
        stop = add_buf(q->first_to_check, count);

        if (q->first_to_check == stop)
                return q->first_to_check;

        count = get_buf_states(q, q->first_to_check, &state, count, 0);
        if (!count)
                return q->first_to_check;

        switch (state) {
        case SLSB_P_OUTPUT_EMPTY:
                /* the adapter got it */
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x",
                              q->nr, count);

                atomic_sub(count, &q->nr_buf_used);
                q->first_to_check = add_buf(q->first_to_check, count);
                break;
        case SLSB_P_OUTPUT_ERROR:
                announce_buffer_error(q, count);
                /* process the buffer, the upper layer will take care of it */
                q->first_to_check = add_buf(q->first_to_check, count);
                atomic_sub(count, &q->nr_buf_used);
                break;
        case SLSB_CU_OUTPUT_PRIMED:
                /* the adapter has not fetched the output yet */
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
                break;
        case SLSB_P_OUTPUT_NOT_INIT:
        case SLSB_P_OUTPUT_HALTED:
                break;
        default:
                BUG();
        }
        return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
        return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
        int bufnr;

        bufnr = get_outbound_buffer_frontier(q);

        if ((bufnr != q->last_move) || q->qdio_error) {
                q->last_move = bufnr;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
                return 1;
        } else
                return 0;
}

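/*
 * Issue SIGA-w for the output queue if the device needs it and translate
 * the condition code and busy bit into QDIO error flags for the caller.
 */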
static int qdio_kick_outbound_q(struct qdio_q *q)
{
        unsigned int busy_bit;
        int cc;

        if (!need_siga_out(q))
                return 0;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
        qdio_perf_stat_inc(&perf_stats.siga_out);

        cc = qdio_siga_output(q, &busy_bit);
        switch (cc) {
        case 0:
                break;
        case 2:
                if (busy_bit) {
                        DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
                        cc |= QDIO_ERROR_SIGA_BUSY;
                } else
                        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
                                      "siga-w cc2:%1d", q->nr);
                break;
        case 1:
        case 3:
                DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
                break;
        }
        return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
        qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
        BUG_ON(atomic_read(&q->nr_buf_used) < 0);

        if (qdio_outbound_q_moved(q))
                qdio_kick_handler(q);

        if (queue_type(q) == QDIO_ZFCP_QFMT)
                if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
                        goto sched;

        /* bail out for HiperSockets unicast queues */
        if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
                return;

        if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
            (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
                goto sched;

        if (q->u.out.pci_out_enabled)
                return;

        /*
         * Now we know that queue type is either qeth without pci enabled
         * or HiperSockets multicast. Make sure buffer switch from PRIMED to
         * EMPTY is noticed and outbound_handler is called after some time.
         */
        if (qdio_outbound_q_done(q))
                del_timer(&q->u.out.timer);
        else {
                if (!timer_pending(&q->u.out.timer)) {
                        mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
                        qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
                }
        }
        return;

sched:
        if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
                return;
        tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;

        if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
                return;
        tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
        struct qdio_q *out;
        int i;

        if (!pci_out_supported(q))
                return;

        for_each_output_queue(q->irq_ptr, out, i)
                if (!qdio_outbound_q_done(out))
                        tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
        qdio_perf_stat_inc(&perf_stats.thinint_inbound);
        qdio_sync_after_thinint(q);

        /*
         * The interrupt could be caused by a PCI request. Check the
         * PCI capable outbound queues.
         */
        qdio_check_outbound_after_thinint(q);

        if (!qdio_inbound_q_moved(q))
                return;

        qdio_kick_handler(q);

        if (!qdio_inbound_q_done(q)) {
                qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
                if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
                        tasklet_schedule(&q->tasklet);
                        return;
                }
        }

        qdio_stop_polling(q);
        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (!qdio_inbound_q_done(q)) {
                qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
                if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
                        tasklet_schedule(&q->tasklet);
        }
}

void tiqdio_inbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
                                  enum qdio_irq_states state)
{
        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

        irq_ptr->state = state;
        mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
        if (irb->esw.esw0.erw.cons) {
                DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
                DBF_ERROR_HEX(irb, 64);
                DBF_ERROR_HEX(irb->ecw, 64);
        }
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
        int i;
        struct qdio_q *q;

        if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
                return;

        qdio_perf_stat_inc(&perf_stats.pci_int);

        for_each_input_queue(irq_ptr, q, i)
                tasklet_schedule(&q->tasklet);

        if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
                return;

        for_each_output_queue(irq_ptr, q, i) {
                if (qdio_outbound_q_done(q))
                        continue;

                if (!siga_syncs_out_pci(q))
                        qdio_siga_sync_q(q);

                tasklet_schedule(&q->tasklet);
        }
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
                                unsigned long intparm, int cstat, int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;

        DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
        DBF_ERROR("intp :%lx", intparm);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

        if (irq_ptr->nr_input_qs) {
                q = irq_ptr->input_qs[0];
        } else if (irq_ptr->nr_output_qs) {
                q = irq_ptr->output_qs[0];
        } else {
                dump_stack();
                goto no_handler;
        }
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
                   0, -1, -1, irq_ptr->int_parm);
no_handler:
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
                                      int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

        if (cstat)
                goto error;
        if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
                goto error;
        if (!(dstat & DEV_STAT_DEV_END))
                goto error;
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
        return;

error:
        DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        int cstat, dstat;

        qdio_perf_stat_inc(&perf_stats.qdio_int);

        if (!intparm || !irq_ptr) {
                DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
                return;
        }

        if (IS_ERR(irb)) {
                switch (PTR_ERR(irb)) {
                case -EIO:
                        DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
                        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
                        wake_up(&cdev->private->wait_q);
                        return;
                default:
                        WARN_ON(1);
                        return;
                }
        }
        qdio_irq_check_sense(irq_ptr, irb);
        cstat = irb->scsw.cmd.cstat;
        dstat = irb->scsw.cmd.dstat;

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_INACTIVE:
                qdio_establish_handle_irq(cdev, cstat, dstat);
                break;
        case QDIO_IRQ_STATE_CLEANUP:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
                break;
        case QDIO_IRQ_STATE_ESTABLISHED:
        case QDIO_IRQ_STATE_ACTIVE:
                if (cstat & SCHN_STAT_PCI) {
                        qdio_int_handler_pci(irq_ptr);
                        return;
                }
                if (cstat || dstat)
                        qdio_handle_activate_check(cdev, intparm, cstat,
                                                   dstat);
                break;
        default:
                WARN_ON(1);
        }
        wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
                       struct qdio_ssqd_desc *data)
{
        if (!cdev || !cdev->private)
                return -EINVAL;

        DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
        return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

/**
 * qdio_cleanup - shutdown queues and free data structures
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * This function calls qdio_shutdown() for @cdev with method @how and then
 * qdio_free(). The qdio_free() return value is ignored since !irq_ptr is
 * already checked.
 */
int qdio_cleanup(struct ccw_device *cdev, int how)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        int rc;

        if (!irq_ptr)
                return -ENODEV;

        rc = qdio_shutdown(cdev, how);

        qdio_free(cdev);
        return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;
        int i;

        for_each_input_queue(irq_ptr, q, i)
                tasklet_kill(&q->tasklet);

        for_each_output_queue(irq_ptr, q, i) {
                del_timer(&q->u.out.timer);
                tasklet_kill(&q->tasklet);
        }
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        int rc;
        unsigned long flags;

        if (!irq_ptr)
                return -ENODEV;

        BUG_ON(irqs_disabled());
        DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

        mutex_lock(&irq_ptr->setup_mutex);
        /*
         * Subchannel was already shot down. We cannot prevent being called
         * twice since cio may trigger a shutdown asynchronously.
         */
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                mutex_unlock(&irq_ptr->setup_mutex);
                return 0;
        }

        /*
         * Indicate that the device is going down. Scheduling the queue
         * tasklets is forbidden from here on.
         */
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

        tiqdio_remove_input_queues(irq_ptr);
        qdio_shutdown_queues(cdev);
        qdio_shutdown_debug_entries(irq_ptr, cdev);

        /* cleanup subchannel */
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

        if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
                rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
        else
                /* default behaviour is halt */
                rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
        if (rc) {
                DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4d", rc);
                goto no_cleanup;
        }

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR,
                10 * HZ);
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
        qdio_shutdown_thinint(irq_ptr);

        /* restore interrupt handler */
        if ((void *)cdev->handler == (void *)qdio_int_handler)
                cdev->handler = irq_ptr->orig_handler;
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        mutex_unlock(&irq_ptr->setup_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        if (!irq_ptr)
                return -ENODEV;

        DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
        mutex_lock(&irq_ptr->setup_mutex);

        if (irq_ptr->debug_area != NULL) {
                debug_unregister(irq_ptr->debug_area);
                irq_ptr->debug_area = NULL;
        }
        cdev->private->qdio_data = NULL;
        mutex_unlock(&irq_ptr->setup_mutex);

        qdio_release_memory(irq_ptr);
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_initialize - allocate and establish queues for a qdio subchannel
 * @init_data: initialization data
 *
 * This function first allocates queues via qdio_allocate() and on success
 * establishes them via qdio_establish().
 */
int qdio_initialize(struct qdio_initialize *init_data)
{
        int rc;

        rc = qdio_allocate(init_data);
        if (rc)
                return rc;

        rc = qdio_establish(init_data);
        if (rc)
                qdio_free(init_data->cdev);
        return rc;
}
EXPORT_SYMBOL_GPL(qdio_initialize);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
        struct qdio_irq *irq_ptr;

        DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

        if ((init_data->no_input_qs && !init_data->input_handler) ||
            (init_data->no_output_qs && !init_data->output_handler))
                return -EINVAL;

        if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
            (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
                return -EINVAL;

        if ((!init_data->input_sbal_addr_array) ||
            (!init_data->output_sbal_addr_array))
                return -EINVAL;

        /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
        irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!irq_ptr)
                goto out_err;

        mutex_init(&irq_ptr->setup_mutex);
        qdio_allocate_dbf(init_data, irq_ptr);

        /*
         * Allocate a page for the chsc calls in qdio_establish.
         * Must be pre-allocated since a zfcp recovery will call
         * qdio_establish. In case of low memory and swap on a zfcp disk
         * we may not be able to allocate memory otherwise.
         */
        irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
        if (!irq_ptr->chsc_page)
                goto out_rel;

        /* qdr is used in ccw1.cda which is u32 */
        irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!irq_ptr->qdr)
                goto out_rel;
        WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

        if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
                             init_data->no_output_qs))
                goto out_rel;

        init_data->cdev->private->qdio_data = irq_ptr;
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        return 0;
out_rel:
        qdio_release_memory(irq_ptr);
out_err:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
        struct qdio_irq *irq_ptr;
        struct ccw_device *cdev = init_data->cdev;
        unsigned long saveflags;
        int rc;

        DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EINVAL;

        mutex_lock(&irq_ptr->setup_mutex);
        qdio_setup_irq(init_data);

        rc = qdio_establish_thinint(irq_ptr);
        if (rc) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
        }

        /* establish q */
        irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->equeue.count;
        irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

        spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
        ccw_device_set_options_mask(cdev, 0);

        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
        if (rc) {
                DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
        }
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

        if (rc) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
        }

        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

        if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return -EIO;
        }

        qdio_setup_ssqd_info(irq_ptr);
        DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
        DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

        /* qebsm is now setup if available, initialize buffer states */
        qdio_init_buf_states(irq_ptr);

        mutex_unlock(&irq_ptr->setup_mutex);
        qdio_print_subchannel_info(irq_ptr, cdev);
        qdio_setup_debug_entries(irq_ptr, cdev);
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr;
        int rc;
        unsigned long saveflags;

        DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EINVAL;

        mutex_lock(&irq_ptr->setup_mutex);
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                rc = -EBUSY;
                goto out;
        }

        irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->aqueue.count;
        irq_ptr->ccw.cda = 0;

        spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
        ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
                              0, DOIO_DENY_PREFETCH);
        if (rc) {
                DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
        }
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

        if (rc)
                goto out;

        if (is_thinint_irq(irq_ptr))
                tiqdio_add_input_queues(irq_ptr);

        /* wait for subchannel to become active */
        msleep(5);

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_STOPPED:
        case QDIO_IRQ_STATE_ERR:
                rc = -EIO;
                break;
        default:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
                rc = 0;
        }
out:
        mutex_unlock(&irq_ptr->setup_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

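/* check whether bufnr lies in the wrap-around range [start, start + count) */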
static inline int buf_in_between(int bufnr, int start, int count)
{
        int end = add_buf(start, count);

        if (end > start) {
                if (bufnr >= start && bufnr < end)
                        return 1;
                else
                        return 0;
        }

        /* wrap-around case */
        if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
            (bufnr < end))
                return 1;
        else
                return 0;
}

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
                          int bufnr, int count)
{
        int used, diff;

        if (!q->u.in.polling)
                goto set;

        /* protect against stop polling setting an ACK for an emptied slsb */
        if (count == QDIO_MAX_BUFFERS_PER_Q) {
                /* overwriting everything, just delete polling status */
                q->u.in.polling = 0;
                q->u.in.ack_count = 0;
                goto set;
        } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
                if (is_qebsm(q)) {
                        /* partial overwrite, just update ack_start */
                        diff = add_buf(bufnr, count);
                        diff = sub_buf(diff, q->u.in.ack_start);
                        q->u.in.ack_count -= diff;
                        if (q->u.in.ack_count <= 0) {
                                q->u.in.polling = 0;
                                q->u.in.ack_count = 0;
                                goto set;
                        }
                        q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
                } else
                        /* the only ACK will be deleted, so stop polling */
                        q->u.in.polling = 0;
        }

set:
        count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

        used = atomic_add_return(count, &q->nr_buf_used) - count;
        BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

        /* no need to signal as long as the adapter had free buffers */
        if (used)
                return 0;

        if (need_siga_in(q))
                return qdio_siga_input(q);
        return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
                           int bufnr, int count)
{
        unsigned char state;
        int used, rc = 0;

        qdio_perf_stat_inc(&perf_stats.outbound_handler);

        count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
        used = atomic_add_return(count, &q->nr_buf_used);
        BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

        if (callflags & QDIO_FLAG_PCI_OUT)
                q->u.out.pci_out_enabled = 1;
        else
                q->u.out.pci_out_enabled = 0;

        if (queue_type(q) == QDIO_IQDIO_QFMT) {
                if (multicast_outbound(q))
                        rc = qdio_kick_outbound_q(q);
                else if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
                         (count > 1) &&
                         (count <= q->irq_ptr->ssqd_desc.mmwc)) {
                        /* exploit enhanced SIGA */
                        q->u.out.use_enh_siga = 1;
                        rc = qdio_kick_outbound_q(q);
                } else {
                        /*
                         * One siga-w per buffer required for unicast
                         * HiperSockets.
                         */
                        q->u.out.use_enh_siga = 0;
                        while (count--) {
                                rc = qdio_kick_outbound_q(q);
                                if (rc)
                                        goto out;
                        }
                }
                goto out;
        }

        if (need_siga_sync(q)) {
                qdio_siga_sync_q(q);
                goto out;
        }

        /* try to fast requeue buffers */
        get_buf_state(q, prev_buf(bufnr), &state, 0);
        if (state != SLSB_CU_OUTPUT_PRIMED)
                rc = qdio_kick_outbound_q(q);
        else
                qdio_perf_stat_inc(&perf_stats.fast_requeue);

out:
        tasklet_schedule(&q->tasklet);
        return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
            int q_nr, unsigned int bufnr, unsigned int count)
{
        struct qdio_irq *irq_ptr;

        if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
                return -EINVAL;

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        DBF_DEV_EVENT(DBF_INFO, irq_ptr,
                      "do%02x b:%02x c:%02x", callflags, bufnr, count);

        if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
                return -EBUSY;

        if (callflags & QDIO_FLAG_SYNC_INPUT)
                return handle_inbound(irq_ptr->input_qs[q_nr],
                                      callflags, bufnr, count);
        else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
                return handle_outbound(irq_ptr->output_qs[q_nr],
                                       callflags, bufnr, count);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);

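/*
 * Typical driver usage of the API exported above (a sketch, not lifted from
 * any particular in-tree driver): qdio_initialize(), or qdio_allocate()
 * plus qdio_establish(), then qdio_activate(); buffer transfer via do_QDIO();
 * teardown with qdio_shutdown() and qdio_free(), or qdio_cleanup() which
 * combines the two.
 */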
static int __init init_QDIO(void)
{
        int rc;

        rc = qdio_setup_init();
        if (rc)
                return rc;
        rc = tiqdio_allocate_memory();
        if (rc)
                goto out_cache;
        rc = qdio_debug_init();
        if (rc)
                goto out_ti;
        rc = qdio_setup_perf_stats();
        if (rc)
                goto out_debug;
        rc = tiqdio_register_thinints();
        if (rc)
                goto out_perf;
        return 0;

out_perf:
        qdio_remove_perf_stats();
out_debug:
        qdio_debug_exit();
out_ti:
        tiqdio_free_memory();
out_cache:
        qdio_setup_exit();
        return rc;
}

static void __exit exit_QDIO(void)
{
        tiqdio_unregister_thinints();
        tiqdio_free_memory();
        qdio_remove_perf_stats();
        qdio_debug_exit();
        qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);