/*
 *   S/390 common I/O routines -- low level i/o calls
 *
 *    Copyright IBM Corp. 1999, 2008
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
  10
  11#define KMSG_COMPONENT "cio"
  12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  13
  14#include <linux/ftrace.h>
  15#include <linux/module.h>
  16#include <linux/init.h>
  17#include <linux/slab.h>
  18#include <linux/device.h>
  19#include <linux/kernel_stat.h>
  20#include <linux/interrupt.h>
  21#include <asm/cio.h>
  22#include <asm/delay.h>
  23#include <asm/irq.h>
  24#include <asm/irq_regs.h>
  25#include <asm/setup.h>
  26#include <asm/reset.h>
  27#include <asm/ipl.h>
  28#include <asm/chpid.h>
  29#include <asm/airq.h>
  30#include <asm/isc.h>
  31#include <linux/cputime.h>
  32#include <asm/fcx.h>
  33#include <asm/nmi.h>
  34#include <asm/crw.h>
  35#include "cio.h"
  36#include "css.h"
  37#include "chsc.h"
  38#include "ioasm.h"
  39#include "io_sch.h"
  40#include "blacklist.h"
  41#include "cio_debug.h"
  42#include "chp.h"
  43
/* Debug areas for common I/O, registered in cio_debug_init(). */
debug_info_t *cio_debug_msg_id;		/* generic cio messages (sprintf view) */
debug_info_t *cio_debug_trace_id;	/* function-call trace (hex/ascii view) */
debug_info_t *cio_debug_crw_id;		/* machine-check related messages */
  47
/*
 * Function: cio_debug_init
 * Initializes three debug logs for common I/O:
 * - cio_msg logs generic cio messages
 * - cio_trace logs the calling of different functions
 * - cio_crw logs machine check related cio messages
 */
static int __init cio_debug_init(void)
{
	/* 16 pages, 1 area, 16 longs of sprintf arguments per entry. */
	cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
	if (!cio_debug_msg_id)
		goto out_unregister;
	debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
	debug_set_level(cio_debug_msg_id, 2);
	/* 16-byte raw entries for the function-call trace. */
	cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
	if (!cio_debug_trace_id)
		goto out_unregister;
	debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(cio_debug_trace_id, 2);
	cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
	if (!cio_debug_crw_id)
		goto out_unregister;
	debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
	debug_set_level(cio_debug_crw_id, 4);
	return 0;

out_unregister:
	/* Tear down whatever subset was registered before the failure. */
	if (cio_debug_msg_id)
		debug_unregister(cio_debug_msg_id);
	if (cio_debug_trace_id)
		debug_unregister(cio_debug_trace_id);
	if (cio_debug_crw_id)
		debug_unregister(cio_debug_crw_id);
	return -1;
}

arch_initcall (cio_debug_init);
  85
  86int cio_set_options(struct subchannel *sch, int flags)
  87{
  88        struct io_subchannel_private *priv = to_io_private(sch);
  89
  90        priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
  91        priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
  92        priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
  93        return 0;
  94}
  95
  96static int
  97cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
  98{
  99        char dbf_text[15];
 100
 101        if (lpm != 0)
 102                sch->lpm &= ~lpm;
 103        else
 104                sch->lpm = 0;
 105
 106        CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
 107                      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
 108                      sch->schid.sch_no);
 109
 110        if (cio_update_schib(sch))
 111                return -ENODEV;
 112
 113        sprintf(dbf_text, "no%s", dev_name(&sch->dev));
 114        CIO_TRACE_EVENT(0, dbf_text);
 115        CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
 116
 117        return (sch->lpm ? -EACCES : -ENODEV);
 118}
 119
/*
 * cio_start_key - start a channel program on a subchannel
 *
 * Builds a command-mode ORB from the stored I/O options and issues
 * "Start Subchannel". Returns 0 on success, -EBUSY if the subchannel
 * is busy or status pending, or the result of
 * cio_start_handle_notoper() if it is not operational.
 */
int
cio_start_key (struct subchannel *sch,	/* subchannel structure */
	       struct ccw1 * cpa,	/* logical channel prog addr */
	       __u8 lpm,		/* logical path mask */
	       __u8 key)		/* storage key */
{
	struct io_subchannel_private *priv = to_io_private(sch);
	union orb *orb = &priv->orb;
	int ccode;

	CIO_TRACE_EVENT(5, "stIO");
	CIO_TRACE_EVENT(5, dev_name(&sch->dev));

	/* Build the operation request block for ssch. */
	memset(orb, 0, sizeof(union orb));
	/* sch is always under 2G. */
	orb->cmd.intparm = (u32)(addr_t)sch;
	orb->cmd.fmt = 1;

	/* Options set via cio_set_options(); pfch is inverted ("prefetch
	 * denied" -> no prefetch). */
	orb->cmd.pfch = priv->options.prefetch == 0;
	orb->cmd.spnd = priv->options.suspend;
	orb->cmd.ssic = priv->options.suspend && priv->options.inter;
	orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
	/*
	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
	 */
	orb->cmd.c64 = 1;
	orb->cmd.i2k = 0;
#endif
	orb->cmd.key = key >> 4;
	/* issue "Start Subchannel" */
	orb->cmd.cpa = (__u32) __pa(cpa);
	ccode = ssch(sch->schid, orb);

	/* process condition code */
	CIO_HEX_EVENT(5, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	case 3:		/* device/path not operational */
		return cio_start_handle_notoper(sch, lpm);
	default:
		return ccode;
	}
}
 173
 174int
 175cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
 176{
 177        return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
 178}
 179
 180/*
 181 * resume suspended I/O operation
 182 */
 183int
 184cio_resume (struct subchannel *sch)
 185{
 186        int ccode;
 187
 188        CIO_TRACE_EVENT(4, "resIO");
 189        CIO_TRACE_EVENT(4, dev_name(&sch->dev));
 190
 191        ccode = rsch (sch->schid);
 192
 193        CIO_HEX_EVENT(4, &ccode, sizeof(ccode));
 194
 195        switch (ccode) {
 196        case 0:
 197                sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
 198                return 0;
 199        case 1:
 200                return -EBUSY;
 201        case 2:
 202                return -EINVAL;
 203        default:
 204                /*
 205                 * useless to wait for request completion
 206                 *  as device is no longer operational !
 207                 */
 208                return -ENODEV;
 209        }
 210}
 211
 212/*
 213 * halt I/O operation
 214 */
 215int
 216cio_halt(struct subchannel *sch)
 217{
 218        int ccode;
 219
 220        if (!sch)
 221                return -ENODEV;
 222
 223        CIO_TRACE_EVENT(2, "haltIO");
 224        CIO_TRACE_EVENT(2, dev_name(&sch->dev));
 225
 226        /*
 227         * Issue "Halt subchannel" and process condition code
 228         */
 229        ccode = hsch (sch->schid);
 230
 231        CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
 232
 233        switch (ccode) {
 234        case 0:
 235                sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
 236                return 0;
 237        case 1:         /* status pending */
 238        case 2:         /* busy */
 239                return -EBUSY;
 240        default:                /* device not operational */
 241                return -ENODEV;
 242        }
 243}
 244
 245/*
 246 * Clear I/O operation
 247 */
 248int
 249cio_clear(struct subchannel *sch)
 250{
 251        int ccode;
 252
 253        if (!sch)
 254                return -ENODEV;
 255
 256        CIO_TRACE_EVENT(2, "clearIO");
 257        CIO_TRACE_EVENT(2, dev_name(&sch->dev));
 258
 259        /*
 260         * Issue "Clear subchannel" and process condition code
 261         */
 262        ccode = csch (sch->schid);
 263
 264        CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
 265
 266        switch (ccode) {
 267        case 0:
 268                sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
 269                return 0;
 270        default:                /* device not operational */
 271                return -ENODEV;
 272        }
 273}
 274
 275/*
 276 * Function: cio_cancel
 277 * Issues a "Cancel Subchannel" on the specified subchannel
 278 * Note: We don't need any fancy intparms and flags here
 279 *       since xsch is executed synchronously.
 280 * Only for common I/O internal use as for now.
 281 */
 282int
 283cio_cancel (struct subchannel *sch)
 284{
 285        int ccode;
 286
 287        if (!sch)
 288                return -ENODEV;
 289
 290        CIO_TRACE_EVENT(2, "cancelIO");
 291        CIO_TRACE_EVENT(2, dev_name(&sch->dev));
 292
 293        ccode = xsch (sch->schid);
 294
 295        CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
 296
 297        switch (ccode) {
 298        case 0:         /* success */
 299                /* Update information in scsw. */
 300                if (cio_update_schib(sch))
 301                        return -ENODEV;
 302                return 0;
 303        case 1:         /* status pending */
 304                return -EBUSY;
 305        case 2:         /* not applicable */
 306                return -EINVAL;
 307        default:        /* not oper */
 308                return -ENODEV;
 309        }
 310}
 311
 312
 313static void cio_apply_config(struct subchannel *sch, struct schib *schib)
 314{
 315        schib->pmcw.intparm = sch->config.intparm;
 316        schib->pmcw.mbi = sch->config.mbi;
 317        schib->pmcw.isc = sch->config.isc;
 318        schib->pmcw.ena = sch->config.ena;
 319        schib->pmcw.mme = sch->config.mme;
 320        schib->pmcw.mp = sch->config.mp;
 321        schib->pmcw.csense = sch->config.csense;
 322        schib->pmcw.mbfc = sch->config.mbfc;
 323        if (sch->config.mbfc)
 324                schib->mba = sch->config.mba;
 325}
 326
 327static int cio_check_config(struct subchannel *sch, struct schib *schib)
 328{
 329        return (schib->pmcw.intparm == sch->config.intparm) &&
 330                (schib->pmcw.mbi == sch->config.mbi) &&
 331                (schib->pmcw.isc == sch->config.isc) &&
 332                (schib->pmcw.ena == sch->config.ena) &&
 333                (schib->pmcw.mme == sch->config.mme) &&
 334                (schib->pmcw.mp == sch->config.mp) &&
 335                (schib->pmcw.csense == sch->config.csense) &&
 336                (schib->pmcw.mbfc == sch->config.mbfc) &&
 337                (!sch->config.mbfc || (schib->mba == sch->config.mba));
 338}
 339
/*
 * cio_commit_config - apply configuration to the subchannel
 *
 * Retries msch up to 5 times, clearing pending status or backing off
 * on busy. Returns 0 on success, -ENODEV if the subchannel vanished,
 * -EBUSY/-EAGAIN if retries were exhausted, or the msch error code.
 */
int cio_commit_config(struct subchannel *sch)
{
	int ccode, retry, ret = 0;
	struct schib schib;
	struct irb irb;

	if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
		return -ENODEV;

	for (retry = 0; retry < 5; retry++) {
		/* copy desired changes to local schib */
		cio_apply_config(sch, &schib);
		ccode = msch_err(sch->schid, &schib);
		if (ccode < 0) /* -EIO if msch gets a program check. */
			return ccode;
		switch (ccode) {
		case 0: /* successful */
			if (stsch_err(sch->schid, &schib) ||
			    !css_sch_is_valid(&schib))
				return -ENODEV;
			/* msch may not accept all bits; verify the result. */
			if (cio_check_config(sch, &schib)) {
				/* commit changes from local schib */
				memcpy(&sch->schib, &schib, sizeof(schib));
				return 0;
			}
			ret = -EAGAIN;
			break;
		case 1: /* status pending */
			ret = -EBUSY;
			/* Consume the pending status before retrying;
			 * give up if tsch itself fails. */
			if (tsch(sch->schid, &irb))
				return ret;
			break;
		case 2: /* busy */
			udelay(100); /* allow for recovery */
			ret = -EBUSY;
			break;
		case 3: /* not operational */
			return -ENODEV;
		}
	}
	return ret;
}
 385
 386/**
 387 * cio_update_schib - Perform stsch and update schib if subchannel is valid.
 388 * @sch: subchannel on which to perform stsch
 389 * Return zero on success, -ENODEV otherwise.
 390 */
 391int cio_update_schib(struct subchannel *sch)
 392{
 393        struct schib schib;
 394
 395        if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
 396                return -ENODEV;
 397
 398        memcpy(&sch->schib, &schib, sizeof(schib));
 399        return 0;
 400}
 401EXPORT_SYMBOL_GPL(cio_update_schib);
 402
 403/**
 404 * cio_enable_subchannel - enable a subchannel.
 405 * @sch: subchannel to be enabled
 406 * @intparm: interruption parameter to set
 407 */
 408int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
 409{
 410        int ret;
 411
 412        CIO_TRACE_EVENT(2, "ensch");
 413        CIO_TRACE_EVENT(2, dev_name(&sch->dev));
 414
 415        if (sch_is_pseudo_sch(sch))
 416                return -EINVAL;
 417        if (cio_update_schib(sch))
 418                return -ENODEV;
 419
 420        sch->config.ena = 1;
 421        sch->config.isc = sch->isc;
 422        sch->config.intparm = intparm;
 423
 424        ret = cio_commit_config(sch);
 425        if (ret == -EIO) {
 426                /*
 427                 * Got a program check in msch. Try without
 428                 * the concurrent sense bit the next time.
 429                 */
 430                sch->config.csense = 0;
 431                ret = cio_commit_config(sch);
 432        }
 433        CIO_HEX_EVENT(2, &ret, sizeof(ret));
 434        return ret;
 435}
 436EXPORT_SYMBOL_GPL(cio_enable_subchannel);
 437
 438/**
 439 * cio_disable_subchannel - disable a subchannel.
 440 * @sch: subchannel to disable
 441 */
 442int cio_disable_subchannel(struct subchannel *sch)
 443{
 444        int ret;
 445
 446        CIO_TRACE_EVENT(2, "dissch");
 447        CIO_TRACE_EVENT(2, dev_name(&sch->dev));
 448
 449        if (sch_is_pseudo_sch(sch))
 450                return 0;
 451        if (cio_update_schib(sch))
 452                return -ENODEV;
 453
 454        sch->config.ena = 0;
 455        ret = cio_commit_config(sch);
 456
 457        CIO_HEX_EVENT(2, &ret, sizeof(ret));
 458        return ret;
 459}
 460EXPORT_SYMBOL_GPL(cio_disable_subchannel);
 461
 462static int cio_check_devno_blacklisted(struct subchannel *sch)
 463{
 464        if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
 465                /*
 466                 * This device must not be known to Linux. So we simply
 467                 * say that there is no device and return ENODEV.
 468                 */
 469                CIO_MSG_EVENT(6, "Blacklisted device detected "
 470                              "at devno %04X, subchannel set %x\n",
 471                              sch->schib.pmcw.dev, sch->schid.ssid);
 472                return -ENODEV;
 473        }
 474        return 0;
 475}
 476
 477static int cio_validate_io_subchannel(struct subchannel *sch)
 478{
 479        /* Initialization for io subchannels. */
 480        if (!css_sch_is_valid(&sch->schib))
 481                return -ENODEV;
 482
 483        /* Devno is valid. */
 484        return cio_check_devno_blacklisted(sch);
 485}
 486
 487static int cio_validate_msg_subchannel(struct subchannel *sch)
 488{
 489        /* Initialization for message subchannels. */
 490        if (!css_sch_is_valid(&sch->schib))
 491                return -ENODEV;
 492
 493        /* Devno is valid. */
 494        return cio_check_devno_blacklisted(sch);
 495}
 496
/**
 * cio_validate_subchannel - basic validation of subchannel
 * @sch: subchannel structure to be filled out
 * @schid: subchannel id
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   0 on success
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for invalid subchannels or blacklisted devices
 *   -EIO for subchannels in an invalid subchannel set
 */
int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
{
	char dbf_txt[15];
	int ccode;
	int err;

	/* "valsch" + at most 4 hex digits of a 16-bit sch_no fits. */
	sprintf(dbf_txt, "valsch%x", schid.sch_no);
	CIO_TRACE_EVENT(4, dbf_txt);

	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch_err(schid, &sch->schib);
	if (ccode) {
		err = (ccode == 3) ? -ENXIO : ccode;
		goto out;
	}
	sch->st = sch->schib.pmcw.st;
	sch->schid = schid;

	/* Type-specific validation; other types need no extra checks. */
	switch (sch->st) {
	case SUBCHANNEL_TYPE_IO:
		err = cio_validate_io_subchannel(sch);
		break;
	case SUBCHANNEL_TYPE_MSG:
		err = cio_validate_msg_subchannel(sch);
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      sch->schid.ssid, sch->schid.sch_no, sch->st);
out:
	return err;
}
 550
/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 *          SMP cross-CPU interrupts have their own specific
 *          handlers).
 *
 */
void __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	__this_cpu_write(s390_idle.nohz_delay, 1);
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	irb = (struct irb *)&S390_lowcore.irb;
	do {
		kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
		if (tpi_info->adapter_IO) {
			do_adapter_IO(tpi_info->isc);
			continue;
		}
		/* The intparm carries the subchannel pointer stored when
		 * the I/O was started (see cio_start_key/cio_tm_start_key). */
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			inc_irq_stat(IRQIO_CIO);
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
			else
				inc_irq_stat(IRQIO_CIO);
		} else
			inc_irq_stat(IRQIO_CIO);
		spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}
 613
 614#ifdef CONFIG_CCW_CONSOLE
/* Subchannel used by the CCW console; set up by cio_probe_console(). */
static struct subchannel *console_sch;
 616
/*
 * Use cio_tsch to update the subchannel status and call the interrupt handler
 * if status had been pending. Called with the subchannel's lock held.
 */
void cio_tsch(struct subchannel *sch)
{
	struct irb *irb;
	int irq_context;

	irb = (struct irb *)&S390_lowcore.irb;
	/* Store interrupt response block to lowcore. */
	if (tsch(sch->schid, irb) != 0)
		/* Not status pending or not operational. */
		return;
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	/* Call interrupt handler with updated status. */
	irq_context = in_interrupt();
	if (!irq_context) {
		/* Enter a synthetic interrupt context so the driver's irq
		 * handler runs under its usual assumptions. */
		local_bh_disable();
		irq_enter();
	}
	kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	else
		inc_irq_stat(IRQIO_CIO);
	if (!irq_context) {
		irq_exit();
		_local_bh_enable();
	}
}
 648
 649static int cio_test_for_console(struct subchannel_id schid, void *data)
 650{
 651        struct schib schib;
 652
 653        if (stsch_err(schid, &schib) != 0)
 654                return -ENXIO;
 655        if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
 656            (schib.pmcw.dev == console_devno)) {
 657                console_irq = schid.sch_no;
 658                return 1; /* found */
 659        }
 660        return 0;
 661}
 662
/*
 * Determine the console subchannel number, either directly from the
 * irq number provided by VM or by scanning for console_devno.
 * Returns console_irq, which stays -1 if nothing was found.
 */
static int cio_get_console_sch_no(void)
{
	struct subchannel_id schid;
	struct schib schib;

	init_subchannel_id(&schid);
	if (console_irq != -1) {
		/* VM provided us with the irq number of the console. */
		schid.sch_no = console_irq;
		if (stsch_err(schid, &schib) != 0 ||
		    (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
			return -1;
		console_devno = schib.pmcw.dev;
	} else if (console_devno != -1) {
		/* At least the console device number is known. */
		for_each_subchannel(cio_test_for_console, NULL);
	}
	return console_irq;
}
 682
 683struct subchannel *cio_probe_console(void)
 684{
 685        struct subchannel_id schid;
 686        struct subchannel *sch;
 687        int sch_no, ret;
 688
 689        sch_no = cio_get_console_sch_no();
 690        if (sch_no == -1) {
 691                pr_warning("No CCW console was found\n");
 692                return ERR_PTR(-ENODEV);
 693        }
 694        init_subchannel_id(&schid);
 695        schid.sch_no = sch_no;
 696        sch = css_alloc_subchannel(schid);
 697        if (IS_ERR(sch))
 698                return sch;
 699
 700        isc_register(CONSOLE_ISC);
 701        sch->config.isc = CONSOLE_ISC;
 702        sch->config.intparm = (u32)(addr_t)sch;
 703        ret = cio_commit_config(sch);
 704        if (ret) {
 705                isc_unregister(CONSOLE_ISC);
 706                put_device(&sch->dev);
 707                return ERR_PTR(ret);
 708        }
 709        console_sch = sch;
 710        return sch;
 711}
 712
 713int cio_is_console(struct subchannel_id schid)
 714{
 715        if (!console_sch)
 716                return 0;
 717        return schid_equal(&schid, &console_sch->schid);
 718}
 719
 720void cio_register_early_subchannels(void)
 721{
 722        int ret;
 723
 724        if (!console_sch)
 725                return;
 726
 727        ret = css_register_subchannel(console_sch);
 728        if (ret)
 729                put_device(&console_sch->dev);
 730}
 731#endif /* CONFIG_CCW_CONSOLE */
 732
 733static int
 734__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
 735{
 736        int retry, cc;
 737
 738        cc = 0;
 739        for (retry=0;retry<3;retry++) {
 740                schib->pmcw.ena = 0;
 741                cc = msch_err(schid, schib);
 742                if (cc)
 743                        return (cc==3?-ENODEV:-EBUSY);
 744                if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
 745                        return -ENODEV;
 746                if (!schib->pmcw.ena)
 747                        return 0;
 748        }
 749        return -EBUSY; /* uhm... */
 750}
 751
 752static int
 753__clear_io_subchannel_easy(struct subchannel_id schid)
 754{
 755        int retry;
 756
 757        if (csch(schid))
 758                return -ENODEV;
 759        for (retry=0;retry<20;retry++) {
 760                struct tpi_info ti;
 761
 762                if (tpi(&ti)) {
 763                        tsch(ti.schid, (struct irb *)&S390_lowcore.irb);
 764                        if (schid_equal(&ti.schid, &schid))
 765                                return 0;
 766                }
 767                udelay_simple(100);
 768        }
 769        return -EBUSY;
 770}
 771
 772static void __clear_chsc_subchannel_easy(void)
 773{
 774        /* It seems we can only wait for a bit here :/ */
 775        udelay_simple(100);
 776}
 777
/* Set by cio_reset_pgm_check_handler() when stsch took a program check.
 * (Identifier keeps the historical "occured" spelling.) */
static int pgm_check_occured;

/* Minimal program check handler installed around stsch during reset. */
static void cio_reset_pgm_check_handler(void)
{
	pgm_check_occured = 1;
}
 784
/*
 * stsch_reset - stsch with a temporary program check handler
 * @schid: subchannel id to store
 * @addr: schib to fill
 *
 * During reset, stsch may hit subchannels that trigger a program
 * check; report those as -EIO instead of crashing.
 */
static int stsch_reset(struct subchannel_id schid, struct schib *addr)
{
	int rc;

	pgm_check_occured = 0;
	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
	rc = stsch_err(schid, addr);
	s390_base_pgm_handler_fn = NULL;

	/* The program check handler could have changed pgm_check_occured. */
	barrier();

	if (pgm_check_occured)
		return -EIO;
	else
		return rc;
}
 802
/*
 * Quiesce a single subchannel during system reset: disable it and, if
 * that fails with -EBUSY, try to clear running I/O first and disable
 * again. Returns 0 (continue iterating) for existing subchannels,
 * -ENXIO if the subchannel does not exist.
 */
static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if (!schib.pmcw.ena)
		return 0;	/* already disabled */
	switch(__disable_subchannel_easy(schid, &schib)) {
	case 0:
	case -ENODEV:
		break;
	default: /* -EBUSY */
		switch (schib.pmcw.st) {
		case SUBCHANNEL_TYPE_IO:
			if (__clear_io_subchannel_easy(schid))
				goto out; /* give up... */
			break;
		case SUBCHANNEL_TYPE_CHSC:
			__clear_chsc_subchannel_easy();
			break;
		default:
			/* No default clear strategy */
			break;
		}
		/* Retry the disable now that the I/O is (hopefully) gone. */
		stsch_err(schid, &schib);
		__disable_subchannel_easy(schid, &schib);
	}
out:
	return 0;
}
 834
/* Outstanding rchp requests; decremented by the mcck handler below. */
static atomic_t chpid_reset_count;

/*
 * Machine check handler active while css_reset() runs: consume pending
 * channel report words and count down the responses to our rchps.
 */
static void s390_reset_chpids_mcck_handler(void)
{
	struct crw crw;
	struct mci *mci;

	/* Check for pending channel report word. */
	mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
	if (!mci->cp)
		return;
	/* Process channel report words. */
	while (stcrw(&crw) == 0) {
		/* Check for responses to RCHP. */
		if (crw.slct && crw.rsc == CRW_RSC_CPATH)
			atomic_dec(&chpid_reset_count);
	}
}
 853
#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
/*
 * css_reset - quiesce the channel subsystem
 *
 * Shuts down all subchannels and resets all channel paths, waiting
 * (bounded by RCHP_TIMEOUT) for the machine checks that acknowledge
 * the rchp requests.
 */
static void css_reset(void)
{
	int i, ret;
	unsigned long long timeout;
	struct chp_id chpid;

	/* Reset subchannels. */
	for_each_subchannel(__shutdown_subchannel_easy,  NULL);
	/* Reset channel paths. */
	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
	/* Enable channel report machine checks. */
	__ctl_set_bit(14, 28);
	/* Temporarily reenable machine checks. */
	local_mcck_enable();
	chp_id_init(&chpid);
	for (i = 0; i <= __MAX_CHPID; i++) {
		chpid.id = i;
		ret = rchp(chpid);
		if ((ret == 0) || (ret == 2))
			/*
			 * rchp either succeeded, or another rchp is already
			 * in progress. In either case, we'll get a crw.
			 */
			atomic_inc(&chpid_reset_count);
	}
	/* Wait for machine check for all channel paths. */
	/* << 12 converts microseconds to TOD clock units. */
	timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
	while (atomic_read(&chpid_reset_count) != 0) {
		if (get_tod_clock_fast() > timeout)
			break;
		cpu_relax();
	}
	/* Disable machine checks again. */
	local_mcck_disable();
	/* Disable channel report machine checks. */
	__ctl_clear_bit(14, 28);
	s390_base_mcck_handler_fn = NULL;
}
 893
/* Reset callback invoked on re-IPL/shutdown to quiesce the css. */
static struct reset_call css_reset_call = {
	.fn = css_reset,
};

/* Register the css reset callback early during boot. */
static int __init init_css_reset_call(void)
{
	atomic_set(&chpid_reset_count, 0);
	register_reset_call(&css_reset_call);
	return 0;
}

arch_initcall(init_css_reset_call);
 906
/* Parameter block for __reipl_subchannel_match(). */
struct sch_match_id {
	struct subchannel_id schid;	/* out: id of the matching subchannel */
	struct ccw_dev_id devid;	/* in: device to look for */
	int rc;				/* out: 0 if a match was found */
};
 912
 913static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
 914{
 915        struct schib schib;
 916        struct sch_match_id *match_id = data;
 917
 918        if (stsch_reset(schid, &schib))
 919                return -ENXIO;
 920        if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
 921            (schib.pmcw.dev == match_id->devid.devno) &&
 922            (schid.ssid == match_id->devid.ssid)) {
 923                match_id->schid = schid;
 924                match_id->rc = 0;
 925                return 1;
 926        }
 927        return 0;
 928}
 929
 930static int reipl_find_schid(struct ccw_dev_id *devid,
 931                            struct subchannel_id *schid)
 932{
 933        struct sch_match_id match_id;
 934
 935        match_id.devid = *devid;
 936        match_id.rc = -ENODEV;
 937        for_each_subchannel(__reipl_subchannel_match, &match_id);
 938        if (match_id.rc == 0)
 939                *schid = match_id.schid;
 940        return match_id.rc;
 941}
 942
/* Assembler entry point that performs the actual re-IPL. */
extern void do_reipl_asm(__u32 schid);

/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl_ccw_dev(struct ccw_dev_id *devid)
{
	struct subchannel_id uninitialized_var(schid);

	/* Runs the registered reset calls, including css_reset(). */
	s390_reset_system(NULL, NULL, NULL);
	if (reipl_find_schid(devid, &schid) != 0)
		panic("IPL Device not found\n");
	do_reipl_asm(*((__u32*)&schid));
}
 955
/*
 * cio_get_iplinfo - report ssid, devno and qdio flag of the IPL device
 * @iplinfo: filled in on success
 * Returns 0 on success, -ENODEV if the IPL subchannel is unusable.
 */
int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
{
	static struct chsc_sda_area sda_area __initdata;
	struct subchannel_id schid;
	struct schib schib;

	/* Lowcore holds the IPL subchannel id; .one must be set. */
	schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
	if (!schid.one)
		return -ENODEV;

	if (schid.ssid) {
		/*
		 * Firmware should have already enabled MSS but whoever started
		 * the kernel might have initiated a channel subsystem reset.
		 * Ensure that MSS is enabled.
		 */
		memset(&sda_area, 0, sizeof(sda_area));
		if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
			return -ENODEV;
	}
	if (stsch_err(schid, &schib))
		return -ENODEV;
	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
		return -ENODEV;
	if (!schib.pmcw.dnv)
		return -ENODEV;

	iplinfo->ssid = schid.ssid;
	iplinfo->devno = schib.pmcw.dev;
	iplinfo->is_qdio = schib.pmcw.qf;
	return 0;
}
 988
/**
 * cio_tm_start_key - perform start function
 * @sch: subchannel on which to perform the start function
 * @tcw: transport-command word to be started
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 *
 * Start the tcw on the given subchannel. Return zero on success, non-zero
 * otherwise.
 */
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
{
	int cc;
	union orb *orb = &to_io_private(sch)->orb;

	/* Build a transport-mode ORB (b=1) for ssch. */
	memset(orb, 0, sizeof(union orb));
	orb->tm.intparm = (u32) (addr_t) sch;
	orb->tm.key = key >> 4;
	orb->tm.b = 1;
	orb->tm.lpm = lpm ? lpm : sch->lpm;
	orb->tm.tcw = (u32) (addr_t) tcw;
	cc = ssch(sch->schid, orb);
	switch (cc) {
	case 0:
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	default:	/* not operational */
		return cio_start_handle_notoper(sch, lpm);
	}
}
1021
/**
 * cio_tm_intrg - perform interrogate function
 * @sch: subchannel on which to perform the interrogate function
 *
 * If the specified subchannel is running in transport-mode, perform the
 * interrogate function. Return zero on success, non-zero otherwise.
 */
int cio_tm_intrg(struct subchannel *sch)
{
	int cc;

	/* Only valid if the last start was transport-mode (orb.tm.b set). */
	if (!to_io_private(sch)->orb.tm.b)
		return -EINVAL;
	cc = xsch(sch->schid);
	switch (cc) {
	case 0:
	case 2:
		return 0;
	case 1:		/* status pending */
		return -EBUSY;
	default:	/* not operational */
		return -ENODEV;
	}
}
1046