linux/drivers/s390/block/dasd.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
   4 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
   5 *                  Carsten Otte <Cotte@de.ibm.com>
   6 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
   7 * Bugreports.to..: <Linux390@de.ibm.com>
   8 * Copyright IBM Corp. 1999, 2009
   9 */
  10
  11#define KMSG_COMPONENT "dasd"
  12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  13
  14#include <linux/kmod.h>
  15#include <linux/init.h>
  16#include <linux/interrupt.h>
  17#include <linux/ctype.h>
  18#include <linux/major.h>
  19#include <linux/slab.h>
  20#include <linux/hdreg.h>
  21#include <linux/async.h>
  22#include <linux/mutex.h>
  23#include <linux/debugfs.h>
  24#include <linux/seq_file.h>
  25#include <linux/vmalloc.h>
  26
  27#include <asm/ccwdev.h>
  28#include <asm/ebcdic.h>
  29#include <asm/idals.h>
  30#include <asm/itcw.h>
  31#include <asm/diag.h>
  32
  33/* This is ugly... */
  34#define PRINTK_HEADER "dasd:"
  35
  36#include "dasd_int.h"
  37/*
  38 * SECTION: Constant definitions to be used within this file
  39 */
  40#define DASD_CHANQ_MAX_SIZE 4
  41
  42#define DASD_DIAG_MOD           "dasd_diag_mod"
  43
  44/*
  45 * SECTION: exported variables of dasd.c
  46 */
  47debug_info_t *dasd_debug_area;
  48EXPORT_SYMBOL(dasd_debug_area);
  49static struct dentry *dasd_debugfs_root_entry;
  50struct dasd_discipline *dasd_diag_discipline_pointer;
  51EXPORT_SYMBOL(dasd_diag_discipline_pointer);
  52void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
  53
  54MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
  55MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
  56                   " Copyright IBM Corp. 2000");
  57MODULE_SUPPORTED_DEVICE("dasd");
  58MODULE_LICENSE("GPL");
  59
  60/*
  61 * SECTION: prototypes for static functions of dasd.c
  62 */
  63static int  dasd_alloc_queue(struct dasd_block *);
  64static void dasd_setup_queue(struct dasd_block *);
  65static void dasd_free_queue(struct dasd_block *);
  66static int dasd_flush_block_queue(struct dasd_block *);
  67static void dasd_device_tasklet(struct dasd_device *);
  68static void dasd_block_tasklet(struct dasd_block *);
  69static void do_kick_device(struct work_struct *);
  70static void do_restore_device(struct work_struct *);
  71static void do_reload_device(struct work_struct *);
  72static void do_requeue_requests(struct work_struct *);
  73static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
  74static void dasd_device_timeout(struct timer_list *);
  75static void dasd_block_timeout(struct timer_list *);
  76static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
  77static void dasd_profile_init(struct dasd_profile *, struct dentry *);
  78static void dasd_profile_exit(struct dasd_profile *);
  79static void dasd_hosts_init(struct dentry *, struct dasd_device *);
  80static void dasd_hosts_exit(struct dasd_device *);
  81
  82/*
  83 * SECTION: Operations on the device structure.
  84 */
  85static wait_queue_head_t dasd_init_waitq;
  86static wait_queue_head_t dasd_flush_wq;
  87static wait_queue_head_t generic_waitq;
  88static wait_queue_head_t shutdown_waitq;
  89
  90/*
  91 * Allocate memory for a new device structure.
  92 */
  93struct dasd_device *dasd_alloc_device(void)
  94{
  95        struct dasd_device *device;
  96
  97        device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
  98        if (!device)
  99                return ERR_PTR(-ENOMEM);
 100
 101        /* Get two pages for normal block device operations. */
 102        device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
 103        if (!device->ccw_mem) {
 104                kfree(device);
 105                return ERR_PTR(-ENOMEM);
 106        }
 107        /* Get one page for error recovery. */
 108        device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
 109        if (!device->erp_mem) {
 110                free_pages((unsigned long) device->ccw_mem, 1);
 111                kfree(device);
 112                return ERR_PTR(-ENOMEM);
 113        }
 114
 115        dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
 116        dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
 117        spin_lock_init(&device->mem_lock);
 118        atomic_set(&device->tasklet_scheduled, 0);
 119        tasklet_init(&device->tasklet,
 120                     (void (*)(unsigned long)) dasd_device_tasklet,
 121                     (unsigned long) device);
 122        INIT_LIST_HEAD(&device->ccw_queue);
 123        timer_setup(&device->timer, dasd_device_timeout, 0);
 124        INIT_WORK(&device->kick_work, do_kick_device);
 125        INIT_WORK(&device->restore_device, do_restore_device);
 126        INIT_WORK(&device->reload_device, do_reload_device);
 127        INIT_WORK(&device->requeue_requests, do_requeue_requests);
 128        device->state = DASD_STATE_NEW;
 129        device->target = DASD_STATE_NEW;
 130        mutex_init(&device->state_mutex);
 131        spin_lock_init(&device->profile.lock);
 132        return device;
 133}
 134
 135/*
 136 * Free memory of a device structure.
 137 */
 138void dasd_free_device(struct dasd_device *device)
 139{
 140        kfree(device->private);
 141        free_page((unsigned long) device->erp_mem);
 142        free_pages((unsigned long) device->ccw_mem, 1);
 143        kfree(device);
 144}
 145
 146/*
 147 * Allocate memory for a new device structure.
 148 */
 149struct dasd_block *dasd_alloc_block(void)
 150{
 151        struct dasd_block *block;
 152
 153        block = kzalloc(sizeof(*block), GFP_ATOMIC);
 154        if (!block)
 155                return ERR_PTR(-ENOMEM);
 156        /* open_count = 0 means device online but not in use */
 157        atomic_set(&block->open_count, -1);
 158
 159        atomic_set(&block->tasklet_scheduled, 0);
 160        tasklet_init(&block->tasklet,
 161                     (void (*)(unsigned long)) dasd_block_tasklet,
 162                     (unsigned long) block);
 163        INIT_LIST_HEAD(&block->ccw_queue);
 164        spin_lock_init(&block->queue_lock);
 165        timer_setup(&block->timer, dasd_block_timeout, 0);
 166        spin_lock_init(&block->profile.lock);
 167
 168        return block;
 169}
 170EXPORT_SYMBOL_GPL(dasd_alloc_block);
 171
 172/*
 173 * Free memory of a device structure.
 174 */
 175void dasd_free_block(struct dasd_block *block)
 176{
 177        kfree(block);
 178}
 179EXPORT_SYMBOL_GPL(dasd_free_block);
 180
 181/*
 182 * Make a new device known to the system.
 183 */
 184static int dasd_state_new_to_known(struct dasd_device *device)
 185{
 186        int rc;
 187
 188        /*
 189         * As long as the device is not in state DASD_STATE_NEW we want to
 190         * keep the reference count > 0.
 191         */
 192        dasd_get_device(device);
 193
 194        if (device->block) {
 195                rc = dasd_alloc_queue(device->block);
 196                if (rc) {
 197                        dasd_put_device(device);
 198                        return rc;
 199                }
 200        }
 201        device->state = DASD_STATE_KNOWN;
 202        return 0;
 203}
 204
 205/*
 206 * Let the system forget about a device.
 207 */
 208static int dasd_state_known_to_new(struct dasd_device *device)
 209{
 210        /* Disable extended error reporting for this device. */
 211        dasd_eer_disable(device);
 212        device->state = DASD_STATE_NEW;
 213
 214        if (device->block)
 215                dasd_free_queue(device->block);
 216
 217        /* Give up reference we took in dasd_state_new_to_known. */
 218        dasd_put_device(device);
 219        return 0;
 220}
 221
 222static struct dentry *dasd_debugfs_setup(const char *name,
 223                                         struct dentry *base_dentry)
 224{
 225        struct dentry *pde;
 226
 227        if (!base_dentry)
 228                return NULL;
 229        pde = debugfs_create_dir(name, base_dentry);
 230        if (!pde || IS_ERR(pde))
 231                return NULL;
 232        return pde;
 233}
 234
 235/*
 236 * Request the irq line for the device.
 237 */
 238static int dasd_state_known_to_basic(struct dasd_device *device)
 239{
 240        struct dasd_block *block = device->block;
 241        int rc = 0;
 242
 243        /* Allocate and register gendisk structure. */
 244        if (block) {
 245                rc = dasd_gendisk_alloc(block);
 246                if (rc)
 247                        return rc;
 248                block->debugfs_dentry =
 249                        dasd_debugfs_setup(block->gdp->disk_name,
 250                                           dasd_debugfs_root_entry);
 251                dasd_profile_init(&block->profile, block->debugfs_dentry);
 252                if (dasd_global_profile_level == DASD_PROFILE_ON)
 253                        dasd_profile_on(&device->block->profile);
 254        }
 255        device->debugfs_dentry =
 256                dasd_debugfs_setup(dev_name(&device->cdev->dev),
 257                                   dasd_debugfs_root_entry);
 258        dasd_profile_init(&device->profile, device->debugfs_dentry);
 259        dasd_hosts_init(device->debugfs_dentry, device);
 260
 261        /* register 'device' debug area, used for all DBF_DEV_XXX calls */
 262        device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
 263                                            8 * sizeof(long));
 264        debug_register_view(device->debug_area, &debug_sprintf_view);
 265        debug_set_level(device->debug_area, DBF_WARNING);
 266        DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
 267
 268        device->state = DASD_STATE_BASIC;
 269
 270        return rc;
 271}
 272
 273/*
 274 * Release the irq line for the device. Terminate any running i/o.
 275 */
 276static int dasd_state_basic_to_known(struct dasd_device *device)
 277{
 278        int rc;
 279
 280        if (device->discipline->basic_to_known) {
 281                rc = device->discipline->basic_to_known(device);
 282                if (rc)
 283                        return rc;
 284        }
 285
 286        if (device->block) {
 287                dasd_profile_exit(&device->block->profile);
 288                debugfs_remove(device->block->debugfs_dentry);
 289                dasd_gendisk_free(device->block);
 290                dasd_block_clear_timer(device->block);
 291        }
 292        rc = dasd_flush_device_queue(device);
 293        if (rc)
 294                return rc;
 295        dasd_device_clear_timer(device);
 296        dasd_profile_exit(&device->profile);
 297        dasd_hosts_exit(device);
 298        debugfs_remove(device->debugfs_dentry);
 299        DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
 300        if (device->debug_area != NULL) {
 301                debug_unregister(device->debug_area);
 302                device->debug_area = NULL;
 303        }
 304        device->state = DASD_STATE_KNOWN;
 305        return 0;
 306}
 307
 308/*
 309 * Do the initial analysis. The do_analysis function may return
 310 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 311 * until the discipline decides to continue the startup sequence
 312 * by calling the function dasd_change_state. The eckd disciplines
 313 * uses this to start a ccw that detects the format. The completion
 314 * interrupt for this detection ccw uses the kernel event daemon to
 315 * trigger the call to dasd_change_state. All this is done in the
 316 * discipline code, see dasd_eckd.c.
 317 * After the analysis ccw is done (do_analysis returned 0) the block
 318 * device is setup.
 319 * In case the analysis returns an error, the device setup is stopped
 320 * (a fake disk was already added to allow formatting).
 321 */
 322static int dasd_state_basic_to_ready(struct dasd_device *device)
 323{
 324        int rc;
 325        struct dasd_block *block;
 326        struct gendisk *disk;
 327
 328        rc = 0;
 329        block = device->block;
 330        /* make disk known with correct capacity */
 331        if (block) {
 332                if (block->base->discipline->do_analysis != NULL)
 333                        rc = block->base->discipline->do_analysis(block);
 334                if (rc) {
 335                        if (rc != -EAGAIN) {
 336                                device->state = DASD_STATE_UNFMT;
 337                                disk = device->block->gdp;
 338                                kobject_uevent(&disk_to_dev(disk)->kobj,
 339                                               KOBJ_CHANGE);
 340                                goto out;
 341                        }
 342                        return rc;
 343                }
 344                dasd_setup_queue(block);
 345                set_capacity(block->gdp,
 346                             block->blocks << block->s2b_shift);
 347                device->state = DASD_STATE_READY;
 348                rc = dasd_scan_partitions(block);
 349                if (rc) {
 350                        device->state = DASD_STATE_BASIC;
 351                        return rc;
 352                }
 353        } else {
 354                device->state = DASD_STATE_READY;
 355        }
 356out:
 357        if (device->discipline->basic_to_ready)
 358                rc = device->discipline->basic_to_ready(device);
 359        return rc;
 360}
 361
 362static inline
 363int _wait_for_empty_queues(struct dasd_device *device)
 364{
 365        if (device->block)
 366                return list_empty(&device->ccw_queue) &&
 367                        list_empty(&device->block->ccw_queue);
 368        else
 369                return list_empty(&device->ccw_queue);
 370}
 371
 372/*
 373 * Remove device from block device layer. Destroy dirty buffers.
 374 * Forget format information. Check if the target level is basic
 375 * and if it is create fake disk for formatting.
 376 */
 377static int dasd_state_ready_to_basic(struct dasd_device *device)
 378{
 379        int rc;
 380
 381        device->state = DASD_STATE_BASIC;
 382        if (device->block) {
 383                struct dasd_block *block = device->block;
 384                rc = dasd_flush_block_queue(block);
 385                if (rc) {
 386                        device->state = DASD_STATE_READY;
 387                        return rc;
 388                }
 389                dasd_destroy_partitions(block);
 390                block->blocks = 0;
 391                block->bp_block = 0;
 392                block->s2b_shift = 0;
 393        }
 394        return 0;
 395}
 396
 397/*
 398 * Back to basic.
 399 */
 400static int dasd_state_unfmt_to_basic(struct dasd_device *device)
 401{
 402        device->state = DASD_STATE_BASIC;
 403        return 0;
 404}
 405
 406/*
 407 * Make the device online and schedule the bottom half to start
 408 * the requeueing of requests from the linux request queue to the
 409 * ccw queue.
 410 */
 411static int
 412dasd_state_ready_to_online(struct dasd_device * device)
 413{
 414        struct gendisk *disk;
 415        struct disk_part_iter piter;
 416        struct hd_struct *part;
 417
 418        device->state = DASD_STATE_ONLINE;
 419        if (device->block) {
 420                dasd_schedule_block_bh(device->block);
 421                if ((device->features & DASD_FEATURE_USERAW)) {
 422                        disk = device->block->gdp;
 423                        kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
 424                        return 0;
 425                }
 426                disk = device->block->bdev->bd_disk;
 427                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 428                while ((part = disk_part_iter_next(&piter)))
 429                        kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
 430                disk_part_iter_exit(&piter);
 431        }
 432        return 0;
 433}
 434
 435/*
 436 * Stop the requeueing of requests again.
 437 */
 438static int dasd_state_online_to_ready(struct dasd_device *device)
 439{
 440        int rc;
 441        struct gendisk *disk;
 442        struct disk_part_iter piter;
 443        struct hd_struct *part;
 444
 445        if (device->discipline->online_to_ready) {
 446                rc = device->discipline->online_to_ready(device);
 447                if (rc)
 448                        return rc;
 449        }
 450
 451        device->state = DASD_STATE_READY;
 452        if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
 453                disk = device->block->bdev->bd_disk;
 454                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 455                while ((part = disk_part_iter_next(&piter)))
 456                        kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
 457                disk_part_iter_exit(&piter);
 458        }
 459        return 0;
 460}
 461
 462/*
 463 * Device startup state changes.
 464 */
 465static int dasd_increase_state(struct dasd_device *device)
 466{
 467        int rc;
 468
 469        rc = 0;
 470        if (device->state == DASD_STATE_NEW &&
 471            device->target >= DASD_STATE_KNOWN)
 472                rc = dasd_state_new_to_known(device);
 473
 474        if (!rc &&
 475            device->state == DASD_STATE_KNOWN &&
 476            device->target >= DASD_STATE_BASIC)
 477                rc = dasd_state_known_to_basic(device);
 478
 479        if (!rc &&
 480            device->state == DASD_STATE_BASIC &&
 481            device->target >= DASD_STATE_READY)
 482                rc = dasd_state_basic_to_ready(device);
 483
 484        if (!rc &&
 485            device->state == DASD_STATE_UNFMT &&
 486            device->target > DASD_STATE_UNFMT)
 487                rc = -EPERM;
 488
 489        if (!rc &&
 490            device->state == DASD_STATE_READY &&
 491            device->target >= DASD_STATE_ONLINE)
 492                rc = dasd_state_ready_to_online(device);
 493
 494        return rc;
 495}
 496
 497/*
 498 * Device shutdown state changes.
 499 */
 500static int dasd_decrease_state(struct dasd_device *device)
 501{
 502        int rc;
 503
 504        rc = 0;
 505        if (device->state == DASD_STATE_ONLINE &&
 506            device->target <= DASD_STATE_READY)
 507                rc = dasd_state_online_to_ready(device);
 508
 509        if (!rc &&
 510            device->state == DASD_STATE_READY &&
 511            device->target <= DASD_STATE_BASIC)
 512                rc = dasd_state_ready_to_basic(device);
 513
 514        if (!rc &&
 515            device->state == DASD_STATE_UNFMT &&
 516            device->target <= DASD_STATE_BASIC)
 517                rc = dasd_state_unfmt_to_basic(device);
 518
 519        if (!rc &&
 520            device->state == DASD_STATE_BASIC &&
 521            device->target <= DASD_STATE_KNOWN)
 522                rc = dasd_state_basic_to_known(device);
 523
 524        if (!rc &&
 525            device->state == DASD_STATE_KNOWN &&
 526            device->target <= DASD_STATE_NEW)
 527                rc = dasd_state_known_to_new(device);
 528
 529        return rc;
 530}
 531
 532/*
 533 * This is the main startup/shutdown routine.
 534 */
 535static void dasd_change_state(struct dasd_device *device)
 536{
 537        int rc;
 538
 539        if (device->state == device->target)
 540                /* Already where we want to go today... */
 541                return;
 542        if (device->state < device->target)
 543                rc = dasd_increase_state(device);
 544        else
 545                rc = dasd_decrease_state(device);
 546        if (rc == -EAGAIN)
 547                return;
 548        if (rc)
 549                device->target = device->state;
 550
 551        /* let user-space know that the device status changed */
 552        kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
 553
 554        if (device->state == device->target)
 555                wake_up(&dasd_init_waitq);
 556}
 557
 558/*
 559 * Kick starter for devices that did not complete the startup/shutdown
 560 * procedure or were sleeping because of a pending state.
 561 * dasd_kick_device will schedule a call do do_kick_device to the kernel
 562 * event daemon.
 563 */
 564static void do_kick_device(struct work_struct *work)
 565{
 566        struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
 567        mutex_lock(&device->state_mutex);
 568        dasd_change_state(device);
 569        mutex_unlock(&device->state_mutex);
 570        dasd_schedule_device_bh(device);
 571        dasd_put_device(device);
 572}
 573
 574void dasd_kick_device(struct dasd_device *device)
 575{
 576        dasd_get_device(device);
 577        /* queue call to dasd_kick_device to the kernel event daemon. */
 578        if (!schedule_work(&device->kick_work))
 579                dasd_put_device(device);
 580}
 581EXPORT_SYMBOL(dasd_kick_device);
 582
 583/*
 584 * dasd_reload_device will schedule a call do do_reload_device to the kernel
 585 * event daemon.
 586 */
 587static void do_reload_device(struct work_struct *work)
 588{
 589        struct dasd_device *device = container_of(work, struct dasd_device,
 590                                                  reload_device);
 591        device->discipline->reload(device);
 592        dasd_put_device(device);
 593}
 594
 595void dasd_reload_device(struct dasd_device *device)
 596{
 597        dasd_get_device(device);
 598        /* queue call to dasd_reload_device to the kernel event daemon. */
 599        if (!schedule_work(&device->reload_device))
 600                dasd_put_device(device);
 601}
 602EXPORT_SYMBOL(dasd_reload_device);
 603
 604/*
 605 * dasd_restore_device will schedule a call do do_restore_device to the kernel
 606 * event daemon.
 607 */
 608static void do_restore_device(struct work_struct *work)
 609{
 610        struct dasd_device *device = container_of(work, struct dasd_device,
 611                                                  restore_device);
 612        device->cdev->drv->restore(device->cdev);
 613        dasd_put_device(device);
 614}
 615
 616void dasd_restore_device(struct dasd_device *device)
 617{
 618        dasd_get_device(device);
 619        /* queue call to dasd_restore_device to the kernel event daemon. */
 620        if (!schedule_work(&device->restore_device))
 621                dasd_put_device(device);
 622}
 623
 624/*
 625 * Set the target state for a device and starts the state change.
 626 */
 627void dasd_set_target_state(struct dasd_device *device, int target)
 628{
 629        dasd_get_device(device);
 630        mutex_lock(&device->state_mutex);
 631        /* If we are in probeonly mode stop at DASD_STATE_READY. */
 632        if (dasd_probeonly && target > DASD_STATE_READY)
 633                target = DASD_STATE_READY;
 634        if (device->target != target) {
 635                if (device->state == target)
 636                        wake_up(&dasd_init_waitq);
 637                device->target = target;
 638        }
 639        if (device->state != device->target)
 640                dasd_change_state(device);
 641        mutex_unlock(&device->state_mutex);
 642        dasd_put_device(device);
 643}
 644EXPORT_SYMBOL(dasd_set_target_state);
 645
 646/*
 647 * Enable devices with device numbers in [from..to].
 648 */
 649static inline int _wait_for_device(struct dasd_device *device)
 650{
 651        return (device->state == device->target);
 652}
 653
 654void dasd_enable_device(struct dasd_device *device)
 655{
 656        dasd_set_target_state(device, DASD_STATE_ONLINE);
 657        if (device->state <= DASD_STATE_KNOWN)
 658                /* No discipline for device found. */
 659                dasd_set_target_state(device, DASD_STATE_NEW);
 660        /* Now wait for the devices to come up. */
 661        wait_event(dasd_init_waitq, _wait_for_device(device));
 662
 663        dasd_reload_device(device);
 664        if (device->discipline->kick_validate)
 665                device->discipline->kick_validate(device);
 666}
 667EXPORT_SYMBOL(dasd_enable_device);
 668
 669/*
 670 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 671 */
 672
 673unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
 674
 675#ifdef CONFIG_DASD_PROFILE
 676struct dasd_profile dasd_global_profile = {
 677        .lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
 678};
 679static struct dentry *dasd_debugfs_global_entry;
 680
 681/*
 682 * Add profiling information for cqr before execution.
 683 */
 684static void dasd_profile_start(struct dasd_block *block,
 685                               struct dasd_ccw_req *cqr,
 686                               struct request *req)
 687{
 688        struct list_head *l;
 689        unsigned int counter;
 690        struct dasd_device *device;
 691
 692        /* count the length of the chanq for statistics */
 693        counter = 0;
 694        if (dasd_global_profile_level || block->profile.data)
 695                list_for_each(l, &block->ccw_queue)
 696                        if (++counter >= 31)
 697                                break;
 698
 699        spin_lock(&dasd_global_profile.lock);
 700        if (dasd_global_profile.data) {
 701                dasd_global_profile.data->dasd_io_nr_req[counter]++;
 702                if (rq_data_dir(req) == READ)
 703                        dasd_global_profile.data->dasd_read_nr_req[counter]++;
 704        }
 705        spin_unlock(&dasd_global_profile.lock);
 706
 707        spin_lock(&block->profile.lock);
 708        if (block->profile.data) {
 709                block->profile.data->dasd_io_nr_req[counter]++;
 710                if (rq_data_dir(req) == READ)
 711                        block->profile.data->dasd_read_nr_req[counter]++;
 712        }
 713        spin_unlock(&block->profile.lock);
 714
 715        /*
 716         * We count the request for the start device, even though it may run on
 717         * some other device due to error recovery. This way we make sure that
 718         * we count each request only once.
 719         */
 720        device = cqr->startdev;
 721        if (device->profile.data) {
 722                counter = 1; /* request is not yet queued on the start device */
 723                list_for_each(l, &device->ccw_queue)
 724                        if (++counter >= 31)
 725                                break;
 726        }
 727        spin_lock(&device->profile.lock);
 728        if (device->profile.data) {
 729                device->profile.data->dasd_io_nr_req[counter]++;
 730                if (rq_data_dir(req) == READ)
 731                        device->profile.data->dasd_read_nr_req[counter]++;
 732        }
 733        spin_unlock(&device->profile.lock);
 734}
 735
 736/*
 737 * Add profiling information for cqr after execution.
 738 */
 739
 740#define dasd_profile_counter(value, index)                         \
 741{                                                                  \
 742        for (index = 0; index < 31 && value >> (2+index); index++) \
 743                ;                                                  \
 744}
 745
 746static void dasd_profile_end_add_data(struct dasd_profile_info *data,
 747                                      int is_alias,
 748                                      int is_tpm,
 749                                      int is_read,
 750                                      long sectors,
 751                                      int sectors_ind,
 752                                      int tottime_ind,
 753                                      int tottimeps_ind,
 754                                      int strtime_ind,
 755                                      int irqtime_ind,
 756                                      int irqtimeps_ind,
 757                                      int endtime_ind)
 758{
 759        /* in case of an overflow, reset the whole profile */
 760        if (data->dasd_io_reqs == UINT_MAX) {
 761                        memset(data, 0, sizeof(*data));
 762                        ktime_get_real_ts64(&data->starttod);
 763        }
 764        data->dasd_io_reqs++;
 765        data->dasd_io_sects += sectors;
 766        if (is_alias)
 767                data->dasd_io_alias++;
 768        if (is_tpm)
 769                data->dasd_io_tpm++;
 770
 771        data->dasd_io_secs[sectors_ind]++;
 772        data->dasd_io_times[tottime_ind]++;
 773        data->dasd_io_timps[tottimeps_ind]++;
 774        data->dasd_io_time1[strtime_ind]++;
 775        data->dasd_io_time2[irqtime_ind]++;
 776        data->dasd_io_time2ps[irqtimeps_ind]++;
 777        data->dasd_io_time3[endtime_ind]++;
 778
 779        if (is_read) {
 780                data->dasd_read_reqs++;
 781                data->dasd_read_sects += sectors;
 782                if (is_alias)
 783                        data->dasd_read_alias++;
 784                if (is_tpm)
 785                        data->dasd_read_tpm++;
 786                data->dasd_read_secs[sectors_ind]++;
 787                data->dasd_read_times[tottime_ind]++;
 788                data->dasd_read_time1[strtime_ind]++;
 789                data->dasd_read_time2[irqtime_ind]++;
 790                data->dasd_read_time3[endtime_ind]++;
 791        }
 792}
 793
 794static void dasd_profile_end(struct dasd_block *block,
 795                             struct dasd_ccw_req *cqr,
 796                             struct request *req)
 797{
 798        unsigned long strtime, irqtime, endtime, tottime;
 799        unsigned long tottimeps, sectors;
 800        struct dasd_device *device;
 801        int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
 802        int irqtime_ind, irqtimeps_ind, endtime_ind;
 803        struct dasd_profile_info *data;
 804
 805        device = cqr->startdev;
 806        if (!(dasd_global_profile_level ||
 807              block->profile.data ||
 808              device->profile.data))
 809                return;
 810
 811        sectors = blk_rq_sectors(req);
 812        if (!cqr->buildclk || !cqr->startclk ||
 813            !cqr->stopclk || !cqr->endclk ||
 814            !sectors)
 815                return;
 816
 817        strtime = ((cqr->startclk - cqr->buildclk) >> 12);
 818        irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
 819        endtime = ((cqr->endclk - cqr->stopclk) >> 12);
 820        tottime = ((cqr->endclk - cqr->buildclk) >> 12);
 821        tottimeps = tottime / sectors;
 822
 823        dasd_profile_counter(sectors, sectors_ind);
 824        dasd_profile_counter(tottime, tottime_ind);
 825        dasd_profile_counter(tottimeps, tottimeps_ind);
 826        dasd_profile_counter(strtime, strtime_ind);
 827        dasd_profile_counter(irqtime, irqtime_ind);
 828        dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
 829        dasd_profile_counter(endtime, endtime_ind);
 830
 831        spin_lock(&dasd_global_profile.lock);
 832        if (dasd_global_profile.data) {
 833                data = dasd_global_profile.data;
 834                data->dasd_sum_times += tottime;
 835                data->dasd_sum_time_str += strtime;
 836                data->dasd_sum_time_irq += irqtime;
 837                data->dasd_sum_time_end += endtime;
 838                dasd_profile_end_add_data(dasd_global_profile.data,
 839                                          cqr->startdev != block->base,
 840                                          cqr->cpmode == 1,
 841                                          rq_data_dir(req) == READ,
 842                                          sectors, sectors_ind, tottime_ind,
 843                                          tottimeps_ind, strtime_ind,
 844                                          irqtime_ind, irqtimeps_ind,
 845                                          endtime_ind);
 846        }
 847        spin_unlock(&dasd_global_profile.lock);
 848
 849        spin_lock(&block->profile.lock);
 850        if (block->profile.data) {
 851                data = block->profile.data;
 852                data->dasd_sum_times += tottime;
 853                data->dasd_sum_time_str += strtime;
 854                data->dasd_sum_time_irq += irqtime;
 855                data->dasd_sum_time_end += endtime;
 856                dasd_profile_end_add_data(block->profile.data,
 857                                          cqr->startdev != block->base,
 858                                          cqr->cpmode == 1,
 859                                          rq_data_dir(req) == READ,
 860                                          sectors, sectors_ind, tottime_ind,
 861                                          tottimeps_ind, strtime_ind,
 862                                          irqtime_ind, irqtimeps_ind,
 863                                          endtime_ind);
 864        }
 865        spin_unlock(&block->profile.lock);
 866
 867        spin_lock(&device->profile.lock);
 868        if (device->profile.data) {
 869                data = device->profile.data;
 870                data->dasd_sum_times += tottime;
 871                data->dasd_sum_time_str += strtime;
 872                data->dasd_sum_time_irq += irqtime;
 873                data->dasd_sum_time_end += endtime;
 874                dasd_profile_end_add_data(device->profile.data,
 875                                          cqr->startdev != block->base,
 876                                          cqr->cpmode == 1,
 877                                          rq_data_dir(req) == READ,
 878                                          sectors, sectors_ind, tottime_ind,
 879                                          tottimeps_ind, strtime_ind,
 880                                          irqtime_ind, irqtimeps_ind,
 881                                          endtime_ind);
 882        }
 883        spin_unlock(&device->profile.lock);
 884}
 885
 886void dasd_profile_reset(struct dasd_profile *profile)
 887{
 888        struct dasd_profile_info *data;
 889
 890        spin_lock_bh(&profile->lock);
 891        data = profile->data;
 892        if (!data) {
 893                spin_unlock_bh(&profile->lock);
 894                return;
 895        }
 896        memset(data, 0, sizeof(*data));
 897        ktime_get_real_ts64(&data->starttod);
 898        spin_unlock_bh(&profile->lock);
 899}
 900
 901int dasd_profile_on(struct dasd_profile *profile)
 902{
 903        struct dasd_profile_info *data;
 904
 905        data = kzalloc(sizeof(*data), GFP_KERNEL);
 906        if (!data)
 907                return -ENOMEM;
 908        spin_lock_bh(&profile->lock);
 909        if (profile->data) {
 910                spin_unlock_bh(&profile->lock);
 911                kfree(data);
 912                return 0;
 913        }
 914        ktime_get_real_ts64(&data->starttod);
 915        profile->data = data;
 916        spin_unlock_bh(&profile->lock);
 917        return 0;
 918}
 919
 920void dasd_profile_off(struct dasd_profile *profile)
 921{
 922        spin_lock_bh(&profile->lock);
 923        kfree(profile->data);
 924        profile->data = NULL;
 925        spin_unlock_bh(&profile->lock);
 926}
 927
 928char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
 929{
 930        char *buffer;
 931
 932        buffer = vmalloc(user_len + 1);
 933        if (buffer == NULL)
 934                return ERR_PTR(-ENOMEM);
 935        if (copy_from_user(buffer, user_buf, user_len) != 0) {
 936                vfree(buffer);
 937                return ERR_PTR(-EFAULT);
 938        }
 939        /* got the string, now strip linefeed. */
 940        if (buffer[user_len - 1] == '\n')
 941                buffer[user_len - 1] = 0;
 942        else
 943                buffer[user_len] = 0;
 944        return buffer;
 945}
 946
 947static ssize_t dasd_stats_write(struct file *file,
 948                                const char __user *user_buf,
 949                                size_t user_len, loff_t *pos)
 950{
 951        char *buffer, *str;
 952        int rc;
 953        struct seq_file *m = (struct seq_file *)file->private_data;
 954        struct dasd_profile *prof = m->private;
 955
 956        if (user_len > 65536)
 957                user_len = 65536;
 958        buffer = dasd_get_user_string(user_buf, user_len);
 959        if (IS_ERR(buffer))
 960                return PTR_ERR(buffer);
 961
 962        str = skip_spaces(buffer);
 963        rc = user_len;
 964        if (strncmp(str, "reset", 5) == 0) {
 965                dasd_profile_reset(prof);
 966        } else if (strncmp(str, "on", 2) == 0) {
 967                rc = dasd_profile_on(prof);
 968                if (rc)
 969                        goto out;
 970                rc = user_len;
 971                if (prof == &dasd_global_profile) {
 972                        dasd_profile_reset(prof);
 973                        dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
 974                }
 975        } else if (strncmp(str, "off", 3) == 0) {
 976                if (prof == &dasd_global_profile)
 977                        dasd_global_profile_level = DASD_PROFILE_OFF;
 978                dasd_profile_off(prof);
 979        } else
 980                rc = -EINVAL;
 981out:
 982        vfree(buffer);
 983        return rc;
 984}
 985
 986static void dasd_stats_array(struct seq_file *m, unsigned int *array)
 987{
 988        int i;
 989
 990        for (i = 0; i < 32; i++)
 991                seq_printf(m, "%u ", array[i]);
 992        seq_putc(m, '\n');
 993}
 994
 995static void dasd_stats_seq_print(struct seq_file *m,
 996                                 struct dasd_profile_info *data)
 997{
 998        seq_printf(m, "start_time %lld.%09ld\n",
 999                   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
1000        seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
1001        seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
1002        seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
1003        seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
1004        seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
1005                   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
1006        seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
1007                   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
1008        seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
1009                   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
1010        seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
1011                   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
1012        seq_puts(m, "histogram_sectors ");
1013        dasd_stats_array(m, data->dasd_io_secs);
1014        seq_puts(m, "histogram_io_times ");
1015        dasd_stats_array(m, data->dasd_io_times);
1016        seq_puts(m, "histogram_io_times_weighted ");
1017        dasd_stats_array(m, data->dasd_io_timps);
1018        seq_puts(m, "histogram_time_build_to_ssch ");
1019        dasd_stats_array(m, data->dasd_io_time1);
1020        seq_puts(m, "histogram_time_ssch_to_irq ");
1021        dasd_stats_array(m, data->dasd_io_time2);
1022        seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
1023        dasd_stats_array(m, data->dasd_io_time2ps);
1024        seq_puts(m, "histogram_time_irq_to_end ");
1025        dasd_stats_array(m, data->dasd_io_time3);
1026        seq_puts(m, "histogram_ccw_queue_length ");
1027        dasd_stats_array(m, data->dasd_io_nr_req);
1028        seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
1029        seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
1030        seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
1031        seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
1032        seq_puts(m, "histogram_read_sectors ");
1033        dasd_stats_array(m, data->dasd_read_secs);
1034        seq_puts(m, "histogram_read_times ");
1035        dasd_stats_array(m, data->dasd_read_times);
1036        seq_puts(m, "histogram_read_time_build_to_ssch ");
1037        dasd_stats_array(m, data->dasd_read_time1);
1038        seq_puts(m, "histogram_read_time_ssch_to_irq ");
1039        dasd_stats_array(m, data->dasd_read_time2);
1040        seq_puts(m, "histogram_read_time_irq_to_end ");
1041        dasd_stats_array(m, data->dasd_read_time3);
1042        seq_puts(m, "histogram_read_ccw_queue_length ");
1043        dasd_stats_array(m, data->dasd_read_nr_req);
1044}
1045
1046static int dasd_stats_show(struct seq_file *m, void *v)
1047{
1048        struct dasd_profile *profile;
1049        struct dasd_profile_info *data;
1050
1051        profile = m->private;
1052        spin_lock_bh(&profile->lock);
1053        data = profile->data;
1054        if (!data) {
1055                spin_unlock_bh(&profile->lock);
1056                seq_puts(m, "disabled\n");
1057                return 0;
1058        }
1059        dasd_stats_seq_print(m, data);
1060        spin_unlock_bh(&profile->lock);
1061        return 0;
1062}
1063
1064static int dasd_stats_open(struct inode *inode, struct file *file)
1065{
1066        struct dasd_profile *profile = inode->i_private;
1067        return single_open(file, dasd_stats_show, profile);
1068}
1069
1070static const struct file_operations dasd_stats_raw_fops = {
1071        .owner          = THIS_MODULE,
1072        .open           = dasd_stats_open,
1073        .read           = seq_read,
1074        .llseek         = seq_lseek,
1075        .release        = single_release,
1076        .write          = dasd_stats_write,
1077};
1078
1079static void dasd_profile_init(struct dasd_profile *profile,
1080                              struct dentry *base_dentry)
1081{
1082        umode_t mode;
1083        struct dentry *pde;
1084
1085        if (!base_dentry)
1086                return;
1087        profile->dentry = NULL;
1088        profile->data = NULL;
1089        mode = (S_IRUSR | S_IWUSR | S_IFREG);
1090        pde = debugfs_create_file("statistics", mode, base_dentry,
1091                                  profile, &dasd_stats_raw_fops);
1092        if (pde && !IS_ERR(pde))
1093                profile->dentry = pde;
1094        return;
1095}
1096
1097static void dasd_profile_exit(struct dasd_profile *profile)
1098{
1099        dasd_profile_off(profile);
1100        debugfs_remove(profile->dentry);
1101        profile->dentry = NULL;
1102}
1103
1104static void dasd_statistics_removeroot(void)
1105{
1106        dasd_global_profile_level = DASD_PROFILE_OFF;
1107        dasd_profile_exit(&dasd_global_profile);
1108        debugfs_remove(dasd_debugfs_global_entry);
1109        debugfs_remove(dasd_debugfs_root_entry);
1110}
1111
1112static void dasd_statistics_createroot(void)
1113{
1114        struct dentry *pde;
1115
1116        dasd_debugfs_root_entry = NULL;
1117        pde = debugfs_create_dir("dasd", NULL);
1118        if (!pde || IS_ERR(pde))
1119                goto error;
1120        dasd_debugfs_root_entry = pde;
1121        pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
1122        if (!pde || IS_ERR(pde))
1123                goto error;
1124        dasd_debugfs_global_entry = pde;
1125        dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
1126        return;
1127
1128error:
1129        DBF_EVENT(DBF_ERR, "%s",
1130                  "Creation of the dasd debugfs interface failed");
1131        dasd_statistics_removeroot();
1132        return;
1133}
1134
1135#else
1136#define dasd_profile_start(block, cqr, req) do {} while (0)
1137#define dasd_profile_end(block, cqr, req) do {} while (0)
1138
1139static void dasd_statistics_createroot(void)
1140{
1141        return;
1142}
1143
1144static void dasd_statistics_removeroot(void)
1145{
1146        return;
1147}
1148
1149int dasd_stats_generic_show(struct seq_file *m, void *v)
1150{
1151        seq_puts(m, "Statistics are not activated in this kernel\n");
1152        return 0;
1153}
1154
1155static void dasd_profile_init(struct dasd_profile *profile,
1156                              struct dentry *base_dentry)
1157{
1158        return;
1159}
1160
1161static void dasd_profile_exit(struct dasd_profile *profile)
1162{
1163        return;
1164}
1165
1166int dasd_profile_on(struct dasd_profile *profile)
1167{
1168        return 0;
1169}
1170
1171#endif                          /* CONFIG_DASD_PROFILE */
1172
1173static int dasd_hosts_show(struct seq_file *m, void *v)
1174{
1175        struct dasd_device *device;
1176        int rc = -EOPNOTSUPP;
1177
1178        device = m->private;
1179        dasd_get_device(device);
1180
1181        if (device->discipline->hosts_print)
1182                rc = device->discipline->hosts_print(device, m);
1183
1184        dasd_put_device(device);
1185        return rc;
1186}
1187
1188static int dasd_hosts_open(struct inode *inode, struct file *file)
1189{
1190        struct dasd_device *device = inode->i_private;
1191
1192        return single_open(file, dasd_hosts_show, device);
1193}
1194
1195static const struct file_operations dasd_hosts_fops = {
1196        .owner          = THIS_MODULE,
1197        .open           = dasd_hosts_open,
1198        .read           = seq_read,
1199        .llseek         = seq_lseek,
1200        .release        = single_release,
1201};
1202
1203static void dasd_hosts_exit(struct dasd_device *device)
1204{
1205        debugfs_remove(device->hosts_dentry);
1206        device->hosts_dentry = NULL;
1207}
1208
1209static void dasd_hosts_init(struct dentry *base_dentry,
1210                            struct dasd_device *device)
1211{
1212        struct dentry *pde;
1213        umode_t mode;
1214
1215        if (!base_dentry)
1216                return;
1217
1218        mode = S_IRUSR | S_IFREG;
1219        pde = debugfs_create_file("host_access_list", mode, base_dentry,
1220                                  device, &dasd_hosts_fops);
1221        if (pde && !IS_ERR(pde))
1222                device->hosts_dentry = pde;
1223}
1224
1225/*
1226 * Allocate memory for a channel program with 'cplength' channel
1227 * command words and 'datasize' additional space. There are two
1228 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
1229 * memory and 2) dasd_smalloc_request uses the static ccw memory
1230 * that gets allocated for each device.
1231 */
1232struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
1233                                          int datasize,
1234                                          struct dasd_device *device)
1235{
1236        struct dasd_ccw_req *cqr;
1237
1238        /* Sanity checks */
1239        BUG_ON(datasize > PAGE_SIZE ||
1240             (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
1241
1242        cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
1243        if (cqr == NULL)
1244                return ERR_PTR(-ENOMEM);
1245        cqr->cpaddr = NULL;
1246        if (cplength > 0) {
1247                cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
1248                                      GFP_ATOMIC | GFP_DMA);
1249                if (cqr->cpaddr == NULL) {
1250                        kfree(cqr);
1251                        return ERR_PTR(-ENOMEM);
1252                }
1253        }
1254        cqr->data = NULL;
1255        if (datasize > 0) {
1256                cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
1257                if (cqr->data == NULL) {
1258                        kfree(cqr->cpaddr);
1259                        kfree(cqr);
1260                        return ERR_PTR(-ENOMEM);
1261                }
1262        }
1263        cqr->magic =  magic;
1264        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1265        dasd_get_device(device);
1266        return cqr;
1267}
1268EXPORT_SYMBOL(dasd_kmalloc_request);
1269
1270struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
1271                                          int datasize,
1272                                          struct dasd_device *device)
1273{
1274        unsigned long flags;
1275        struct dasd_ccw_req *cqr;
1276        char *data;
1277        int size;
1278
1279        size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
1280        if (cplength > 0)
1281                size += cplength * sizeof(struct ccw1);
1282        if (datasize > 0)
1283                size += datasize;
1284        spin_lock_irqsave(&device->mem_lock, flags);
1285        cqr = (struct dasd_ccw_req *)
1286                dasd_alloc_chunk(&device->ccw_chunks, size);
1287        spin_unlock_irqrestore(&device->mem_lock, flags);
1288        if (cqr == NULL)
1289                return ERR_PTR(-ENOMEM);
1290        memset(cqr, 0, sizeof(struct dasd_ccw_req));
1291        data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
1292        cqr->cpaddr = NULL;
1293        if (cplength > 0) {
1294                cqr->cpaddr = (struct ccw1 *) data;
1295                data += cplength*sizeof(struct ccw1);
1296                memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
1297        }
1298        cqr->data = NULL;
1299        if (datasize > 0) {
1300                cqr->data = data;
1301                memset(cqr->data, 0, datasize);
1302        }
1303        cqr->magic = magic;
1304        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1305        dasd_get_device(device);
1306        return cqr;
1307}
1308EXPORT_SYMBOL(dasd_smalloc_request);
1309
1310/*
1311 * Free memory of a channel program. This function needs to free all the
1312 * idal lists that might have been created by dasd_set_cda and the
1313 * struct dasd_ccw_req itself.
1314 */
1315void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1316{
1317        struct ccw1 *ccw;
1318
1319        /* Clear any idals used for the request. */
1320        ccw = cqr->cpaddr;
1321        do {
1322                clear_normalized_cda(ccw);
1323        } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
1324        kfree(cqr->cpaddr);
1325        kfree(cqr->data);
1326        kfree(cqr);
1327        dasd_put_device(device);
1328}
1329EXPORT_SYMBOL(dasd_kfree_request);
1330
1331void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1332{
1333        unsigned long flags;
1334
1335        spin_lock_irqsave(&device->mem_lock, flags);
1336        dasd_free_chunk(&device->ccw_chunks, cqr);
1337        spin_unlock_irqrestore(&device->mem_lock, flags);
1338        dasd_put_device(device);
1339}
1340EXPORT_SYMBOL(dasd_sfree_request);
1341
1342/*
1343 * Check discipline magic in cqr.
1344 */
1345static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
1346{
1347        struct dasd_device *device;
1348
1349        if (cqr == NULL)
1350                return -EINVAL;
1351        device = cqr->startdev;
1352        if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
1353                DBF_DEV_EVENT(DBF_WARNING, device,
1354                            " dasd_ccw_req 0x%08x magic doesn't match"
1355                            " discipline 0x%08x",
1356                            cqr->magic,
1357                            *(unsigned int *) device->discipline->name);
1358                return -EINVAL;
1359        }
1360        return 0;
1361}
1362
1363/*
1364 * Terminate the current i/o and set the request to clear_pending.
1365 * Timer keeps device runnig.
1366 * ccw_device_clear can fail if the i/o subsystem
1367 * is in a bad mood.
1368 */
1369int dasd_term_IO(struct dasd_ccw_req *cqr)
1370{
1371        struct dasd_device *device;
1372        int retries, rc;
1373        char errorstring[ERRORLENGTH];
1374
1375        /* Check the cqr */
1376        rc = dasd_check_cqr(cqr);
1377        if (rc)
1378                return rc;
1379        retries = 0;
1380        device = (struct dasd_device *) cqr->startdev;
1381        while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
1382                rc = ccw_device_clear(device->cdev, (long) cqr);
1383                switch (rc) {
1384                case 0: /* termination successful */
1385                        cqr->status = DASD_CQR_CLEAR_PENDING;
1386                        cqr->stopclk = get_tod_clock();
1387                        cqr->starttime = 0;
1388                        DBF_DEV_EVENT(DBF_DEBUG, device,
1389                                      "terminate cqr %p successful",
1390                                      cqr);
1391                        break;
1392                case -ENODEV:
1393                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
1394                                      "device gone, retry");
1395                        break;
1396                case -EINVAL:
1397                        /*
1398                         * device not valid so no I/O could be running
1399                         * handle CQR as termination successful
1400                         */
1401                        cqr->status = DASD_CQR_CLEARED;
1402                        cqr->stopclk = get_tod_clock();
1403                        cqr->starttime = 0;
1404                        /* no retries for invalid devices */
1405                        cqr->retries = -1;
1406                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
1407                                      "EINVAL, handle as terminated");
1408                        /* fake rc to success */
1409                        rc = 0;
1410                        break;
1411                default:
1412                        /* internal error 10 - unknown rc*/
1413                        snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
1414                        dev_err(&device->cdev->dev, "An error occurred in the "
1415                                "DASD device driver, reason=%s\n", errorstring);
1416                        BUG();
1417                        break;
1418                }
1419                retries++;
1420        }
1421        dasd_schedule_device_bh(device);
1422        return rc;
1423}
1424EXPORT_SYMBOL(dasd_term_IO);
1425
1426/*
1427 * Start the i/o. This start_IO can fail if the channel is really busy.
1428 * In that case set up a timer to start the request later.
1429 */
1430int dasd_start_IO(struct dasd_ccw_req *cqr)
1431{
1432        struct dasd_device *device;
1433        int rc;
1434        char errorstring[ERRORLENGTH];
1435
1436        /* Check the cqr */
1437        rc = dasd_check_cqr(cqr);
1438        if (rc) {
1439                cqr->intrc = rc;
1440                return rc;
1441        }
1442        device = (struct dasd_device *) cqr->startdev;
1443        if (((cqr->block &&
1444              test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
1445             test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
1446            !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
1447                DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
1448                              "because of stolen lock", cqr);
1449                cqr->status = DASD_CQR_ERROR;
1450                cqr->intrc = -EPERM;
1451                return -EPERM;
1452        }
1453        if (cqr->retries < 0) {
1454                /* internal error 14 - start_IO run out of retries */
1455                sprintf(errorstring, "14 %p", cqr);
1456                dev_err(&device->cdev->dev, "An error occurred in the DASD "
1457                        "device driver, reason=%s\n", errorstring);
1458                cqr->status = DASD_CQR_ERROR;
1459                return -EIO;
1460        }
1461        cqr->startclk = get_tod_clock();
1462        cqr->starttime = jiffies;
1463        cqr->retries--;
1464        if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1465                cqr->lpm &= dasd_path_get_opm(device);
1466                if (!cqr->lpm)
1467                        cqr->lpm = dasd_path_get_opm(device);
1468        }
1469        if (cqr->cpmode == 1) {
1470                rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
1471                                         (long) cqr, cqr->lpm);
1472        } else {
1473                rc = ccw_device_start(device->cdev, cqr->cpaddr,
1474                                      (long) cqr, cqr->lpm, 0);
1475        }
1476        switch (rc) {
1477        case 0:
1478                cqr->status = DASD_CQR_IN_IO;
1479                break;
1480        case -EBUSY:
1481                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1482                              "start_IO: device busy, retry later");
1483                break;
1484        case -EACCES:
1485                /* -EACCES indicates that the request used only a subset of the
1486                 * available paths and all these paths are gone. If the lpm of
1487                 * this request was only a subset of the opm (e.g. the ppm) then
1488                 * we just do a retry with all available paths.
1489                 * If we already use the full opm, something is amiss, and we
1490                 * need a full path verification.
1491                 */
1492                if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1493                        DBF_DEV_EVENT(DBF_WARNING, device,
1494                                      "start_IO: selected paths gone (%x)",
1495                                      cqr->lpm);
1496                } else if (cqr->lpm != dasd_path_get_opm(device)) {
1497                        cqr->lpm = dasd_path_get_opm(device);
1498                        DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
1499                                      "start_IO: selected paths gone,"
1500                                      " retry on all paths");
1501                } else {
1502                        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1503                                      "start_IO: all paths in opm gone,"
1504                                      " do path verification");
1505                        dasd_generic_last_path_gone(device);
1506                        dasd_path_no_path(device);
1507                        dasd_path_set_tbvpm(device,
1508                                          ccw_device_get_path_mask(
1509                                                  device->cdev));
1510                }
1511                break;
1512        case -ENODEV:
1513                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1514                              "start_IO: -ENODEV device gone, retry");
1515                break;
1516        case -EIO:
1517                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1518                              "start_IO: -EIO device gone, retry");
1519                break;
1520        case -EINVAL:
1521                /* most likely caused in power management context */
1522                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1523                              "start_IO: -EINVAL device currently "
1524                              "not accessible");
1525                break;
1526        default:
1527                /* internal error 11 - unknown rc */
1528                snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
1529                dev_err(&device->cdev->dev,
1530                        "An error occurred in the DASD device driver, "
1531                        "reason=%s\n", errorstring);
1532                BUG();
1533                break;
1534        }
1535        cqr->intrc = rc;
1536        return rc;
1537}
1538EXPORT_SYMBOL(dasd_start_IO);
1539
1540/*
1541 * Timeout function for dasd devices. This is used for different purposes
1542 *  1) missing interrupt handler for normal operation
1543 *  2) delayed start of request where start_IO failed with -EBUSY
1544 *  3) timeout for missing state change interrupts
1545 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
1546 * DASD_CQR_QUEUED for 2) and 3).
1547 */
1548static void dasd_device_timeout(struct timer_list *t)
1549{
1550        unsigned long flags;
1551        struct dasd_device *device;
1552
1553        device = from_timer(device, t, timer);
1554        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1555        /* re-activate request queue */
1556        dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1557        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1558        dasd_schedule_device_bh(device);
1559}
1560
1561/*
1562 * Setup timeout for a device in jiffies.
1563 */
1564void dasd_device_set_timer(struct dasd_device *device, int expires)
1565{
1566        if (expires == 0)
1567                del_timer(&device->timer);
1568        else
1569                mod_timer(&device->timer, jiffies + expires);
1570}
1571EXPORT_SYMBOL(dasd_device_set_timer);
1572
1573/*
1574 * Clear timeout for a device.
1575 */
1576void dasd_device_clear_timer(struct dasd_device *device)
1577{
1578        del_timer(&device->timer);
1579}
1580EXPORT_SYMBOL(dasd_device_clear_timer);
1581
1582static void dasd_handle_killed_request(struct ccw_device *cdev,
1583                                       unsigned long intparm)
1584{
1585        struct dasd_ccw_req *cqr;
1586        struct dasd_device *device;
1587
1588        if (!intparm)
1589                return;
1590        cqr = (struct dasd_ccw_req *) intparm;
1591        if (cqr->status != DASD_CQR_IN_IO) {
1592                DBF_EVENT_DEVID(DBF_DEBUG, cdev,
1593                                "invalid status in handle_killed_request: "
1594                                "%02x", cqr->status);
1595                return;
1596        }
1597
1598        device = dasd_device_from_cdev_locked(cdev);
1599        if (IS_ERR(device)) {
1600                DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1601                                "unable to get device from cdev");
1602                return;
1603        }
1604
1605        if (!cqr->startdev ||
1606            device != cqr->startdev ||
1607            strncmp(cqr->startdev->discipline->ebcname,
1608                    (char *) &cqr->magic, 4)) {
1609                DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1610                                "invalid device in request");
1611                dasd_put_device(device);
1612                return;
1613        }
1614
1615        /* Schedule request to be retried. */
1616        cqr->status = DASD_CQR_QUEUED;
1617
1618        dasd_device_clear_timer(device);
1619        dasd_schedule_device_bh(device);
1620        dasd_put_device(device);
1621}
1622
1623void dasd_generic_handle_state_change(struct dasd_device *device)
1624{
1625        /* First of all start sense subsystem status request. */
1626        dasd_eer_snss(device);
1627
1628        dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1629        dasd_schedule_device_bh(device);
1630        if (device->block) {
1631                dasd_schedule_block_bh(device->block);
1632                if (device->block->request_queue)
1633                        blk_mq_run_hw_queues(device->block->request_queue,
1634                                             true);
1635        }
1636}
1637EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
1638
1639static int dasd_check_hpf_error(struct irb *irb)
1640{
1641        return (scsw_tm_is_valid_schxs(&irb->scsw) &&
1642            (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
1643             irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
1644}
1645
1646/*
1647 * Interrupt handler for "normal" ssch-io based dasd devices.
1648 */
1649void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1650                      struct irb *irb)
1651{
1652        struct dasd_ccw_req *cqr, *next;
1653        struct dasd_device *device;
1654        unsigned long now;
1655        int nrf_suppressed = 0;
1656        int fp_suppressed = 0;
1657        u8 *sense = NULL;
1658        int expires;
1659
1660        cqr = (struct dasd_ccw_req *) intparm;
1661        if (IS_ERR(irb)) {
1662                switch (PTR_ERR(irb)) {
1663                case -EIO:
1664                        if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
1665                                device = cqr->startdev;
1666                                cqr->status = DASD_CQR_CLEARED;
1667                                dasd_device_clear_timer(device);
1668                                wake_up(&dasd_flush_wq);
1669                                dasd_schedule_device_bh(device);
1670                                return;
1671                        }
1672                        break;
1673                case -ETIMEDOUT:
1674                        DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1675                                        "request timed out\n", __func__);
1676                        break;
1677                default:
1678                        DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1679                                        "unknown error %ld\n", __func__,
1680                                        PTR_ERR(irb));
1681                }
1682                dasd_handle_killed_request(cdev, intparm);
1683                return;
1684        }
1685
1686        now = get_tod_clock();
1687        /* check for conditions that should be handled immediately */
1688        if (!cqr ||
1689            !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1690              scsw_cstat(&irb->scsw) == 0)) {
1691                if (cqr)
1692                        memcpy(&cqr->irb, irb, sizeof(*irb));
1693                device = dasd_device_from_cdev_locked(cdev);
1694                if (IS_ERR(device))
1695                        return;
1696                /* ignore unsolicited interrupts for DIAG discipline */
1697                if (device->discipline == dasd_diag_discipline_pointer) {
1698                        dasd_put_device(device);
1699                        return;
1700                }
1701
1702                /*
1703                 * In some cases 'File Protected' or 'No Record Found' errors
1704                 * might be expected and debug log messages for the
1705                 * corresponding interrupts shouldn't be written then.
1706                 * Check if either of the according suppress bits is set.
1707                 */
1708                sense = dasd_get_sense(irb);
1709                if (sense) {
1710                        fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
1711                                test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
1712                        nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
1713                                test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
1714                }
1715                if (!(fp_suppressed || nrf_suppressed))
1716                        device->discipline->dump_sense_dbf(device, irb, "int");
1717
1718                if (device->features & DASD_FEATURE_ERPLOG)
1719                        device->discipline->dump_sense(device, cqr, irb);
1720                device->discipline->check_for_device_change(device, cqr, irb);
1721                dasd_put_device(device);
1722        }
1723
1724        /* check for for attention message */
1725        if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
1726                device = dasd_device_from_cdev_locked(cdev);
1727                if (!IS_ERR(device)) {
1728                        device->discipline->check_attention(device,
1729                                                            irb->esw.esw1.lpum);
1730                        dasd_put_device(device);
1731                }
1732        }
1733
1734        if (!cqr)
1735                return;
1736
1737        device = (struct dasd_device *) cqr->startdev;
1738        if (!device ||
1739            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1740                DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1741                                "invalid device in request");
1742                return;
1743        }
1744
1745        /* Check for clear pending */
1746        if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1747            scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1748                cqr->status = DASD_CQR_CLEARED;
1749                dasd_device_clear_timer(device);
1750                wake_up(&dasd_flush_wq);
1751                dasd_schedule_device_bh(device);
1752                return;
1753        }
1754
1755        /* check status - the request might have been killed by dyn detach */
1756        if (cqr->status != DASD_CQR_IN_IO) {
1757                DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1758                              "status %02x", dev_name(&cdev->dev), cqr->status);
1759                return;
1760        }
1761
1762        next = NULL;
1763        expires = 0;
1764        if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1765            scsw_cstat(&irb->scsw) == 0) {
1766                /* request was completed successfully */
1767                cqr->status = DASD_CQR_SUCCESS;
1768                cqr->stopclk = now;
1769                /* Start first request on queue if possible -> fast_io. */
1770                if (cqr->devlist.next != &device->ccw_queue) {
1771                        next = list_entry(cqr->devlist.next,
1772                                          struct dasd_ccw_req, devlist);
1773                }
1774        } else {  /* error */
1775                /* check for HPF error
1776                 * call discipline function to requeue all requests
1777                 * and disable HPF accordingly
1778                 */
1779                if (cqr->cpmode && dasd_check_hpf_error(irb) &&
1780                    device->discipline->handle_hpf_error)
1781                        device->discipline->handle_hpf_error(device, irb);
1782                /*
1783                 * If we don't want complex ERP for this request, then just
1784                 * reset this and retry it in the fastpath
1785                 */
1786                if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1787                    cqr->retries > 0) {
1788                        if (cqr->lpm == dasd_path_get_opm(device))
1789                                DBF_DEV_EVENT(DBF_DEBUG, device,
1790                                              "default ERP in fastpath "
1791                                              "(%i retries left)",
1792                                              cqr->retries);
1793                        if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
1794                                cqr->lpm = dasd_path_get_opm(device);
1795                        cqr->status = DASD_CQR_QUEUED;
1796                        next = cqr;
1797                } else
1798                        cqr->status = DASD_CQR_ERROR;
1799        }
1800        if (next && (next->status == DASD_CQR_QUEUED) &&
1801            (!device->stopped)) {
1802                if (device->discipline->start_IO(next) == 0)
1803                        expires = next->expires;
1804        }
1805        if (expires != 0)
1806                dasd_device_set_timer(device, expires);
1807        else
1808                dasd_device_clear_timer(device);
1809        dasd_schedule_device_bh(device);
1810}
1811EXPORT_SYMBOL(dasd_int_handler);
1812
1813enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1814{
1815        struct dasd_device *device;
1816
1817        device = dasd_device_from_cdev_locked(cdev);
1818
1819        if (IS_ERR(device))
1820                goto out;
1821        if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1822           device->state != device->target ||
1823           !device->discipline->check_for_device_change){
1824                dasd_put_device(device);
1825                goto out;
1826        }
1827        if (device->discipline->dump_sense_dbf)
1828                device->discipline->dump_sense_dbf(device, irb, "uc");
1829        device->discipline->check_for_device_change(device, NULL, irb);
1830        dasd_put_device(device);
1831out:
1832        return UC_TODO_RETRY;
1833}
1834EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
1835
1836/*
1837 * If we have an error on a dasd_block layer request then we cancel
1838 * and return all further requests from the same dasd_block as well.
1839 */
1840static void __dasd_device_recovery(struct dasd_device *device,
1841                                   struct dasd_ccw_req *ref_cqr)
1842{
1843        struct list_head *l, *n;
1844        struct dasd_ccw_req *cqr;
1845
1846        /*
1847         * only requeue request that came from the dasd_block layer
1848         */
1849        if (!ref_cqr->block)
1850                return;
1851
1852        list_for_each_safe(l, n, &device->ccw_queue) {
1853                cqr = list_entry(l, struct dasd_ccw_req, devlist);
1854                if (cqr->status == DASD_CQR_QUEUED &&
1855                    ref_cqr->block == cqr->block) {
1856                        cqr->status = DASD_CQR_CLEARED;
1857                }
1858        }
1859};
1860
1861/*
1862 * Remove those ccw requests from the queue that need to be returned
1863 * to the upper layer.
1864 */
1865static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1866                                            struct list_head *final_queue)
1867{
1868        struct list_head *l, *n;
1869        struct dasd_ccw_req *cqr;
1870
1871        /* Process request with final status. */
1872        list_for_each_safe(l, n, &device->ccw_queue) {
1873                cqr = list_entry(l, struct dasd_ccw_req, devlist);
1874
1875                /* Skip any non-final request. */
1876                if (cqr->status == DASD_CQR_QUEUED ||
1877                    cqr->status == DASD_CQR_IN_IO ||
1878                    cqr->status == DASD_CQR_CLEAR_PENDING)
1879                        continue;
1880                if (cqr->status == DASD_CQR_ERROR) {
1881                        __dasd_device_recovery(device, cqr);
1882                }
1883                /* Rechain finished requests to final queue */
1884                list_move_tail(&cqr->devlist, final_queue);
1885        }
1886}
1887
1888/*
1889 * the cqrs from the final queue are returned to the upper layer
1890 * by setting a dasd_block state and calling the callback function
1891 */
1892static void __dasd_device_process_final_queue(struct dasd_device *device,
1893                                              struct list_head *final_queue)
1894{
1895        struct list_head *l, *n;
1896        struct dasd_ccw_req *cqr;
1897        struct dasd_block *block;
1898        void (*callback)(struct dasd_ccw_req *, void *data);
1899        void *callback_data;
1900        char errorstring[ERRORLENGTH];
1901
1902        list_for_each_safe(l, n, final_queue) {
1903                cqr = list_entry(l, struct dasd_ccw_req, devlist);
1904                list_del_init(&cqr->devlist);
1905                block = cqr->block;
1906                callback = cqr->callback;
1907                callback_data = cqr->callback_data;
1908                if (block)
1909                        spin_lock_bh(&block->queue_lock);
1910                switch (cqr->status) {
1911                case DASD_CQR_SUCCESS:
1912                        cqr->status = DASD_CQR_DONE;
1913                        break;
1914                case DASD_CQR_ERROR:
1915                        cqr->status = DASD_CQR_NEED_ERP;
1916                        break;
1917                case DASD_CQR_CLEARED:
1918                        cqr->status = DASD_CQR_TERMINATED;
1919                        break;
1920                default:
1921                        /* internal error 12 - wrong cqr status*/
1922                        snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
1923                        dev_err(&device->cdev->dev,
1924                                "An error occurred in the DASD device driver, "
1925                                "reason=%s\n", errorstring);
1926                        BUG();
1927                }
1928                if (cqr->callback != NULL)
1929                        (callback)(cqr, callback_data);
1930                if (block)
1931                        spin_unlock_bh(&block->queue_lock);
1932        }
1933}
1934
1935/*
1936 * Take a look at the first request on the ccw queue and check
1937 * if it reached its expire time. If so, terminate the IO.
1938 */
1939static void __dasd_device_check_expire(struct dasd_device *device)
1940{
1941        struct dasd_ccw_req *cqr;
1942
1943        if (list_empty(&device->ccw_queue))
1944                return;
1945        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1946        if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1947            (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1948                if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
1949                        /*
1950                         * IO in safe offline processing should not
1951                         * run out of retries
1952                         */
1953                        cqr->retries++;
1954                }
1955                if (device->discipline->term_IO(cqr) != 0) {
1956                        /* Hmpf, try again in 5 sec */
1957                        dev_err(&device->cdev->dev,
1958                                "cqr %p timed out (%lus) but cannot be "
1959                                "ended, retrying in 5 s\n",
1960                                cqr, (cqr->expires/HZ));
1961                        cqr->expires += 5*HZ;
1962                        dasd_device_set_timer(device, 5*HZ);
1963                } else {
1964                        dev_err(&device->cdev->dev,
1965                                "cqr %p timed out (%lus), %i retries "
1966                                "remaining\n", cqr, (cqr->expires/HZ),
1967                                cqr->retries);
1968                }
1969        }
1970}
1971
1972/*
1973 * return 1 when device is not eligible for IO
1974 */
1975static int __dasd_device_is_unusable(struct dasd_device *device,
1976                                     struct dasd_ccw_req *cqr)
1977{
1978        int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
1979
1980        if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
1981            !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
1982                /*
1983                 * dasd is being set offline
1984                 * but it is no safe offline where we have to allow I/O
1985                 */
1986                return 1;
1987        }
1988        if (device->stopped) {
1989                if (device->stopped & mask) {
1990                        /* stopped and CQR will not change that. */
1991                        return 1;
1992                }
1993                if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1994                        /* CQR is not able to change device to
1995                         * operational. */
1996                        return 1;
1997                }
1998                /* CQR required to get device operational. */
1999        }
2000        return 0;
2001}
2002
2003/*
2004 * Take a look at the first request on the ccw queue and check
2005 * if it needs to be started.
2006 */
2007static void __dasd_device_start_head(struct dasd_device *device)
2008{
2009        struct dasd_ccw_req *cqr;
2010        int rc;
2011
2012        if (list_empty(&device->ccw_queue))
2013                return;
2014        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2015        if (cqr->status != DASD_CQR_QUEUED)
2016                return;
2017        /* if device is not usable return request to upper layer */
2018        if (__dasd_device_is_unusable(device, cqr)) {
2019                cqr->intrc = -EAGAIN;
2020                cqr->status = DASD_CQR_CLEARED;
2021                dasd_schedule_device_bh(device);
2022                return;
2023        }
2024
2025        rc = device->discipline->start_IO(cqr);
2026        if (rc == 0)
2027                dasd_device_set_timer(device, cqr->expires);
2028        else if (rc == -EACCES) {
2029                dasd_schedule_device_bh(device);
2030        } else
2031                /* Hmpf, try again in 1/2 sec */
2032                dasd_device_set_timer(device, 50);
2033}
2034
2035static void __dasd_device_check_path_events(struct dasd_device *device)
2036{
2037        int rc;
2038
2039        if (!dasd_path_get_tbvpm(device))
2040                return;
2041
2042        if (device->stopped &
2043            ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
2044                return;
2045        rc = device->discipline->verify_path(device,
2046                                             dasd_path_get_tbvpm(device));
2047        if (rc)
2048                dasd_device_set_timer(device, 50);
2049        else
2050                dasd_path_clear_all_verify(device);
2051};
2052
2053/*
2054 * Go through all request on the dasd_device request queue,
2055 * terminate them on the cdev if necessary, and return them to the
2056 * submitting layer via callback.
2057 * Note:
2058 * Make sure that all 'submitting layers' still exist when
2059 * this function is called!. In other words, when 'device' is a base
2060 * device then all block layer requests must have been removed before
2061 * via dasd_flush_block_queue.
2062 */
2063int dasd_flush_device_queue(struct dasd_device *device)
2064{
2065        struct dasd_ccw_req *cqr, *n;
2066        int rc;
2067        struct list_head flush_queue;
2068
2069        INIT_LIST_HEAD(&flush_queue);
2070        spin_lock_irq(get_ccwdev_lock(device->cdev));
2071        rc = 0;
2072        list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2073                /* Check status and move request to flush_queue */
2074                switch (cqr->status) {
2075                case DASD_CQR_IN_IO:
2076                        rc = device->discipline->term_IO(cqr);
2077                        if (rc) {
2078                                /* unable to terminate requeust */
2079                                dev_err(&device->cdev->dev,
2080                                        "Flushing the DASD request queue "
2081                                        "failed for request %p\n", cqr);
2082                                /* stop flush processing */
2083                                goto finished;
2084                        }
2085                        break;
2086                case DASD_CQR_QUEUED:
2087                        cqr->stopclk = get_tod_clock();
2088                        cqr->status = DASD_CQR_CLEARED;
2089                        break;
2090                default: /* no need to modify the others */
2091                        break;
2092                }
2093                list_move_tail(&cqr->devlist, &flush_queue);
2094        }
2095finished:
2096        spin_unlock_irq(get_ccwdev_lock(device->cdev));
2097        /*
2098         * After this point all requests must be in state CLEAR_PENDING,
2099         * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
2100         * one of the others.
2101         */
2102        list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
2103                wait_event(dasd_flush_wq,
2104                           (cqr->status != DASD_CQR_CLEAR_PENDING));
2105        /*
2106         * Now set each request back to TERMINATED, DONE or NEED_ERP
2107         * and call the callback function of flushed requests
2108         */
2109        __dasd_device_process_final_queue(device, &flush_queue);
2110        return rc;
2111}
2112EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2113
2114/*
2115 * Acquire the device lock and process queues for the device.
2116 */
2117static void dasd_device_tasklet(struct dasd_device *device)
2118{
2119        struct list_head final_queue;
2120
2121        atomic_set (&device->tasklet_scheduled, 0);
2122        INIT_LIST_HEAD(&final_queue);
2123        spin_lock_irq(get_ccwdev_lock(device->cdev));
2124        /* Check expire time of first request on the ccw queue. */
2125        __dasd_device_check_expire(device);
2126        /* find final requests on ccw queue */
2127        __dasd_device_process_ccw_queue(device, &final_queue);
2128        __dasd_device_check_path_events(device);
2129        spin_unlock_irq(get_ccwdev_lock(device->cdev));
2130        /* Now call the callback function of requests with final status */
2131        __dasd_device_process_final_queue(device, &final_queue);
2132        spin_lock_irq(get_ccwdev_lock(device->cdev));
2133        /* Now check if the head of the ccw queue needs to be started. */
2134        __dasd_device_start_head(device);
2135        spin_unlock_irq(get_ccwdev_lock(device->cdev));
2136        if (waitqueue_active(&shutdown_waitq))
2137                wake_up(&shutdown_waitq);
2138        dasd_put_device(device);
2139}
2140
2141/*
2142 * Schedules a call to dasd_tasklet over the device tasklet.
2143 */
2144void dasd_schedule_device_bh(struct dasd_device *device)
2145{
2146        /* Protect against rescheduling. */
2147        if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
2148                return;
2149        dasd_get_device(device);
2150        tasklet_hi_schedule(&device->tasklet);
2151}
2152EXPORT_SYMBOL(dasd_schedule_device_bh);
2153
2154void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
2155{
2156        device->stopped |= bits;
2157}
2158EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
2159
2160void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
2161{
2162        device->stopped &= ~bits;
2163        if (!device->stopped)
2164                wake_up(&generic_waitq);
2165}
2166EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
2167
2168/*
2169 * Queue a request to the head of the device ccw_queue.
2170 * Start the I/O if possible.
2171 */
2172void dasd_add_request_head(struct dasd_ccw_req *cqr)
2173{
2174        struct dasd_device *device;
2175        unsigned long flags;
2176
2177        device = cqr->startdev;
2178        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2179        cqr->status = DASD_CQR_QUEUED;
2180        list_add(&cqr->devlist, &device->ccw_queue);
2181        /* let the bh start the request to keep them in order */
2182        dasd_schedule_device_bh(device);
2183        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2184}
2185EXPORT_SYMBOL(dasd_add_request_head);
2186
2187/*
2188 * Queue a request to the tail of the device ccw_queue.
2189 * Start the I/O if possible.
2190 */
2191void dasd_add_request_tail(struct dasd_ccw_req *cqr)
2192{
2193        struct dasd_device *device;
2194        unsigned long flags;
2195
2196        device = cqr->startdev;
2197        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2198        cqr->status = DASD_CQR_QUEUED;
2199        list_add_tail(&cqr->devlist, &device->ccw_queue);
2200        /* let the bh start the request to keep them in order */
2201        dasd_schedule_device_bh(device);
2202        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2203}
2204EXPORT_SYMBOL(dasd_add_request_tail);
2205
2206/*
2207 * Wakeup helper for the 'sleep_on' functions.
2208 */
2209void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
2210{
2211        spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2212        cqr->callback_data = DASD_SLEEPON_END_TAG;
2213        spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2214        wake_up(&generic_waitq);
2215}
2216EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
2217
2218static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
2219{
2220        struct dasd_device *device;
2221        int rc;
2222
2223        device = cqr->startdev;
2224        spin_lock_irq(get_ccwdev_lock(device->cdev));
2225        rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
2226        spin_unlock_irq(get_ccwdev_lock(device->cdev));
2227        return rc;
2228}
2229
2230/*
2231 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
2232 */
2233static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
2234{
2235        struct dasd_device *device;
2236        dasd_erp_fn_t erp_fn;
2237
2238        if (cqr->status == DASD_CQR_FILLED)
2239                return 0;
2240        device = cqr->startdev;
2241        if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2242                if (cqr->status == DASD_CQR_TERMINATED) {
2243                        device->discipline->handle_terminated_request(cqr);
2244                        return 1;
2245                }
2246                if (cqr->status == DASD_CQR_NEED_ERP) {
2247                        erp_fn = device->discipline->erp_action(cqr);
2248                        erp_fn(cqr);
2249                        return 1;
2250                }
2251                if (cqr->status == DASD_CQR_FAILED)
2252                        dasd_log_sense(cqr, &cqr->irb);
2253                if (cqr->refers) {
2254                        __dasd_process_erp(device, cqr);
2255                        return 1;
2256                }
2257        }
2258        return 0;
2259}
2260
2261static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
2262{
2263        if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2264                if (cqr->refers) /* erp is not done yet */
2265                        return 1;
2266                return ((cqr->status != DASD_CQR_DONE) &&
2267                        (cqr->status != DASD_CQR_FAILED));
2268        } else
2269                return (cqr->status == DASD_CQR_FILLED);
2270}
2271
2272static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
2273{
2274        struct dasd_device *device;
2275        int rc;
2276        struct list_head ccw_queue;
2277        struct dasd_ccw_req *cqr;
2278
2279        INIT_LIST_HEAD(&ccw_queue);
2280        maincqr->status = DASD_CQR_FILLED;
2281        device = maincqr->startdev;
2282        list_add(&maincqr->blocklist, &ccw_queue);
2283        for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
2284             cqr = list_first_entry(&ccw_queue,
2285                                    struct dasd_ccw_req, blocklist)) {
2286
2287                if (__dasd_sleep_on_erp(cqr))
2288                        continue;
2289                if (cqr->status != DASD_CQR_FILLED) /* could be failed */
2290                        continue;
2291                if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2292                    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2293                        cqr->status = DASD_CQR_FAILED;
2294                        cqr->intrc = -EPERM;
2295                        continue;
2296                }
2297                /* Non-temporary stop condition will trigger fail fast */
2298                if (device->stopped & ~DASD_STOPPED_PENDING &&
2299                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2300                    (!dasd_eer_enabled(device))) {
2301                        cqr->status = DASD_CQR_FAILED;
2302                        cqr->intrc = -ENOLINK;
2303                        continue;
2304                }
2305                /*
2306                 * Don't try to start requests if device is in
2307                 * offline processing, it might wait forever
2308                 */
2309                if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2310                        cqr->status = DASD_CQR_FAILED;
2311                        cqr->intrc = -ENODEV;
2312                        continue;
2313                }
2314                /*
2315                 * Don't try to start requests if device is stopped
2316                 * except path verification requests
2317                 */
2318                if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2319                        if (interruptible) {
2320                                rc = wait_event_interruptible(
2321                                        generic_waitq, !(device->stopped));
2322                                if (rc == -ERESTARTSYS) {
2323                                        cqr->status = DASD_CQR_FAILED;
2324                                        maincqr->intrc = rc;
2325                                        continue;
2326                                }
2327                        } else
2328                                wait_event(generic_waitq, !(device->stopped));
2329                }
2330                if (!cqr->callback)
2331                        cqr->callback = dasd_wakeup_cb;
2332
2333                cqr->callback_data = DASD_SLEEPON_START_TAG;
2334                dasd_add_request_tail(cqr);
2335                if (interruptible) {
2336                        rc = wait_event_interruptible(
2337                                generic_waitq, _wait_for_wakeup(cqr));
2338                        if (rc == -ERESTARTSYS) {
2339                                dasd_cancel_req(cqr);
2340                                /* wait (non-interruptible) for final status */
2341                                wait_event(generic_waitq,
2342                                           _wait_for_wakeup(cqr));
2343                                cqr->status = DASD_CQR_FAILED;
2344                                maincqr->intrc = rc;
2345                                continue;
2346                        }
2347                } else
2348                        wait_event(generic_waitq, _wait_for_wakeup(cqr));
2349        }
2350
2351        maincqr->endclk = get_tod_clock();
2352        if ((maincqr->status != DASD_CQR_DONE) &&
2353            (maincqr->intrc != -ERESTARTSYS))
2354                dasd_log_sense(maincqr, &maincqr->irb);
2355        if (maincqr->status == DASD_CQR_DONE)
2356                rc = 0;
2357        else if (maincqr->intrc)
2358                rc = maincqr->intrc;
2359        else
2360                rc = -EIO;
2361        return rc;
2362}
2363
2364static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
2365{
2366        struct dasd_ccw_req *cqr;
2367
2368        list_for_each_entry(cqr, ccw_queue, blocklist) {
2369                if (cqr->callback_data != DASD_SLEEPON_END_TAG)
2370                        return 0;
2371        }
2372
2373        return 1;
2374}
2375
2376static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
2377{
2378        struct dasd_device *device;
2379        struct dasd_ccw_req *cqr, *n;
2380        u8 *sense = NULL;
2381        int rc;
2382
2383retry:
2384        list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2385                device = cqr->startdev;
2386                if (cqr->status != DASD_CQR_FILLED) /*could be failed*/
2387                        continue;
2388
2389                if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2390                    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2391                        cqr->status = DASD_CQR_FAILED;
2392                        cqr->intrc = -EPERM;
2393                        continue;
2394                }
2395                /*Non-temporary stop condition will trigger fail fast*/
2396                if (device->stopped & ~DASD_STOPPED_PENDING &&
2397                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2398                    !dasd_eer_enabled(device)) {
2399                        cqr->status = DASD_CQR_FAILED;
2400                        cqr->intrc = -EAGAIN;
2401                        continue;
2402                }
2403
2404                /*Don't try to start requests if device is stopped*/
2405                if (interruptible) {
2406                        rc = wait_event_interruptible(
2407                                generic_waitq, !device->stopped);
2408                        if (rc == -ERESTARTSYS) {
2409                                cqr->status = DASD_CQR_FAILED;
2410                                cqr->intrc = rc;
2411                                continue;
2412                        }
2413                } else
2414                        wait_event(generic_waitq, !(device->stopped));
2415
2416                if (!cqr->callback)
2417                        cqr->callback = dasd_wakeup_cb;
2418                cqr->callback_data = DASD_SLEEPON_START_TAG;
2419                dasd_add_request_tail(cqr);
2420        }
2421
2422        wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
2423
2424        rc = 0;
2425        list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2426                /*
2427                 * In some cases the 'File Protected' or 'Incorrect Length'
2428                 * error might be expected and error recovery would be
2429                 * unnecessary in these cases.  Check if the according suppress
2430                 * bit is set.
2431                 */
2432                sense = dasd_get_sense(&cqr->irb);
2433                if (sense && sense[1] & SNS1_FILE_PROTECTED &&
2434                    test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
2435                        continue;
2436                if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
2437                    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
2438                        continue;
2439
2440                /*
2441                 * for alias devices simplify error recovery and
2442                 * return to upper layer
2443                 * do not skip ERP requests
2444                 */
2445                if (cqr->startdev != cqr->basedev && !cqr->refers &&
2446                    (cqr->status == DASD_CQR_TERMINATED ||
2447                     cqr->status == DASD_CQR_NEED_ERP))
2448                        return -EAGAIN;
2449
2450                /* normal recovery for basedev IO */
2451                if (__dasd_sleep_on_erp(cqr))
2452                        /* handle erp first */
2453                        goto retry;
2454        }
2455
2456        return 0;
2457}
2458
2459/*
2460 * Queue a request to the tail of the device ccw_queue and wait for
2461 * it's completion.
2462 */
2463int dasd_sleep_on(struct dasd_ccw_req *cqr)
2464{
2465        return _dasd_sleep_on(cqr, 0);
2466}
2467EXPORT_SYMBOL(dasd_sleep_on);
2468
2469/*
2470 * Start requests from a ccw_queue and wait for their completion.
2471 */
2472int dasd_sleep_on_queue(struct list_head *ccw_queue)
2473{
2474        return _dasd_sleep_on_queue(ccw_queue, 0);
2475}
2476EXPORT_SYMBOL(dasd_sleep_on_queue);
2477
2478/*
2479 * Queue a request to the tail of the device ccw_queue and wait
2480 * interruptible for it's completion.
2481 */
2482int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
2483{
2484        return _dasd_sleep_on(cqr, 1);
2485}
2486EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2487
2488/*
2489 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
2490 * for eckd devices) the currently running request has to be terminated
2491 * and be put back to status queued, before the special request is added
2492 * to the head of the queue. Then the special request is waited on normally.
2493 */
2494static inline int _dasd_term_running_cqr(struct dasd_device *device)
2495{
2496        struct dasd_ccw_req *cqr;
2497        int rc;
2498
2499        if (list_empty(&device->ccw_queue))
2500                return 0;
2501        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2502        rc = device->discipline->term_IO(cqr);
2503        if (!rc)
2504                /*
2505                 * CQR terminated because a more important request is pending.
2506                 * Undo decreasing of retry counter because this is
2507                 * not an error case.
2508                 */
2509                cqr->retries++;
2510        return rc;
2511}
2512
2513int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
2514{
2515        struct dasd_device *device;
2516        int rc;
2517
2518        device = cqr->startdev;
2519        if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2520            !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2521                cqr->status = DASD_CQR_FAILED;
2522                cqr->intrc = -EPERM;
2523                return -EIO;
2524        }
2525        spin_lock_irq(get_ccwdev_lock(device->cdev));
2526        rc = _dasd_term_running_cqr(device);
2527        if (rc) {
2528                spin_unlock_irq(get_ccwdev_lock(device->cdev));
2529                return rc;
2530        }
2531        cqr->callback = dasd_wakeup_cb;
2532        cqr->callback_data = DASD_SLEEPON_START_TAG;
2533        cqr->status = DASD_CQR_QUEUED;
2534        /*
2535         * add new request as second
2536         * first the terminated cqr needs to be finished
2537         */
2538        list_add(&cqr->devlist, device->ccw_queue.next);
2539
2540        /* let the bh start the request to keep them in order */
2541        dasd_schedule_device_bh(device);
2542
2543        spin_unlock_irq(get_ccwdev_lock(device->cdev));
2544
2545        wait_event(generic_waitq, _wait_for_wakeup(cqr));
2546
2547        if (cqr->status == DASD_CQR_DONE)
2548                rc = 0;
2549        else if (cqr->intrc)
2550                rc = cqr->intrc;
2551        else
2552                rc = -EIO;
2553
2554        /* kick tasklets */
2555        dasd_schedule_device_bh(device);
2556        if (device->block)
2557                dasd_schedule_block_bh(device->block);
2558
2559        return rc;
2560}
2561EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2562
2563/*
2564 * Cancels a request that was started with dasd_sleep_on_req.
2565 * This is useful to timeout requests. The request will be
2566 * terminated if it is currently in i/o.
2567 * Returns 0 if request termination was successful
2568 *         negative error code if termination failed
2569 * Cancellation of a request is an asynchronous operation! The calling
2570 * function has to wait until the request is properly returned via callback.
2571 */
2572int dasd_cancel_req(struct dasd_ccw_req *cqr)
2573{
2574        struct dasd_device *device = cqr->startdev;
2575        unsigned long flags;
2576        int rc;
2577
2578        rc = 0;
2579        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2580        switch (cqr->status) {
2581        case DASD_CQR_QUEUED:
2582                /* request was not started - just set to cleared */
2583                cqr->status = DASD_CQR_CLEARED;
2584                break;
2585        case DASD_CQR_IN_IO:
2586                /* request in IO - terminate IO and release again */
2587                rc = device->discipline->term_IO(cqr);
2588                if (rc) {
2589                        dev_err(&device->cdev->dev,
2590                                "Cancelling request %p failed with rc=%d\n",
2591                                cqr, rc);
2592                } else {
2593                        cqr->stopclk = get_tod_clock();
2594                }
2595                break;
2596        default: /* already finished or clear pending - do nothing */
2597                break;
2598        }
2599        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2600        dasd_schedule_device_bh(device);
2601        return rc;
2602}
2603EXPORT_SYMBOL(dasd_cancel_req);
2604
2605/*
2606 * SECTION: Operations of the dasd_block layer.
2607 */
2608
2609/*
2610 * Timeout function for dasd_block. This is used when the block layer
2611 * is waiting for something that may not come reliably, (e.g. a state
2612 * change interrupt)
2613 */
2614static void dasd_block_timeout(struct timer_list *t)
2615{
2616        unsigned long flags;
2617        struct dasd_block *block;
2618
2619        block = from_timer(block, t, timer);
2620        spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
2621        /* re-activate request queue */
2622        dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
2623        spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
2624        dasd_schedule_block_bh(block);
2625        blk_mq_run_hw_queues(block->request_queue, true);
2626}
2627
2628/*
2629 * Setup timeout for a dasd_block in jiffies.
2630 */
2631void dasd_block_set_timer(struct dasd_block *block, int expires)
2632{
2633        if (expires == 0)
2634                del_timer(&block->timer);
2635        else
2636                mod_timer(&block->timer, jiffies + expires);
2637}
2638EXPORT_SYMBOL(dasd_block_set_timer);
2639
2640/*
2641 * Clear timeout for a dasd_block.
2642 */
2643void dasd_block_clear_timer(struct dasd_block *block)
2644{
2645        del_timer(&block->timer);
2646}
2647EXPORT_SYMBOL(dasd_block_clear_timer);
2648
2649/*
2650 * Process finished error recovery ccw.
2651 */
2652static void __dasd_process_erp(struct dasd_device *device,
2653                               struct dasd_ccw_req *cqr)
2654{
2655        dasd_erp_fn_t erp_fn;
2656
2657        if (cqr->status == DASD_CQR_DONE)
2658                DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
2659        else
2660                dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
2661        erp_fn = device->discipline->erp_postaction(cqr);
2662        erp_fn(cqr);
2663}
2664
2665static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
2666{
2667        struct request *req;
2668        blk_status_t error = BLK_STS_OK;
2669        int status;
2670
2671        req = (struct request *) cqr->callback_data;
2672        dasd_profile_end(cqr->block, cqr, req);
2673
2674        status = cqr->block->base->discipline->free_cp(cqr, req);
2675        if (status < 0)
2676                error = errno_to_blk_status(status);
2677        else if (status == 0) {
2678                switch (cqr->intrc) {
2679                case -EPERM:
2680                        error = BLK_STS_NEXUS;
2681                        break;
2682                case -ENOLINK:
2683                        error = BLK_STS_TRANSPORT;
2684                        break;
2685                case -ETIMEDOUT:
2686                        error = BLK_STS_TIMEOUT;
2687                        break;
2688                default:
2689                        error = BLK_STS_IOERR;
2690                        break;
2691                }
2692        }
2693
2694        /*
2695         * We need to take care for ETIMEDOUT errors here since the
2696         * complete callback does not get called in this case.
2697         * Take care of all errors here and avoid additional code to
2698         * transfer the error value to the complete callback.
2699         */
2700        if (error) {
2701                blk_mq_end_request(req, error);
2702                blk_mq_run_hw_queues(req->q, true);
2703        } else {
2704                blk_mq_complete_request(req);
2705        }
2706}
2707
2708/*
2709 * Process ccw request queue.
2710 */
2711static void __dasd_process_block_ccw_queue(struct dasd_block *block,
2712                                           struct list_head *final_queue)
2713{
2714        struct list_head *l, *n;
2715        struct dasd_ccw_req *cqr;
2716        dasd_erp_fn_t erp_fn;
2717        unsigned long flags;
2718        struct dasd_device *base = block->base;
2719
2720restart:
2721        /* Process request with final status. */
2722        list_for_each_safe(l, n, &block->ccw_queue) {
2723                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2724                if (cqr->status != DASD_CQR_DONE &&
2725                    cqr->status != DASD_CQR_FAILED &&
2726                    cqr->status != DASD_CQR_NEED_ERP &&
2727                    cqr->status != DASD_CQR_TERMINATED)
2728                        continue;
2729
2730                if (cqr->status == DASD_CQR_TERMINATED) {
2731                        base->discipline->handle_terminated_request(cqr);
2732                        goto restart;
2733                }
2734
2735                /*  Process requests that may be recovered */
2736                if (cqr->status == DASD_CQR_NEED_ERP) {
2737                        erp_fn = base->discipline->erp_action(cqr);
2738                        if (IS_ERR(erp_fn(cqr)))
2739                                continue;
2740                        goto restart;
2741                }
2742
2743                /* log sense for fatal error */
2744                if (cqr->status == DASD_CQR_FAILED) {
2745                        dasd_log_sense(cqr, &cqr->irb);
2746                }
2747
2748                /* First of all call extended error reporting. */
2749                if (dasd_eer_enabled(base) &&
2750                    cqr->status == DASD_CQR_FAILED) {
2751                        dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
2752
2753                        /* restart request  */
2754                        cqr->status = DASD_CQR_FILLED;
2755                        cqr->retries = 255;
2756                        spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
2757                        dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
2758                        spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
2759                                               flags);
2760                        goto restart;
2761                }
2762
2763                /* Process finished ERP request. */
2764                if (cqr->refers) {
2765                        __dasd_process_erp(base, cqr);
2766                        goto restart;
2767                }
2768
2769                /* Rechain finished requests to final queue */
2770                cqr->endclk = get_tod_clock();
2771                list_move_tail(&cqr->blocklist, final_queue);
2772        }
2773}
2774
2775static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
2776{
2777        dasd_schedule_block_bh(cqr->block);
2778}
2779
2780static void __dasd_block_start_head(struct dasd_block *block)
2781{
2782        struct dasd_ccw_req *cqr;
2783
2784        if (list_empty(&block->ccw_queue))
2785                return;
2786        /* We allways begin with the first requests on the queue, as some
2787         * of previously started requests have to be enqueued on a
2788         * dasd_device again for error recovery.
2789         */
2790        list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
2791                if (cqr->status != DASD_CQR_FILLED)
2792                        continue;
2793                if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
2794                    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2795                        cqr->status = DASD_CQR_FAILED;
2796                        cqr->intrc = -EPERM;
2797                        dasd_schedule_block_bh(block);
2798                        continue;
2799                }
2800                /* Non-temporary stop condition will trigger fail fast */
2801                if (block->base->stopped & ~DASD_STOPPED_PENDING &&
2802                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2803                    (!dasd_eer_enabled(block->base))) {
2804                        cqr->status = DASD_CQR_FAILED;
2805                        cqr->intrc = -ENOLINK;
2806                        dasd_schedule_block_bh(block);
2807                        continue;
2808                }
2809                /* Don't try to start requests if device is stopped */
2810                if (block->base->stopped)
2811                        return;
2812
2813                /* just a fail safe check, should not happen */
2814                if (!cqr->startdev)
2815                        cqr->startdev = block->base;
2816
2817                /* make sure that the requests we submit find their way back */
2818                cqr->callback = dasd_return_cqr_cb;
2819
2820                dasd_add_request_tail(cqr);
2821        }
2822}
2823
2824/*
2825 * Central dasd_block layer routine. Takes requests from the generic
2826 * block layer request queue, creates ccw requests, enqueues them on
2827 * a dasd_device and processes ccw requests that have been returned.
2828 */
2829static void dasd_block_tasklet(struct dasd_block *block)
2830{
2831        struct list_head final_queue;
2832        struct list_head *l, *n;
2833        struct dasd_ccw_req *cqr;
2834        struct dasd_queue *dq;
2835
2836        atomic_set(&block->tasklet_scheduled, 0);
2837        INIT_LIST_HEAD(&final_queue);
2838        spin_lock_irq(&block->queue_lock);
2839        /* Finish off requests on ccw queue */
2840        __dasd_process_block_ccw_queue(block, &final_queue);
2841        spin_unlock_irq(&block->queue_lock);
2842
2843        /* Now call the callback function of requests with final status */
2844        list_for_each_safe(l, n, &final_queue) {
2845                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2846                dq = cqr->dq;
2847                spin_lock_irq(&dq->lock);
2848                list_del_init(&cqr->blocklist);
2849                __dasd_cleanup_cqr(cqr);
2850                spin_unlock_irq(&dq->lock);
2851        }
2852
2853        spin_lock_irq(&block->queue_lock);
2854        /* Now check if the head of the ccw queue needs to be started. */
2855        __dasd_block_start_head(block);
2856        spin_unlock_irq(&block->queue_lock);
2857
2858        if (waitqueue_active(&shutdown_waitq))
2859                wake_up(&shutdown_waitq);
2860        dasd_put_device(block->base);
2861}
2862
2863static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
2864{
2865        wake_up(&dasd_flush_wq);
2866}
2867
2868/*
2869 * Requeue a request back to the block request queue
2870 * only works for block requests
2871 */
2872static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
2873{
2874        struct dasd_block *block = cqr->block;
2875        struct request *req;
2876
2877        if (!block)
2878                return -EINVAL;
2879        spin_lock_irq(&cqr->dq->lock);
2880        req = (struct request *) cqr->callback_data;
2881        blk_mq_requeue_request(req, false);
2882        spin_unlock_irq(&cqr->dq->lock);
2883
2884        return 0;
2885}
2886
2887/*
2888 * Go through all request on the dasd_block request queue, cancel them
2889 * on the respective dasd_device, and return them to the generic
2890 * block layer.
2891 */
2892static int dasd_flush_block_queue(struct dasd_block *block)
2893{
2894        struct dasd_ccw_req *cqr, *n;
2895        int rc, i;
2896        struct list_head flush_queue;
2897        unsigned long flags;
2898
2899        INIT_LIST_HEAD(&flush_queue);
2900        spin_lock_bh(&block->queue_lock);
2901        rc = 0;
2902restart:
2903        list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
2904                /* if this request currently owned by a dasd_device cancel it */
2905                if (cqr->status >= DASD_CQR_QUEUED)
2906                        rc = dasd_cancel_req(cqr);
2907                if (rc < 0)
2908                        break;
2909                /* Rechain request (including erp chain) so it won't be
2910                 * touched by the dasd_block_tasklet anymore.
2911                 * Replace the callback so we notice when the request
2912                 * is returned from the dasd_device layer.
2913                 */
2914                cqr->callback = _dasd_wake_block_flush_cb;
2915                for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
2916                        list_move_tail(&cqr->blocklist, &flush_queue);
2917                if (i > 1)
2918                        /* moved more than one request - need to restart */
2919                        goto restart;
2920        }
2921        spin_unlock_bh(&block->queue_lock);
2922        /* Now call the callback function of flushed requests */
2923restart_cb:
2924        list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
2925                wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
2926                /* Process finished ERP request. */
2927                if (cqr->refers) {
2928                        spin_lock_bh(&block->queue_lock);
2929                        __dasd_process_erp(block->base, cqr);
2930                        spin_unlock_bh(&block->queue_lock);
2931                        /* restart list_for_xx loop since dasd_process_erp
2932                         * might remove multiple elements */
2933                        goto restart_cb;
2934                }
2935                /* call the callback function */
2936                spin_lock_irqsave(&cqr->dq->lock, flags);
2937                cqr->endclk = get_tod_clock();
2938                list_del_init(&cqr->blocklist);
2939                __dasd_cleanup_cqr(cqr);
2940                spin_unlock_irqrestore(&cqr->dq->lock, flags);
2941        }
2942        return rc;
2943}
2944
2945/*
2946 * Schedules a call to dasd_tasklet over the device tasklet.
2947 */
2948void dasd_schedule_block_bh(struct dasd_block *block)
2949{
2950        /* Protect against rescheduling. */
2951        if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
2952                return;
2953        /* life cycle of block is bound to it's base device */
2954        dasd_get_device(block->base);
2955        tasklet_hi_schedule(&block->tasklet);
2956}
2957EXPORT_SYMBOL(dasd_schedule_block_bh);
2958
2959
2960/*
2961 * SECTION: external block device operations
2962 * (request queue handling, open, release, etc.)
2963 */
2964
2965/*
2966 * Dasd request queue function. Called from ll_rw_blk.c
2967 */
2968static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
2969                                    const struct blk_mq_queue_data *qd)
2970{
2971        struct dasd_block *block = hctx->queue->queuedata;
2972        struct dasd_queue *dq = hctx->driver_data;
2973        struct request *req = qd->rq;
2974        struct dasd_device *basedev;
2975        struct dasd_ccw_req *cqr;
2976        blk_status_t rc = BLK_STS_OK;
2977
2978        basedev = block->base;
2979        spin_lock_irq(&dq->lock);
2980        if (basedev->state < DASD_STATE_READY) {
2981                DBF_DEV_EVENT(DBF_ERR, basedev,
2982                              "device not ready for request %p", req);
2983                rc = BLK_STS_IOERR;
2984                goto out;
2985        }
2986
2987        /*
2988         * if device is stopped do not fetch new requests
2989         * except failfast is active which will let requests fail
2990         * immediately in __dasd_block_start_head()
2991         */
2992        if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
2993                DBF_DEV_EVENT(DBF_ERR, basedev,
2994                              "device stopped request %p", req);
2995                rc = BLK_STS_RESOURCE;
2996                goto out;
2997        }
2998
2999        if (basedev->features & DASD_FEATURE_READONLY &&
3000            rq_data_dir(req) == WRITE) {
3001                DBF_DEV_EVENT(DBF_ERR, basedev,
3002                              "Rejecting write request %p", req);
3003                rc = BLK_STS_IOERR;
3004                goto out;
3005        }
3006
3007        if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
3008            (basedev->features & DASD_FEATURE_FAILFAST ||
3009             blk_noretry_request(req))) {
3010                DBF_DEV_EVENT(DBF_ERR, basedev,
3011                              "Rejecting failfast request %p", req);
3012                rc = BLK_STS_IOERR;
3013                goto out;
3014        }
3015
3016        cqr = basedev->discipline->build_cp(basedev, block, req);
3017        if (IS_ERR(cqr)) {
3018                if (PTR_ERR(cqr) == -EBUSY ||
3019                    PTR_ERR(cqr) == -ENOMEM ||
3020                    PTR_ERR(cqr) == -EAGAIN) {
3021                        rc = BLK_STS_RESOURCE;
3022                        goto out;
3023                }
3024                DBF_DEV_EVENT(DBF_ERR, basedev,
3025                              "CCW creation failed (rc=%ld) on request %p",
3026                              PTR_ERR(cqr), req);
3027                rc = BLK_STS_IOERR;
3028                goto out;
3029        }
3030        /*
3031         *  Note: callback is set to dasd_return_cqr_cb in
3032         * __dasd_block_start_head to cover erp requests as well
3033         */
3034        cqr->callback_data = req;
3035        cqr->status = DASD_CQR_FILLED;
3036        cqr->dq = dq;
3037        *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
3038
3039        blk_mq_start_request(req);
3040        spin_lock(&block->queue_lock);
3041        list_add_tail(&cqr->blocklist, &block->ccw_queue);
3042        INIT_LIST_HEAD(&cqr->devlist);
3043        dasd_profile_start(block, cqr, req);
3044        dasd_schedule_block_bh(block);
3045        spin_unlock(&block->queue_lock);
3046
3047out:
3048        spin_unlock_irq(&dq->lock);
3049        return rc;
3050}
3051
3052/*
3053 * Block timeout callback, called from the block layer
3054 *
3055 * Return values:
3056 * BLK_EH_RESET_TIMER if the request should be left running
3057 * BLK_EH_NOT_HANDLED if the request is handled or terminated
3058 *                    by the driver.
3059 */
3060enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
3061{
3062        struct dasd_block *block = req->q->queuedata;
3063        struct dasd_device *device;
3064        struct dasd_ccw_req *cqr;
3065        unsigned long flags;
3066        int rc = 0;
3067
3068        cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
3069        if (!cqr)
3070                return BLK_EH_NOT_HANDLED;
3071
3072        spin_lock_irqsave(&cqr->dq->lock, flags);
3073        device = cqr->startdev ? cqr->startdev : block->base;
3074        if (!device->blk_timeout) {
3075                spin_unlock_irqrestore(&cqr->dq->lock, flags);
3076                return BLK_EH_RESET_TIMER;
3077        }
3078        DBF_DEV_EVENT(DBF_WARNING, device,
3079                      " dasd_times_out cqr %p status %x",
3080                      cqr, cqr->status);
3081
3082        spin_lock(&block->queue_lock);
3083        spin_lock(get_ccwdev_lock(device->cdev));
3084        cqr->retries = -1;
3085        cqr->intrc = -ETIMEDOUT;
3086        if (cqr->status >= DASD_CQR_QUEUED) {
3087                spin_unlock(get_ccwdev_lock(device->cdev));
3088                rc = dasd_cancel_req(cqr);
3089        } else if (cqr->status == DASD_CQR_FILLED ||
3090                   cqr->status == DASD_CQR_NEED_ERP) {
3091                cqr->status = DASD_CQR_TERMINATED;
3092                spin_unlock(get_ccwdev_lock(device->cdev));
3093        } else if (cqr->status == DASD_CQR_IN_ERP) {
3094                struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
3095
3096                list_for_each_entry_safe(searchcqr, nextcqr,
3097                                         &block->ccw_queue, blocklist) {
3098                        tmpcqr = searchcqr;
3099                        while (tmpcqr->refers)
3100                                tmpcqr = tmpcqr->refers;
3101                        if (tmpcqr != cqr)
3102                                continue;
3103                        /* searchcqr is an ERP request for cqr */
3104                        searchcqr->retries = -1;
3105                        searchcqr->intrc = -ETIMEDOUT;
3106                        if (searchcqr->status >= DASD_CQR_QUEUED) {
3107                                spin_unlock(get_ccwdev_lock(device->cdev));
3108                                rc = dasd_cancel_req(searchcqr);
3109                                spin_lock(get_ccwdev_lock(device->cdev));
3110                        } else if ((searchcqr->status == DASD_CQR_FILLED) ||
3111                                   (searchcqr->status == DASD_CQR_NEED_ERP)) {
3112                                searchcqr->status = DASD_CQR_TERMINATED;
3113                                rc = 0;
3114                        } else if (searchcqr->status == DASD_CQR_IN_ERP) {
3115                                /*
3116                                 * Shouldn't happen; most recent ERP
3117                                 * request is at the front of queue
3118                                 */
3119                                continue;
3120                        }
3121                        break;
3122                }
3123                spin_unlock(get_ccwdev_lock(device->cdev));
3124        }
3125        dasd_schedule_block_bh(block);
3126        spin_unlock(&block->queue_lock);
3127        spin_unlock_irqrestore(&cqr->dq->lock, flags);
3128
3129        return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
3130}
3131
3132static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
3133                          unsigned int idx)
3134{
3135        struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
3136
3137        if (!dq)
3138                return -ENOMEM;
3139
3140        spin_lock_init(&dq->lock);
3141        hctx->driver_data = dq;
3142
3143        return 0;
3144}
3145
3146static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
3147{
3148        kfree(hctx->driver_data);
3149        hctx->driver_data = NULL;
3150}
3151
3152static void dasd_request_done(struct request *req)
3153{
3154        blk_mq_end_request(req, 0);
3155        blk_mq_run_hw_queues(req->q, true);
3156}
3157
3158static struct blk_mq_ops dasd_mq_ops = {
3159        .queue_rq = do_dasd_request,
3160        .complete = dasd_request_done,
3161        .timeout = dasd_times_out,
3162        .init_hctx = dasd_init_hctx,
3163        .exit_hctx = dasd_exit_hctx,
3164};
3165
3166/*
3167 * Allocate and initialize request queue and default I/O scheduler.
3168 */
3169static int dasd_alloc_queue(struct dasd_block *block)
3170{
3171        int rc;
3172
3173        block->tag_set.ops = &dasd_mq_ops;
3174        block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
3175        block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
3176        block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
3177        block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
3178
3179        rc = blk_mq_alloc_tag_set(&block->tag_set);
3180        if (rc)
3181                return rc;
3182
3183        block->request_queue = blk_mq_init_queue(&block->tag_set);
3184        if (IS_ERR(block->request_queue))
3185                return PTR_ERR(block->request_queue);
3186
3187        block->request_queue->queuedata = block;
3188
3189        return 0;
3190}
3191
3192/*
3193 * Allocate and initialize request queue.
3194 */
3195static void dasd_setup_queue(struct dasd_block *block)
3196{
3197        unsigned int logical_block_size = block->bp_block;
3198        struct request_queue *q = block->request_queue;
3199        unsigned int max_bytes, max_discard_sectors;
3200        int max;
3201
3202        if (block->base->features & DASD_FEATURE_USERAW) {
3203                /*
3204                 * the max_blocks value for raw_track access is 256
3205                 * it is higher than the native ECKD value because we
3206                 * only need one ccw per track
3207                 * so the max_hw_sectors are
3208                 * 2048 x 512B = 1024kB = 16 tracks
3209                 */
3210                max = 2048;
3211        } else {
3212                max = block->base->discipline->max_blocks << block->s2b_shift;
3213        }
3214        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
3215        q->limits.max_dev_sectors = max;
3216        blk_queue_logical_block_size(q, logical_block_size);
3217        blk_queue_max_hw_sectors(q, max);
3218        blk_queue_max_segments(q, USHRT_MAX);
3219        /* with page sized segments we can translate each segement into
3220         * one idaw/tidaw
3221         */
3222        blk_queue_max_segment_size(q, PAGE_SIZE);
3223        blk_queue_segment_boundary(q, PAGE_SIZE - 1);
3224
3225        /* Only activate blocklayer discard support for devices that support it */
3226        if (block->base->features & DASD_FEATURE_DISCARD) {
3227                q->limits.discard_granularity = logical_block_size;
3228                q->limits.discard_alignment = PAGE_SIZE;
3229
3230                /* Calculate max_discard_sectors and make it PAGE aligned */
3231                max_bytes = USHRT_MAX * logical_block_size;
3232                max_bytes = ALIGN(max_bytes, PAGE_SIZE) - PAGE_SIZE;
3233                max_discard_sectors = max_bytes / logical_block_size;
3234
3235                blk_queue_max_discard_sectors(q, max_discard_sectors);
3236                blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
3237                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
3238        }
3239}
3240
3241/*
3242 * Deactivate and free request queue.
3243 */
3244static void dasd_free_queue(struct dasd_block *block)
3245{
3246        if (block->request_queue) {
3247                blk_cleanup_queue(block->request_queue);
3248                blk_mq_free_tag_set(&block->tag_set);
3249                block->request_queue = NULL;
3250        }
3251}
3252
3253static int dasd_open(struct block_device *bdev, fmode_t mode)
3254{
3255        struct dasd_device *base;
3256        int rc;
3257
3258        base = dasd_device_from_gendisk(bdev->bd_disk);
3259        if (!base)
3260                return -ENODEV;
3261
3262        atomic_inc(&base->block->open_count);
3263        if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
3264                rc = -ENODEV;
3265                goto unlock;
3266        }
3267
3268        if (!try_module_get(base->discipline->owner)) {
3269                rc = -EINVAL;
3270                goto unlock;
3271        }
3272
3273        if (dasd_probeonly) {
3274                dev_info(&base->cdev->dev,
3275                         "Accessing the DASD failed because it is in "
3276                         "probeonly mode\n");
3277                rc = -EPERM;
3278                goto out;
3279        }
3280
3281        if (base->state <= DASD_STATE_BASIC) {
3282                DBF_DEV_EVENT(DBF_ERR, base, " %s",
3283                              " Cannot open unrecognized device");
3284                rc = -ENODEV;
3285                goto out;
3286        }
3287
3288        if ((mode & FMODE_WRITE) &&
3289            (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
3290             (base->features & DASD_FEATURE_READONLY))) {
3291                rc = -EROFS;
3292                goto out;
3293        }
3294
3295        dasd_put_device(base);
3296        return 0;
3297
3298out:
3299        module_put(base->discipline->owner);
3300unlock:
3301        atomic_dec(&base->block->open_count);
3302        dasd_put_device(base);
3303        return rc;
3304}
3305
3306static void dasd_release(struct gendisk *disk, fmode_t mode)
3307{
3308        struct dasd_device *base = dasd_device_from_gendisk(disk);
3309        if (base) {
3310                atomic_dec(&base->block->open_count);
3311                module_put(base->discipline->owner);
3312                dasd_put_device(base);
3313        }
3314}
3315
3316/*
3317 * Return disk geometry.
3318 */
3319static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
3320{
3321        struct dasd_device *base;
3322
3323        base = dasd_device_from_gendisk(bdev->bd_disk);
3324        if (!base)
3325                return -ENODEV;
3326
3327        if (!base->discipline ||
3328            !base->discipline->fill_geometry) {
3329                dasd_put_device(base);
3330                return -EINVAL;
3331        }
3332        base->discipline->fill_geometry(base->block, geo);
3333        geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
3334        dasd_put_device(base);
3335        return 0;
3336}
3337
3338const struct block_device_operations
3339dasd_device_operations = {
3340        .owner          = THIS_MODULE,
3341        .open           = dasd_open,
3342        .release        = dasd_release,
3343        .ioctl          = dasd_ioctl,
3344        .compat_ioctl   = dasd_ioctl,
3345        .getgeo         = dasd_getgeo,
3346};
3347
3348/*******************************************************************************
3349 * end of block device operations
3350 */
3351
3352static void
3353dasd_exit(void)
3354{
3355#ifdef CONFIG_PROC_FS
3356        dasd_proc_exit();
3357#endif
3358        dasd_eer_exit();
3359        if (dasd_page_cache != NULL) {
3360                kmem_cache_destroy(dasd_page_cache);
3361                dasd_page_cache = NULL;
3362        }
3363        dasd_gendisk_exit();
3364        dasd_devmap_exit();
3365        if (dasd_debug_area != NULL) {
3366                debug_unregister(dasd_debug_area);
3367                dasd_debug_area = NULL;
3368        }
3369        dasd_statistics_removeroot();
3370}
3371
3372/*
3373 * SECTION: common functions for ccw_driver use
3374 */
3375
3376/*
3377 * Is the device read-only?
3378 * Note that this function does not report the setting of the
3379 * readonly device attribute, but how it is configured in z/VM.
3380 */
3381int dasd_device_is_ro(struct dasd_device *device)
3382{
3383        struct ccw_dev_id dev_id;
3384        struct diag210 diag_data;
3385        int rc;
3386
3387        if (!MACHINE_IS_VM)
3388                return 0;
3389        ccw_device_get_id(device->cdev, &dev_id);
3390        memset(&diag_data, 0, sizeof(diag_data));
3391        diag_data.vrdcdvno = dev_id.devno;
3392        diag_data.vrdclen = sizeof(diag_data);
3393        rc = diag210(&diag_data);
3394        if (rc == 0 || rc == 2) {
3395                return diag_data.vrdcvfla & 0x80;
3396        } else {
3397                DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
3398                          dev_id.devno, rc);
3399                return 0;
3400        }
3401}
3402EXPORT_SYMBOL_GPL(dasd_device_is_ro);
3403
3404static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
3405{
3406        struct ccw_device *cdev = data;
3407        int ret;
3408
3409        ret = ccw_device_set_online(cdev);
3410        if (ret)
3411                pr_warn("%s: Setting the DASD online failed with rc=%d\n",
3412                        dev_name(&cdev->dev), ret);
3413}
3414
3415/*
3416 * Initial attempt at a probe function. this can be simplified once
3417 * the other detection code is gone.
3418 */
3419int dasd_generic_probe(struct ccw_device *cdev,
3420                       struct dasd_discipline *discipline)
3421{
3422        int ret;
3423
3424        ret = dasd_add_sysfs_files(cdev);
3425        if (ret) {
3426                DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
3427                                "dasd_generic_probe: could not add "
3428                                "sysfs entries");
3429                return ret;
3430        }
3431        cdev->handler = &dasd_int_handler;
3432
3433        /*
3434         * Automatically online either all dasd devices (dasd_autodetect)
3435         * or all devices specified with dasd= parameters during
3436         * initial probe.
3437         */
3438        if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
3439            (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
3440                async_schedule(dasd_generic_auto_online, cdev);
3441        return 0;
3442}
3443EXPORT_SYMBOL_GPL(dasd_generic_probe);
3444
3445void dasd_generic_free_discipline(struct dasd_device *device)
3446{
3447        /* Forget the discipline information. */
3448        if (device->discipline) {
3449                if (device->discipline->uncheck_device)
3450                        device->discipline->uncheck_device(device);
3451                module_put(device->discipline->owner);
3452                device->discipline = NULL;
3453        }
3454        if (device->base_discipline) {
3455                module_put(device->base_discipline->owner);
3456                device->base_discipline = NULL;
3457        }
3458}
3459EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
3460
3461/*
3462 * This will one day be called from a global not_oper handler.
3463 * It is also used by driver_unregister during module unload.
3464 */
3465void dasd_generic_remove(struct ccw_device *cdev)
3466{
3467        struct dasd_device *device;
3468        struct dasd_block *block;
3469
3470        cdev->handler = NULL;
3471
3472        device = dasd_device_from_cdev(cdev);
3473        if (IS_ERR(device)) {
3474                dasd_remove_sysfs_files(cdev);
3475                return;
3476        }
3477        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3478            !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3479                /* Already doing offline processing */
3480                dasd_put_device(device);
3481                dasd_remove_sysfs_files(cdev);
3482                return;
3483        }
3484        /*
3485         * This device is removed unconditionally. Set offline
3486         * flag to prevent dasd_open from opening it while it is
3487         * no quite down yet.
3488         */
3489        dasd_set_target_state(device, DASD_STATE_NEW);
3490        /* dasd_delete_device destroys the device reference. */
3491        block = device->block;
3492        dasd_delete_device(device);
3493        /*
3494         * life cycle of block is bound to device, so delete it after
3495         * device was safely removed
3496         */
3497        if (block)
3498                dasd_free_block(block);
3499
3500        dasd_remove_sysfs_files(cdev);
3501}
3502EXPORT_SYMBOL_GPL(dasd_generic_remove);
3503
3504/*
3505 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
3506 * the device is detected for the first time and is supposed to be used
3507 * or the user has started activation through sysfs.
3508 */
3509int dasd_generic_set_online(struct ccw_device *cdev,
3510                            struct dasd_discipline *base_discipline)
3511{
3512        struct dasd_discipline *discipline;
3513        struct dasd_device *device;
3514        int rc;
3515
3516        /* first online clears initial online feature flag */
3517        dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
3518        device = dasd_create_device(cdev);
3519        if (IS_ERR(device))
3520                return PTR_ERR(device);
3521
3522        discipline = base_discipline;
3523        if (device->features & DASD_FEATURE_USEDIAG) {
3524                if (!dasd_diag_discipline_pointer) {
3525                        /* Try to load the required module. */
3526                        rc = request_module(DASD_DIAG_MOD);
3527                        if (rc) {
3528                                pr_warn("%s Setting the DASD online failed "
3529                                        "because the required module %s "
3530                                        "could not be loaded (rc=%d)\n",
3531                                        dev_name(&cdev->dev), DASD_DIAG_MOD,
3532                                        rc);
3533                                dasd_delete_device(device);
3534                                return -ENODEV;
3535                        }
3536                }
3537                /* Module init could have failed, so check again here after
3538                 * request_module(). */
3539                if (!dasd_diag_discipline_pointer) {
3540                        pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
3541                                dev_name(&cdev->dev));
3542                        dasd_delete_device(device);
3543                        return -ENODEV;
3544                }
3545                discipline = dasd_diag_discipline_pointer;
3546        }
3547        if (!try_module_get(base_discipline->owner)) {
3548                dasd_delete_device(device);
3549                return -EINVAL;
3550        }
3551        if (!try_module_get(discipline->owner)) {
3552                module_put(base_discipline->owner);
3553                dasd_delete_device(device);
3554                return -EINVAL;
3555        }
3556        device->base_discipline = base_discipline;
3557        device->discipline = discipline;
3558
3559        /* check_device will allocate block device if necessary */
3560        rc = discipline->check_device(device);
3561        if (rc) {
3562                pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
3563                        dev_name(&cdev->dev), discipline->name, rc);
3564                module_put(discipline->owner);
3565                module_put(base_discipline->owner);
3566                dasd_delete_device(device);
3567                return rc;
3568        }
3569
3570        dasd_set_target_state(device, DASD_STATE_ONLINE);
3571        if (device->state <= DASD_STATE_KNOWN) {
3572                pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
3573                        dev_name(&cdev->dev));
3574                rc = -ENODEV;
3575                dasd_set_target_state(device, DASD_STATE_NEW);
3576                if (device->block)
3577                        dasd_free_block(device->block);
3578                dasd_delete_device(device);
3579        } else
3580                pr_debug("dasd_generic device %s found\n",
3581                                dev_name(&cdev->dev));
3582
3583        wait_event(dasd_init_waitq, _wait_for_device(device));
3584
3585        dasd_put_device(device);
3586        return rc;
3587}
3588EXPORT_SYMBOL_GPL(dasd_generic_set_online);
3589
3590int dasd_generic_set_offline(struct ccw_device *cdev)
3591{
3592        struct dasd_device *device;
3593        struct dasd_block *block;
3594        int max_count, open_count, rc;
3595        unsigned long flags;
3596
3597        rc = 0;
3598        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3599        device = dasd_device_from_cdev_locked(cdev);
3600        if (IS_ERR(device)) {
3601                spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3602                return PTR_ERR(device);
3603        }
3604
3605        /*
3606         * We must make sure that this device is currently not in use.
3607         * The open_count is increased for every opener, that includes
3608         * the blkdev_get in dasd_scan_partitions. We are only interested
3609         * in the other openers.
3610         */
3611        if (device->block) {
3612                max_count = device->block->bdev ? 0 : -1;
3613                open_count = atomic_read(&device->block->open_count);
3614                if (open_count > max_count) {
3615                        if (open_count > 0)
3616                                pr_warn("%s: The DASD cannot be set offline with open count %i\n",
3617                                        dev_name(&cdev->dev), open_count);
3618                        else
3619                                pr_warn("%s: The DASD cannot be set offline while it is in use\n",
3620                                        dev_name(&cdev->dev));
3621                        rc = -EBUSY;
3622                        goto out_err;
3623                }
3624        }
3625
3626        /*
3627         * Test if the offline processing is already running and exit if so.
3628         * If a safe offline is being processed this could only be a normal
3629         * offline that should be able to overtake the safe offline and
3630         * cancel any I/O we do not want to wait for any longer
3631         */
3632        if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
3633                if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3634                        clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
3635                                  &device->flags);
3636                } else {
3637                        rc = -EBUSY;
3638                        goto out_err;
3639                }
3640        }
3641        set_bit(DASD_FLAG_OFFLINE, &device->flags);
3642
3643        /*
3644         * if safe_offline is called set safe_offline_running flag and
3645         * clear safe_offline so that a call to normal offline
3646         * can overrun safe_offline processing
3647         */
3648        if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
3649            !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3650                /* need to unlock here to wait for outstanding I/O */
3651                spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3652                /*
3653                 * If we want to set the device safe offline all IO operations
3654                 * should be finished before continuing the offline process
3655                 * so sync bdev first and then wait for our queues to become
3656                 * empty
3657                 */
3658                if (device->block) {
3659                        rc = fsync_bdev(device->block->bdev);
3660                        if (rc != 0)
3661                                goto interrupted;
3662                }
3663                dasd_schedule_device_bh(device);
3664                rc = wait_event_interruptible(shutdown_waitq,
3665                                              _wait_for_empty_queues(device));
3666                if (rc != 0)
3667                        goto interrupted;
3668
3669                /*
3670                 * check if a normal offline process overtook the offline
3671                 * processing in this case simply do nothing beside returning
3672                 * that we got interrupted
3673                 * otherwise mark safe offline as not running any longer and
3674                 * continue with normal offline
3675                 */
3676                spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3677                if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3678                        rc = -ERESTARTSYS;
3679                        goto out_err;
3680                }
3681                clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3682        }
3683        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3684
3685        dasd_set_target_state(device, DASD_STATE_NEW);
3686        /* dasd_delete_device destroys the device reference. */
3687        block = device->block;
3688        dasd_delete_device(device);
3689        /*
3690         * life cycle of block is bound to device, so delete it after
3691         * device was safely removed
3692         */
3693        if (block)
3694                dasd_free_block(block);
3695
3696        return 0;
3697
3698interrupted:
3699        /* interrupted by signal */
3700        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3701        clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3702        clear_bit(DASD_FLAG_OFFLINE, &device->flags);
3703out_err:
3704        dasd_put_device(device);
3705        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3706        return rc;
3707}
3708EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
3709
3710int dasd_generic_last_path_gone(struct dasd_device *device)
3711{
3712        struct dasd_ccw_req *cqr;
3713
3714        dev_warn(&device->cdev->dev, "No operational channel path is left "
3715                 "for the device\n");
3716        DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
3717        /* First of all call extended error reporting. */
3718        dasd_eer_write(device, NULL, DASD_EER_NOPATH);
3719
3720        if (device->state < DASD_STATE_BASIC)
3721                return 0;
3722        /* Device is active. We want to keep it. */
3723        list_for_each_entry(cqr, &device->ccw_queue, devlist)
3724                if ((cqr->status == DASD_CQR_IN_IO) ||
3725                    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
3726                        cqr->status = DASD_CQR_QUEUED;
3727                        cqr->retries++;
3728                }
3729        dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
3730        dasd_device_clear_timer(device);
3731        dasd_schedule_device_bh(device);
3732        return 1;
3733}
3734EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
3735
3736int dasd_generic_path_operational(struct dasd_device *device)
3737{
3738        dev_info(&device->cdev->dev, "A channel path to the device has become "
3739                 "operational\n");
3740        DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
3741        dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
3742        if (device->stopped & DASD_UNRESUMED_PM) {
3743                dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
3744                dasd_restore_device(device);
3745                return 1;
3746        }
3747        dasd_schedule_device_bh(device);
3748        if (device->block) {
3749                dasd_schedule_block_bh(device->block);
3750                if (device->block->request_queue)
3751                        blk_mq_run_hw_queues(device->block->request_queue,
3752                                             true);
3753                }
3754
3755        if (!device->stopped)
3756                wake_up(&generic_waitq);
3757
3758        return 1;
3759}
3760EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
3761
3762int dasd_generic_notify(struct ccw_device *cdev, int event)
3763{
3764        struct dasd_device *device;
3765        int ret;
3766
3767        device = dasd_device_from_cdev_locked(cdev);
3768        if (IS_ERR(device))
3769                return 0;
3770        ret = 0;
3771        switch (event) {
3772        case CIO_GONE:
3773        case CIO_BOXED:
3774        case CIO_NO_PATH:
3775                dasd_path_no_path(device);
3776                ret = dasd_generic_last_path_gone(device);
3777                break;
3778        case CIO_OPER:
3779                ret = 1;
3780                if (dasd_path_get_opm(device))
3781                        ret = dasd_generic_path_operational(device);
3782                break;
3783        }
3784        dasd_put_device(device);
3785        return ret;
3786}
3787EXPORT_SYMBOL_GPL(dasd_generic_notify);
3788
3789void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
3790{
3791        struct dasd_device *device;
3792        int chp, oldopm, hpfpm, ifccpm;
3793
3794        device = dasd_device_from_cdev_locked(cdev);
3795        if (IS_ERR(device))
3796                return;
3797
3798        oldopm = dasd_path_get_opm(device);
3799        for (chp = 0; chp < 8; chp++) {
3800                if (path_event[chp] & PE_PATH_GONE) {
3801                        dasd_path_notoper(device, chp);
3802                }
3803                if (path_event[chp] & PE_PATH_AVAILABLE) {
3804                        dasd_path_available(device, chp);
3805                        dasd_schedule_device_bh(device);
3806                }
3807                if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
3808                        if (!dasd_path_is_operational(device, chp) &&
3809                            !dasd_path_need_verify(device, chp)) {
3810                                /*
3811                                 * we can not establish a pathgroup on an
3812                                 * unavailable path, so trigger a path
3813                                 * verification first
3814                                 */
3815                        dasd_path_available(device, chp);
3816                        dasd_schedule_device_bh(device);
3817                        }
3818                        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3819                                      "Pathgroup re-established\n");
3820                        if (device->discipline->kick_validate)
3821                                device->discipline->kick_validate(device);
3822                }
3823        }
3824        hpfpm = dasd_path_get_hpfpm(device);
3825        ifccpm = dasd_path_get_ifccpm(device);
3826        if (!dasd_path_get_opm(device) && hpfpm) {
3827                /*
3828                 * device has no operational paths but at least one path is
3829                 * disabled due to HPF errors
3830                 * disable HPF at all and use the path(s) again
3831                 */
3832                if (device->discipline->disable_hpf)
3833                        device->discipline->disable_hpf(device);
3834                dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
3835                dasd_path_set_tbvpm(device, hpfpm);
3836                dasd_schedule_device_bh(device);
3837                dasd_schedule_requeue(device);
3838        } else if (!dasd_path_get_opm(device) && ifccpm) {
3839                /*
3840                 * device has no operational paths but at least one path is
3841                 * disabled due to IFCC errors
3842                 * trigger path verification on paths with IFCC errors
3843                 */
3844                dasd_path_set_tbvpm(device, ifccpm);
3845                dasd_schedule_device_bh(device);
3846        }
3847        if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
3848                dev_warn(&device->cdev->dev,
3849                         "No verified channel paths remain for the device\n");
3850                DBF_DEV_EVENT(DBF_WARNING, device,
3851                              "%s", "last verified path gone");
3852                dasd_eer_write(device, NULL, DASD_EER_NOPATH);
3853                dasd_device_set_stop_bits(device,
3854                                          DASD_STOPPED_DC_WAIT);
3855        }
3856        dasd_put_device(device);
3857}
3858EXPORT_SYMBOL_GPL(dasd_generic_path_event);
3859
3860int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
3861{
3862        if (!dasd_path_get_opm(device) && lpm) {
3863                dasd_path_set_opm(device, lpm);
3864                dasd_generic_path_operational(device);
3865        } else
3866                dasd_path_add_opm(device, lpm);
3867        return 0;
3868}
3869EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
3870
3871/*
3872 * clear active requests and requeue them to block layer if possible
3873 */
3874static int dasd_generic_requeue_all_requests(struct dasd_device *device)
3875{
3876        struct list_head requeue_queue;
3877        struct dasd_ccw_req *cqr, *n;
3878        struct dasd_ccw_req *refers;
3879        int rc;
3880
3881        INIT_LIST_HEAD(&requeue_queue);
3882        spin_lock_irq(get_ccwdev_lock(device->cdev));
3883        rc = 0;
3884        list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
3885                /* Check status and move request to flush_queue */
3886                if (cqr->status == DASD_CQR_IN_IO) {
3887                        rc = device->discipline->term_IO(cqr);
3888                        if (rc) {
3889                                /* unable to terminate requeust */
3890                                dev_err(&device->cdev->dev,
3891                                        "Unable to terminate request %p "
3892                                        "on suspend\n", cqr);
3893                                spin_unlock_irq(get_ccwdev_lock(device->cdev));
3894                                dasd_put_device(device);
3895                                return rc;
3896                        }
3897                }
3898                list_move_tail(&cqr->devlist, &requeue_queue);
3899        }
3900        spin_unlock_irq(get_ccwdev_lock(device->cdev));
3901
3902        list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
3903                wait_event(dasd_flush_wq,
3904                           (cqr->status != DASD_CQR_CLEAR_PENDING));
3905
3906                /*
3907                 * requeue requests to blocklayer will only work
3908                 * for block device requests
3909                 */
3910                if (_dasd_requeue_request(cqr))
3911                        continue;
3912
3913                /* remove requests from device and block queue */
3914                list_del_init(&cqr->devlist);
3915                while (cqr->refers != NULL) {
3916                        refers = cqr->refers;
3917                        /* remove the request from the block queue */
3918                        list_del(&cqr->blocklist);
3919                        /* free the finished erp request */
3920                        dasd_free_erp_request(cqr, cqr->memdev);
3921                        cqr = refers;
3922                }
3923
3924                /*
3925                 * _dasd_requeue_request already checked for a valid
3926                 * blockdevice, no need to check again
3927                 * all erp requests (cqr->refers) have a cqr->block
3928                 * pointer copy from the original cqr
3929                 */
3930                list_del_init(&cqr->blocklist);
3931                cqr->block->base->discipline->free_cp(
3932                        cqr, (struct request *) cqr->callback_data);
3933        }
3934
3935        /*
3936         * if requests remain then they are internal request
3937         * and go back to the device queue
3938         */
3939        if (!list_empty(&requeue_queue)) {
3940                /* move freeze_queue to start of the ccw_queue */
3941                spin_lock_irq(get_ccwdev_lock(device->cdev));
3942                list_splice_tail(&requeue_queue, &device->ccw_queue);
3943                spin_unlock_irq(get_ccwdev_lock(device->cdev));
3944        }
3945        dasd_schedule_device_bh(device);
3946        return rc;
3947}
3948
3949static void do_requeue_requests(struct work_struct *work)
3950{
3951        struct dasd_device *device = container_of(work, struct dasd_device,
3952                                                  requeue_requests);
3953        dasd_generic_requeue_all_requests(device);
3954        dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
3955        if (device->block)
3956                dasd_schedule_block_bh(device->block);
3957        dasd_put_device(device);
3958}
3959
3960void dasd_schedule_requeue(struct dasd_device *device)
3961{
3962        dasd_get_device(device);
3963        /* queue call to dasd_reload_device to the kernel event daemon. */
3964        if (!schedule_work(&device->requeue_requests))
3965                dasd_put_device(device);
3966}
3967EXPORT_SYMBOL(dasd_schedule_requeue);
3968
3969int dasd_generic_pm_freeze(struct ccw_device *cdev)
3970{
3971        struct dasd_device *device = dasd_device_from_cdev(cdev);
3972
3973        if (IS_ERR(device))
3974                return PTR_ERR(device);
3975
3976        /* mark device as suspended */
3977        set_bit(DASD_FLAG_SUSPENDED, &device->flags);
3978
3979        if (device->discipline->freeze)
3980                device->discipline->freeze(device);
3981
3982        /* disallow new I/O  */
3983        dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
3984
3985        return dasd_generic_requeue_all_requests(device);
3986}
3987EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
3988
3989int dasd_generic_restore_device(struct ccw_device *cdev)
3990{
3991        struct dasd_device *device = dasd_device_from_cdev(cdev);
3992        int rc = 0;
3993
3994        if (IS_ERR(device))
3995                return PTR_ERR(device);
3996
3997        /* allow new IO again */
3998        dasd_device_remove_stop_bits(device,
3999                                     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
4000
4001        dasd_schedule_device_bh(device);
4002
4003        /*
4004         * call discipline restore function
4005         * if device is stopped do nothing e.g. for disconnected devices
4006         */
4007        if (device->discipline->restore && !(device->stopped))
4008                rc = device->discipline->restore(device);
4009        if (rc || device->stopped)
4010                /*
4011                 * if the resume failed for the DASD we put it in
4012                 * an UNRESUMED stop state
4013                 */
4014                device->stopped |= DASD_UNRESUMED_PM;
4015
4016        if (device->block) {
4017                dasd_schedule_block_bh(device->block);
4018                if (device->block->request_queue)
4019                        blk_mq_run_hw_queues(device->block->request_queue,
4020                                             true);
4021        }
4022
4023        clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
4024        dasd_put_device(device);
4025        return 0;
4026}
4027EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
4028
4029static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
4030                                                   void *rdc_buffer,
4031                                                   int rdc_buffer_size,
4032                                                   int magic)
4033{
4034        struct dasd_ccw_req *cqr;
4035        struct ccw1 *ccw;
4036        unsigned long *idaw;
4037
4038        cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
4039
4040        if (IS_ERR(cqr)) {
4041                /* internal error 13 - Allocating the RDC request failed*/
4042                dev_err(&device->cdev->dev,
4043                         "An error occurred in the DASD device driver, "
4044                         "reason=%s\n", "13");
4045                return cqr;
4046        }
4047
4048        ccw = cqr->cpaddr;
4049        ccw->cmd_code = CCW_CMD_RDC;
4050        if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
4051                idaw = (unsigned long *) (cqr->data);
4052                ccw->cda = (__u32)(addr_t) idaw;
4053                ccw->flags = CCW_FLAG_IDA;
4054                idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
4055        } else {
4056                ccw->cda = (__u32)(addr_t) rdc_buffer;
4057                ccw->flags = 0;
4058        }
4059
4060        ccw->count = rdc_buffer_size;
4061        cqr->startdev = device;
4062        cqr->memdev = device;
4063        cqr->expires = 10*HZ;
4064        cqr->retries = 256;
4065        cqr->buildclk = get_tod_clock();
4066        cqr->status = DASD_CQR_FILLED;
4067        return cqr;
4068}
4069
4070
4071int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
4072                                void *rdc_buffer, int rdc_buffer_size)
4073{
4074        int ret;
4075        struct dasd_ccw_req *cqr;
4076
4077        cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
4078                                     magic);
4079        if (IS_ERR(cqr))
4080                return PTR_ERR(cqr);
4081
4082        ret = dasd_sleep_on(cqr);
4083        dasd_sfree_request(cqr, cqr->memdev);
4084        return ret;
4085}
4086EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
4087
4088/*
4089 *   In command mode and transport mode we need to look for sense
4090 *   data in different places. The sense data itself is allways
4091 *   an array of 32 bytes, so we can unify the sense data access
4092 *   for both modes.
4093 */
4094char *dasd_get_sense(struct irb *irb)
4095{
4096        struct tsb *tsb = NULL;
4097        char *sense = NULL;
4098
4099        if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
4100                if (irb->scsw.tm.tcw)
4101                        tsb = tcw_get_tsb((struct tcw *)(unsigned long)
4102                                          irb->scsw.tm.tcw);
4103                if (tsb && tsb->length == 64 && tsb->flags)
4104                        switch (tsb->flags & 0x07) {
4105                        case 1: /* tsa_iostat */
4106                                sense = tsb->tsa.iostat.sense;
4107                                break;
4108                        case 2: /* tsa_ddpc */
4109                                sense = tsb->tsa.ddpc.sense;
4110                                break;
4111                        default:
4112                                /* currently we don't use interrogate data */
4113                                break;
4114                        }
4115        } else if (irb->esw.esw0.erw.cons) {
4116                sense = irb->ecw;
4117        }
4118        return sense;
4119}
4120EXPORT_SYMBOL_GPL(dasd_get_sense);
4121
4122void dasd_generic_shutdown(struct ccw_device *cdev)
4123{
4124        struct dasd_device *device;
4125
4126        device = dasd_device_from_cdev(cdev);
4127        if (IS_ERR(device))
4128                return;
4129
4130        if (device->block)
4131                dasd_schedule_block_bh(device->block);
4132
4133        dasd_schedule_device_bh(device);
4134
4135        wait_event(shutdown_waitq, _wait_for_empty_queues(device));
4136}
4137EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
4138
4139static int __init dasd_init(void)
4140{
4141        int rc;
4142
4143        init_waitqueue_head(&dasd_init_waitq);
4144        init_waitqueue_head(&dasd_flush_wq);
4145        init_waitqueue_head(&generic_waitq);
4146        init_waitqueue_head(&shutdown_waitq);
4147
4148        /* register 'common' DASD debug area, used for all DBF_XXX calls */
4149        dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
4150        if (dasd_debug_area == NULL) {
4151                rc = -ENOMEM;
4152                goto failed;
4153        }
4154        debug_register_view(dasd_debug_area, &debug_sprintf_view);
4155        debug_set_level(dasd_debug_area, DBF_WARNING);
4156
4157        DBF_EVENT(DBF_EMERG, "%s", "debug area created");
4158
4159        dasd_diag_discipline_pointer = NULL;
4160
4161        dasd_statistics_createroot();
4162
4163        rc = dasd_devmap_init();
4164        if (rc)
4165                goto failed;
4166        rc = dasd_gendisk_init();
4167        if (rc)
4168                goto failed;
4169        rc = dasd_parse();
4170        if (rc)
4171                goto failed;
4172        rc = dasd_eer_init();
4173        if (rc)
4174                goto failed;
4175#ifdef CONFIG_PROC_FS
4176        rc = dasd_proc_init();
4177        if (rc)
4178                goto failed;
4179#endif
4180
4181        return 0;
4182failed:
4183        pr_info("The DASD device driver could not be initialized\n");
4184        dasd_exit();
4185        return rc;
4186}
4187
4188module_init(dasd_init);
4189module_exit(dasd_exit);
4190