// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD           "dasd_diag_mod"

static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
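
/*
 * Both parameters are read-only (0444) and take effect when a new DASD
 * device is set up. A hedged usage sketch, assuming the driver is built
 * as the usual dasd_mod module:
 *
 *   # modprobe dasd_mod queue_depth=64 nr_hw_queues=2
 *
 * or, equivalently, on the kernel command line:
 *
 *   dasd_mod.queue_depth=64 dasd_mod.nr_hw_queues=2
 */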

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
                   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int  dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
        struct dasd_device *device;

        device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
        if (!device)
                return ERR_PTR(-ENOMEM);

        /* Get two pages for normal block device operations. */
        device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
        if (!device->ccw_mem) {
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }
        /* Get one page for error recovery. */
        device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
        if (!device->erp_mem) {
                free_pages((unsigned long) device->ccw_mem, 1);
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }

        dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
        dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
        spin_lock_init(&device->mem_lock);
        atomic_set(&device->tasklet_scheduled, 0);
        tasklet_init(&device->tasklet, dasd_device_tasklet,
                     (unsigned long) device);
        INIT_LIST_HEAD(&device->ccw_queue);
        timer_setup(&device->timer, dasd_device_timeout, 0);
        INIT_WORK(&device->kick_work, do_kick_device);
        INIT_WORK(&device->restore_device, do_restore_device);
        INIT_WORK(&device->reload_device, do_reload_device);
        INIT_WORK(&device->requeue_requests, do_requeue_requests);
        device->state = DASD_STATE_NEW;
        device->target = DASD_STATE_NEW;
        mutex_init(&device->state_mutex);
        spin_lock_init(&device->profile.lock);
        return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
        kfree(device->private);
        free_page((unsigned long) device->erp_mem);
        free_pages((unsigned long) device->ccw_mem, 1);
        kfree(device);
}
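
/*
 * A minimal usage sketch for the pair of helpers above; errors follow
 * the ERR_PTR convention used by dasd_alloc_device():
 *
 *   struct dasd_device *device = dasd_alloc_device();
 *
 *   if (IS_ERR(device))
 *           return PTR_ERR(device);
 *   ...
 *   dasd_free_device(device);
 */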

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
        struct dasd_block *block;

        block = kzalloc(sizeof(*block), GFP_ATOMIC);
        if (!block)
                return ERR_PTR(-ENOMEM);
        /* open_count = 0 means device online but not in use */
        atomic_set(&block->open_count, -1);

        atomic_set(&block->tasklet_scheduled, 0);
        tasklet_init(&block->tasklet, dasd_block_tasklet,
                     (unsigned long) block);
        INIT_LIST_HEAD(&block->ccw_queue);
        spin_lock_init(&block->queue_lock);
        timer_setup(&block->timer, dasd_block_timeout, 0);
        spin_lock_init(&block->profile.lock);

        return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
        kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);
 187
 188/*
 189 * Make a new device known to the system.
 190 */
 191static int dasd_state_new_to_known(struct dasd_device *device)
 192{
 193        int rc;
 194
 195        /*
 196         * As long as the device is not in state DASD_STATE_NEW we want to
 197         * keep the reference count > 0.
 198         */
 199        dasd_get_device(device);
 200
 201        if (device->block) {
 202                rc = dasd_alloc_queue(device->block);
 203                if (rc) {
 204                        dasd_put_device(device);
 205                        return rc;
 206                }
 207        }
 208        device->state = DASD_STATE_KNOWN;
 209        return 0;
 210}
 211
 212/*
 213 * Let the system forget about a device.
 214 */
 215static int dasd_state_known_to_new(struct dasd_device *device)
 216{
 217        /* Disable extended error reporting for this device. */
 218        dasd_eer_disable(device);
 219        device->state = DASD_STATE_NEW;
 220
 221        if (device->block)
 222                dasd_free_queue(device->block);
 223
 224        /* Give up reference we took in dasd_state_new_to_known. */
 225        dasd_put_device(device);
 226        return 0;
 227}
 228
 229static struct dentry *dasd_debugfs_setup(const char *name,
 230                                         struct dentry *base_dentry)
 231{
 232        struct dentry *pde;
 233
 234        if (!base_dentry)
 235                return NULL;
 236        pde = debugfs_create_dir(name, base_dentry);
 237        if (!pde || IS_ERR(pde))
 238                return NULL;
 239        return pde;
 240}
 241
 242/*
 243 * Request the irq line for the device.
 244 */
 245static int dasd_state_known_to_basic(struct dasd_device *device)
 246{
 247        struct dasd_block *block = device->block;
 248        int rc = 0;
 249
 250        /* Allocate and register gendisk structure. */
 251        if (block) {
 252                rc = dasd_gendisk_alloc(block);
 253                if (rc)
 254                        return rc;
 255                block->debugfs_dentry =
 256                        dasd_debugfs_setup(block->gdp->disk_name,
 257                                           dasd_debugfs_root_entry);
 258                dasd_profile_init(&block->profile, block->debugfs_dentry);
 259                if (dasd_global_profile_level == DASD_PROFILE_ON)
 260                        dasd_profile_on(&device->block->profile);
 261        }
 262        device->debugfs_dentry =
 263                dasd_debugfs_setup(dev_name(&device->cdev->dev),
 264                                   dasd_debugfs_root_entry);
 265        dasd_profile_init(&device->profile, device->debugfs_dentry);
 266        dasd_hosts_init(device->debugfs_dentry, device);
 267
 268        /* register 'device' debug area, used for all DBF_DEV_XXX calls */
 269        device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
 270                                            8 * sizeof(long));
 271        debug_register_view(device->debug_area, &debug_sprintf_view);
 272        debug_set_level(device->debug_area, DBF_WARNING);
 273        DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
 274
 275        device->state = DASD_STATE_BASIC;
 276
 277        return rc;
 278}
 279
 280/*
 281 * Release the irq line for the device. Terminate any running i/o.
 282 */
 283static int dasd_state_basic_to_known(struct dasd_device *device)
 284{
 285        int rc;
 286
 287        if (device->discipline->basic_to_known) {
 288                rc = device->discipline->basic_to_known(device);
 289                if (rc)
 290                        return rc;
 291        }
 292
 293        if (device->block) {
 294                dasd_profile_exit(&device->block->profile);
 295                debugfs_remove(device->block->debugfs_dentry);
 296                dasd_gendisk_free(device->block);
 297                dasd_block_clear_timer(device->block);
 298        }
 299        rc = dasd_flush_device_queue(device);
 300        if (rc)
 301                return rc;
 302        dasd_device_clear_timer(device);
 303        dasd_profile_exit(&device->profile);
 304        dasd_hosts_exit(device);
 305        debugfs_remove(device->debugfs_dentry);
 306        DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
 307        if (device->debug_area != NULL) {
 308                debug_unregister(device->debug_area);
 309                device->debug_area = NULL;
 310        }
 311        device->state = DASD_STATE_KNOWN;
 312        return 0;
 313}
 314
 315/*
 316 * Do the initial analysis. The do_analysis function may return
 317 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 318 * until the discipline decides to continue the startup sequence
 319 * by calling the function dasd_change_state. The eckd disciplines
 320 * uses this to start a ccw that detects the format. The completion
 321 * interrupt for this detection ccw uses the kernel event daemon to
 322 * trigger the call to dasd_change_state. All this is done in the
 323 * discipline code, see dasd_eckd.c.
 324 * After the analysis ccw is done (do_analysis returned 0) the block
 325 * device is setup.
 326 * In case the analysis returns an error, the device setup is stopped
 327 * (a fake disk was already added to allow formatting).
 328 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
        int rc;
        struct dasd_block *block;
        struct gendisk *disk;

        rc = 0;
        block = device->block;
        /* make disk known with correct capacity */
        if (block) {
                if (block->base->discipline->do_analysis != NULL)
                        rc = block->base->discipline->do_analysis(block);
                if (rc) {
                        if (rc != -EAGAIN) {
                                device->state = DASD_STATE_UNFMT;
                                disk = device->block->gdp;
                                kobject_uevent(&disk_to_dev(disk)->kobj,
                                               KOBJ_CHANGE);
                                goto out;
                        }
                        return rc;
                }
                dasd_setup_queue(block);
                set_capacity(block->gdp,
                             block->blocks << block->s2b_shift);
                device->state = DASD_STATE_READY;
                rc = dasd_scan_partitions(block);
                if (rc) {
                        device->state = DASD_STATE_BASIC;
                        return rc;
                }
        } else {
                device->state = DASD_STATE_READY;
        }
out:
        if (device->discipline->basic_to_ready)
                rc = device->discipline->basic_to_ready(device);
        return rc;
}
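
/*
 * An illustrative sketch (not actual discipline code) of the -EAGAIN
 * protocol described above: a discipline's do_analysis() may start a
 * detection ccw and return -EAGAIN; the completion handler of that ccw
 * then calls dasd_kick_device(), which re-runs dasd_change_state() and
 * thereby retries this transition. The helpers are hypothetical:
 *
 *   static int my_do_analysis(struct dasd_block *block)
 *   {
 *           if (!analysis_done(block)) {
 *                   start_analysis_ccw(block);  // hypothetical helpers
 *                   return -EAGAIN;             // stay in DASD_STATE_BASIC
 *           }
 *           return 0;                           // continue towards READY
 *   }
 */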

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
        if (device->block)
                return list_empty(&device->ccw_queue) &&
                        list_empty(&device->block->ccw_queue);
        else
                return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is, create a fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
        int rc;

        device->state = DASD_STATE_BASIC;
        if (device->block) {
                struct dasd_block *block = device->block;
                rc = dasd_flush_block_queue(block);
                if (rc) {
                        device->state = DASD_STATE_READY;
                        return rc;
                }
                dasd_destroy_partitions(block);
                block->blocks = 0;
                block->bp_block = 0;
                block->s2b_shift = 0;
        }
        return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
        device->state = DASD_STATE_BASIC;
        return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device *device)
{
        struct gendisk *disk;
        struct disk_part_iter piter;
        struct hd_struct *part;

        device->state = DASD_STATE_ONLINE;
        if (device->block) {
                dasd_schedule_block_bh(device->block);
                if ((device->features & DASD_FEATURE_USERAW)) {
                        disk = device->block->gdp;
                        kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
                        return 0;
                }
                disk = device->block->bdev->bd_disk;
                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
                while ((part = disk_part_iter_next(&piter)))
                        kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
                disk_part_iter_exit(&piter);
        }
        return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
        int rc;
        struct gendisk *disk;
        struct disk_part_iter piter;
        struct hd_struct *part;

        if (device->discipline->online_to_ready) {
                rc = device->discipline->online_to_ready(device);
                if (rc)
                        return rc;
        }

        device->state = DASD_STATE_READY;
        if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
                disk = device->block->bdev->bd_disk;
                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
                while ((part = disk_part_iter_next(&piter)))
                        kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
                disk_part_iter_exit(&piter);
        }
        return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
        int rc;

        rc = 0;
        if (device->state == DASD_STATE_NEW &&
            device->target >= DASD_STATE_KNOWN)
                rc = dasd_state_new_to_known(device);

        if (!rc &&
            device->state == DASD_STATE_KNOWN &&
            device->target >= DASD_STATE_BASIC)
                rc = dasd_state_known_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_BASIC &&
            device->target >= DASD_STATE_READY)
                rc = dasd_state_basic_to_ready(device);

        if (!rc &&
            device->state == DASD_STATE_UNFMT &&
            device->target > DASD_STATE_UNFMT)
                rc = -EPERM;

        if (!rc &&
            device->state == DASD_STATE_READY &&
            device->target >= DASD_STATE_ONLINE)
                rc = dasd_state_ready_to_online(device);

        return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
        int rc;

        rc = 0;
        if (device->state == DASD_STATE_ONLINE &&
            device->target <= DASD_STATE_READY)
                rc = dasd_state_online_to_ready(device);

        if (!rc &&
            device->state == DASD_STATE_READY &&
            device->target <= DASD_STATE_BASIC)
                rc = dasd_state_ready_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_UNFMT &&
            device->target <= DASD_STATE_BASIC)
                rc = dasd_state_unfmt_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_BASIC &&
            device->target <= DASD_STATE_KNOWN)
                rc = dasd_state_basic_to_known(device);

        if (!rc &&
            device->state == DASD_STATE_KNOWN &&
            device->target <= DASD_STATE_NEW)
                rc = dasd_state_known_to_new(device);

        return rc;
}
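
/*
 * For reference, the state ladder traversed by the two helpers above:
 *
 *   NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *                       ^
 *                       +--- UNFMT (entered from basic_to_ready when the
 *                            analysis fails; only the way back to BASIC
 *                            is allowed, going up yields -EPERM)
 */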

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
        int rc;

        if (device->state == device->target)
                /* Already where we want to go today... */
                return;
        if (device->state < device->target)
                rc = dasd_increase_state(device);
        else
                rc = dasd_decrease_state(device);
        if (rc == -EAGAIN)
                return;
        if (rc)
                device->target = device->state;

        /* let user-space know that the device status changed */
        kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

        if (device->state == device->target)
                wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device on the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
        mutex_lock(&device->state_mutex);
        dasd_change_state(device);
        mutex_unlock(&device->state_mutex);
        dasd_schedule_device_bh(device);
        dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue a call to do_kick_device on the kernel event daemon. */
        if (!schedule_work(&device->kick_work))
                dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device on the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device,
                                                  reload_device);
        device->discipline->reload(device);
        dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue a call to do_reload_device on the kernel event daemon. */
        if (!schedule_work(&device->reload_device))
                dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device on the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device,
                                                  restore_device);
        device->cdev->drv->restore(device->cdev);
        dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue a call to do_restore_device on the kernel event daemon. */
        if (!schedule_work(&device->restore_device))
                dasd_put_device(device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
        dasd_get_device(device);
        mutex_lock(&device->state_mutex);
        /* If we are in probeonly mode stop at DASD_STATE_READY. */
        if (dasd_probeonly && target > DASD_STATE_READY)
                target = DASD_STATE_READY;
        if (device->target != target) {
                if (device->state == target)
                        wake_up(&dasd_init_waitq);
                device->target = target;
        }
        if (device->state != device->target)
                dasd_change_state(device);
        mutex_unlock(&device->state_mutex);
        dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);

/*
 * Enable a single device: set the target state to online and wait
 * until the device has reached it.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
        return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN)
                /* No discipline for device found. */
                dasd_set_target_state(device, DASD_STATE_NEW);
        /* Now wait for the devices to come up. */
        wait_event(dasd_init_waitq, _wait_for_device(device));

        dasd_reload_device(device);
        if (device->discipline->kick_validate)
                device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
        .lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
                               struct dasd_ccw_req *cqr,
                               struct request *req)
{
        struct list_head *l;
        unsigned int counter;
        struct dasd_device *device;

        /* count the length of the chanq for statistics */
        counter = 0;
        if (dasd_global_profile_level || block->profile.data)
                list_for_each(l, &block->ccw_queue)
                        if (++counter >= 31)
                                break;

        spin_lock(&dasd_global_profile.lock);
        if (dasd_global_profile.data) {
                dasd_global_profile.data->dasd_io_nr_req[counter]++;
                if (rq_data_dir(req) == READ)
                        dasd_global_profile.data->dasd_read_nr_req[counter]++;
        }
        spin_unlock(&dasd_global_profile.lock);

        spin_lock(&block->profile.lock);
        if (block->profile.data) {
                block->profile.data->dasd_io_nr_req[counter]++;
                if (rq_data_dir(req) == READ)
                        block->profile.data->dasd_read_nr_req[counter]++;
        }
        spin_unlock(&block->profile.lock);

        /*
         * We count the request for the start device, even though it may run on
         * some other device due to error recovery. This way we make sure that
         * we count each request only once.
         */
        device = cqr->startdev;
        if (device->profile.data) {
                counter = 1; /* request is not yet queued on the start device */
                list_for_each(l, &device->ccw_queue)
                        if (++counter >= 31)
                                break;
        }
        spin_lock(&device->profile.lock);
        if (device->profile.data) {
                device->profile.data->dasd_io_nr_req[counter]++;
                if (rq_data_dir(req) == READ)
                        device->profile.data->dasd_read_nr_req[counter]++;
        }
        spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index)                         \
{                                                                  \
        for (index = 0; index < 31 && value >> (2+index); index++) \
                ;                                                  \
}
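
/*
 * The macro above computes a logarithmic histogram index: bucket 0
 * holds values below 4, bucket 1 values 4-7, bucket 2 values 8-15,
 * and so on (roughly index = log2(value) - 1, capped at 31). For
 * example, dasd_profile_counter(8, i) leaves i == 2.
 */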

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
                                      int is_alias,
                                      int is_tpm,
                                      int is_read,
                                      long sectors,
                                      int sectors_ind,
                                      int tottime_ind,
                                      int tottimeps_ind,
                                      int strtime_ind,
                                      int irqtime_ind,
                                      int irqtimeps_ind,
                                      int endtime_ind)
{
        /* in case of an overflow, reset the whole profile */
        if (data->dasd_io_reqs == UINT_MAX) {
                memset(data, 0, sizeof(*data));
                ktime_get_real_ts64(&data->starttod);
        }
        data->dasd_io_reqs++;
        data->dasd_io_sects += sectors;
        if (is_alias)
                data->dasd_io_alias++;
        if (is_tpm)
                data->dasd_io_tpm++;

        data->dasd_io_secs[sectors_ind]++;
        data->dasd_io_times[tottime_ind]++;
        data->dasd_io_timps[tottimeps_ind]++;
        data->dasd_io_time1[strtime_ind]++;
        data->dasd_io_time2[irqtime_ind]++;
        data->dasd_io_time2ps[irqtimeps_ind]++;
        data->dasd_io_time3[endtime_ind]++;

        if (is_read) {
                data->dasd_read_reqs++;
                data->dasd_read_sects += sectors;
                if (is_alias)
                        data->dasd_read_alias++;
                if (is_tpm)
                        data->dasd_read_tpm++;
                data->dasd_read_secs[sectors_ind]++;
                data->dasd_read_times[tottime_ind]++;
                data->dasd_read_time1[strtime_ind]++;
                data->dasd_read_time2[irqtime_ind]++;
                data->dasd_read_time3[endtime_ind]++;
        }
}

static void dasd_profile_end(struct dasd_block *block,
                             struct dasd_ccw_req *cqr,
                             struct request *req)
{
        unsigned long strtime, irqtime, endtime, tottime;
        unsigned long tottimeps, sectors;
        struct dasd_device *device;
        int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
        int irqtime_ind, irqtimeps_ind, endtime_ind;
        struct dasd_profile_info *data;

        device = cqr->startdev;
        if (!(dasd_global_profile_level ||
              block->profile.data ||
              device->profile.data))
                return;

        sectors = blk_rq_sectors(req);
        if (!cqr->buildclk || !cqr->startclk ||
            !cqr->stopclk || !cqr->endclk ||
            !sectors)
                return;

        strtime = ((cqr->startclk - cqr->buildclk) >> 12);
        irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
        endtime = ((cqr->endclk - cqr->stopclk) >> 12);
        tottime = ((cqr->endclk - cqr->buildclk) >> 12);
        tottimeps = tottime / sectors;

        dasd_profile_counter(sectors, sectors_ind);
        dasd_profile_counter(tottime, tottime_ind);
        dasd_profile_counter(tottimeps, tottimeps_ind);
        dasd_profile_counter(strtime, strtime_ind);
        dasd_profile_counter(irqtime, irqtime_ind);
        dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
        dasd_profile_counter(endtime, endtime_ind);

        spin_lock(&dasd_global_profile.lock);
        if (dasd_global_profile.data) {
                data = dasd_global_profile.data;
                data->dasd_sum_times += tottime;
                data->dasd_sum_time_str += strtime;
                data->dasd_sum_time_irq += irqtime;
                data->dasd_sum_time_end += endtime;
                dasd_profile_end_add_data(dasd_global_profile.data,
                                          cqr->startdev != block->base,
                                          cqr->cpmode == 1,
                                          rq_data_dir(req) == READ,
                                          sectors, sectors_ind, tottime_ind,
                                          tottimeps_ind, strtime_ind,
                                          irqtime_ind, irqtimeps_ind,
                                          endtime_ind);
        }
        spin_unlock(&dasd_global_profile.lock);

        spin_lock(&block->profile.lock);
        if (block->profile.data) {
                data = block->profile.data;
                data->dasd_sum_times += tottime;
                data->dasd_sum_time_str += strtime;
                data->dasd_sum_time_irq += irqtime;
                data->dasd_sum_time_end += endtime;
                dasd_profile_end_add_data(block->profile.data,
                                          cqr->startdev != block->base,
                                          cqr->cpmode == 1,
                                          rq_data_dir(req) == READ,
                                          sectors, sectors_ind, tottime_ind,
                                          tottimeps_ind, strtime_ind,
                                          irqtime_ind, irqtimeps_ind,
                                          endtime_ind);
        }
        spin_unlock(&block->profile.lock);

        spin_lock(&device->profile.lock);
        if (device->profile.data) {
                data = device->profile.data;
                data->dasd_sum_times += tottime;
                data->dasd_sum_time_str += strtime;
                data->dasd_sum_time_irq += irqtime;
                data->dasd_sum_time_end += endtime;
                dasd_profile_end_add_data(device->profile.data,
                                          cqr->startdev != block->base,
                                          cqr->cpmode == 1,
                                          rq_data_dir(req) == READ,
                                          sectors, sectors_ind, tottime_ind,
                                          tottimeps_ind, strtime_ind,
                                          irqtime_ind, irqtimeps_ind,
                                          endtime_ind);
        }
        spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
        struct dasd_profile_info *data;

        spin_lock_bh(&profile->lock);
        data = profile->data;
        if (!data) {
                spin_unlock_bh(&profile->lock);
                return;
        }
        memset(data, 0, sizeof(*data));
        ktime_get_real_ts64(&data->starttod);
        spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
        struct dasd_profile_info *data;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        spin_lock_bh(&profile->lock);
        if (profile->data) {
                spin_unlock_bh(&profile->lock);
                kfree(data);
                return 0;
        }
        ktime_get_real_ts64(&data->starttod);
        profile->data = data;
        spin_unlock_bh(&profile->lock);
        return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
        spin_lock_bh(&profile->lock);
        kfree(profile->data);
        profile->data = NULL;
        spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
        char *buffer;

        buffer = vmalloc(user_len + 1);
        if (buffer == NULL)
                return ERR_PTR(-ENOMEM);
        if (copy_from_user(buffer, user_buf, user_len) != 0) {
                vfree(buffer);
                return ERR_PTR(-EFAULT);
        }
        /* got the string, now strip linefeed. */
        if (buffer[user_len - 1] == '\n')
                buffer[user_len - 1] = 0;
        else
                buffer[user_len] = 0;
        return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
                                const char __user *user_buf,
                                size_t user_len, loff_t *pos)
{
        char *buffer, *str;
        int rc;
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct dasd_profile *prof = m->private;

        if (user_len > 65536)
                user_len = 65536;
        buffer = dasd_get_user_string(user_buf, user_len);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        str = skip_spaces(buffer);
        rc = user_len;
        if (strncmp(str, "reset", 5) == 0) {
                dasd_profile_reset(prof);
        } else if (strncmp(str, "on", 2) == 0) {
                rc = dasd_profile_on(prof);
                if (rc)
                        goto out;
                rc = user_len;
                if (prof == &dasd_global_profile) {
                        dasd_profile_reset(prof);
                        dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
                }
        } else if (strncmp(str, "off", 3) == 0) {
                if (prof == &dasd_global_profile)
                        dasd_global_profile_level = DASD_PROFILE_OFF;
                dasd_profile_off(prof);
        } else
                rc = -EINVAL;
out:
        vfree(buffer);
        return rc;
}
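
/*
 * A usage sketch for the statistics interface above, assuming debugfs
 * is mounted at the default /sys/kernel/debug:
 *
 *   echo on    > /sys/kernel/debug/dasd/global/statistics
 *   cat          /sys/kernel/debug/dasd/global/statistics
 *   echo reset > /sys/kernel/debug/dasd/global/statistics
 *   echo off   > /sys/kernel/debug/dasd/global/statistics
 *
 * Per-device and per-block files of the same name live in the
 * corresponding subdirectories of /sys/kernel/debug/dasd/.
 */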

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
        int i;

        for (i = 0; i < 32; i++)
                seq_printf(m, "%u ", array[i]);
        seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
                                 struct dasd_profile_info *data)
{
        seq_printf(m, "start_time %lld.%09ld\n",
                   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
        seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
        seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
        seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
        seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
        seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
                   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
        seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
                   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
        seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
                   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
        seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
                   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
        seq_puts(m, "histogram_sectors ");
        dasd_stats_array(m, data->dasd_io_secs);
        seq_puts(m, "histogram_io_times ");
        dasd_stats_array(m, data->dasd_io_times);
        seq_puts(m, "histogram_io_times_weighted ");
        dasd_stats_array(m, data->dasd_io_timps);
        seq_puts(m, "histogram_time_build_to_ssch ");
        dasd_stats_array(m, data->dasd_io_time1);
        seq_puts(m, "histogram_time_ssch_to_irq ");
        dasd_stats_array(m, data->dasd_io_time2);
        seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
        dasd_stats_array(m, data->dasd_io_time2ps);
        seq_puts(m, "histogram_time_irq_to_end ");
        dasd_stats_array(m, data->dasd_io_time3);
        seq_puts(m, "histogram_ccw_queue_length ");
        dasd_stats_array(m, data->dasd_io_nr_req);
        seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
        seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
        seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
        seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
        seq_puts(m, "histogram_read_sectors ");
        dasd_stats_array(m, data->dasd_read_secs);
        seq_puts(m, "histogram_read_times ");
        dasd_stats_array(m, data->dasd_read_times);
        seq_puts(m, "histogram_read_time_build_to_ssch ");
        dasd_stats_array(m, data->dasd_read_time1);
        seq_puts(m, "histogram_read_time_ssch_to_irq ");
        dasd_stats_array(m, data->dasd_read_time2);
        seq_puts(m, "histogram_read_time_irq_to_end ");
        dasd_stats_array(m, data->dasd_read_time3);
        seq_puts(m, "histogram_read_ccw_queue_length ");
        dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
        struct dasd_profile *profile;
        struct dasd_profile_info *data;

        profile = m->private;
        spin_lock_bh(&profile->lock);
        data = profile->data;
        if (!data) {
                spin_unlock_bh(&profile->lock);
                seq_puts(m, "disabled\n");
                return 0;
        }
        dasd_stats_seq_print(m, data);
        spin_unlock_bh(&profile->lock);
        return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
        struct dasd_profile *profile = inode->i_private;
        return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
        .owner          = THIS_MODULE,
        .open           = dasd_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
                              struct dentry *base_dentry)
{
        umode_t mode;
        struct dentry *pde;

        if (!base_dentry)
                return;
        profile->dentry = NULL;
        profile->data = NULL;
        mode = (S_IRUSR | S_IWUSR | S_IFREG);
        pde = debugfs_create_file("statistics", mode, base_dentry,
                                  profile, &dasd_stats_raw_fops);
        if (pde && !IS_ERR(pde))
                profile->dentry = pde;
        return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
        dasd_profile_off(profile);
        debugfs_remove(profile->dentry);
        profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
        dasd_global_profile_level = DASD_PROFILE_OFF;
        dasd_profile_exit(&dasd_global_profile);
        debugfs_remove(dasd_debugfs_global_entry);
        debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
        struct dentry *pde;

        dasd_debugfs_root_entry = NULL;
        pde = debugfs_create_dir("dasd", NULL);
        if (!pde || IS_ERR(pde))
                goto error;
        dasd_debugfs_root_entry = pde;
        pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
        if (!pde || IS_ERR(pde))
                goto error;
        dasd_debugfs_global_entry = pde;
        dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
        return;

error:
        DBF_EVENT(DBF_ERR, "%s",
                  "Creation of the dasd debugfs interface failed");
        dasd_statistics_removeroot();
        return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
        return;
}

static void dasd_statistics_removeroot(void)
{
        return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
        seq_puts(m, "Statistics are not activated in this kernel\n");
        return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
                              struct dentry *base_dentry)
{
        return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
        return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
        return 0;
}

#endif                          /* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
        struct dasd_device *device;
        int rc = -EOPNOTSUPP;

        device = m->private;
        dasd_get_device(device);

        if (device->discipline->hosts_print)
                rc = device->discipline->hosts_print(device, m);

        dasd_put_device(device);
        return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);

static void dasd_hosts_exit(struct dasd_device *device)
{
        debugfs_remove(device->hosts_dentry);
        device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
                            struct dasd_device *device)
{
        struct dentry *pde;
        umode_t mode;

        if (!base_dentry)
                return;

        mode = S_IRUSR | S_IFREG;
        pde = debugfs_create_file("host_access_list", mode, base_dentry,
                                  device, &dasd_hosts_fops);
        if (pde && !IS_ERR(pde))
                device->hosts_dentry = pde;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
                                          struct dasd_device *device,
                                          struct dasd_ccw_req *cqr)
{
        unsigned long flags;
        char *data, *chunk;
        int size = 0;

        if (cplength > 0)
                size += cplength * sizeof(struct ccw1);
        if (datasize > 0)
                size += datasize;
        if (!cqr)
                size += (sizeof(*cqr) + 7L) & -8L;

        spin_lock_irqsave(&device->mem_lock, flags);
        data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        if (!chunk)
                return ERR_PTR(-ENOMEM);
        if (!cqr) {
                cqr = (void *) data;
                data += (sizeof(*cqr) + 7L) & -8L;
        }
        memset(cqr, 0, sizeof(*cqr));
        cqr->mem_chunk = chunk;
        if (cplength > 0) {
                cqr->cpaddr = data;
                data += cplength * sizeof(struct ccw1);
                memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
        }
        if (datasize > 0) {
                cqr->data = data;
                memset(cqr->data, 0, datasize);
        }
        cqr->magic = magic;
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
        unsigned long flags;

        spin_lock_irqsave(&device->mem_lock, flags);
        dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);
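
/*
 * A hedged usage sketch for the request allocators above; the magic
 * and sizes are illustrative only (each discipline passes its own
 * values, e.g. the eckd discipline uses its DASD_ECKD_MAGIC):
 *
 *   struct dasd_ccw_req *cqr;
 *
 *   cqr = dasd_smalloc_request(magic, 2, datasize, device, NULL);
 *   if (IS_ERR(cqr))
 *           return PTR_ERR(cqr);
 *   ...build the channel program in cqr->cpaddr / cqr->data...
 *   dasd_sfree_request(cqr, device);
 */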

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;

        if (cqr == NULL)
                return -EINVAL;
        device = cqr->startdev;
        if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
                DBF_DEV_EVENT(DBF_WARNING, device,
                            " dasd_ccw_req 0x%08x magic doesn't match"
                            " discipline 0x%08x",
                            cqr->magic,
                            *(unsigned int *) device->discipline->name);
                return -EINVAL;
        }
        return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps the device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int retries, rc;
        char errorstring[ERRORLENGTH];

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc)
                return rc;
        retries = 0;
        device = (struct dasd_device *) cqr->startdev;
        while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
                rc = ccw_device_clear(device->cdev, (long) cqr);
                switch (rc) {
                case 0: /* termination successful */
                        cqr->status = DASD_CQR_CLEAR_PENDING;
                        cqr->stopclk = get_tod_clock();
                        cqr->starttime = 0;
                        DBF_DEV_EVENT(DBF_DEBUG, device,
                                      "terminate cqr %p successful",
                                      cqr);
                        break;
                case -ENODEV:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device gone, retry");
                        break;
                case -EINVAL:
                        /*
                         * device not valid so no I/O could be running
                         * handle CQR as termination successful
                         */
                        cqr->status = DASD_CQR_CLEARED;
                        cqr->stopclk = get_tod_clock();
                        cqr->starttime = 0;
                        /* no retries for invalid devices */
                        cqr->retries = -1;
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "EINVAL, handle as terminated");
                        /* fake rc to success */
                        rc = 0;
                        break;
                default:
                        /* internal error 10 - unknown rc */
                        snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
                        dev_err(&device->cdev->dev, "An error occurred in the "
                                "DASD device driver, reason=%s\n", errorstring);
                        BUG();
                        break;
                }
                retries++;
        }
        dasd_schedule_device_bh(device);
        return rc;
}
EXPORT_SYMBOL(dasd_term_IO);
1355
1356/*
1357 * Start the i/o. This start_IO can fail if the channel is really busy.
1358 * In that case set up a timer to start the request later.
1359 */
1360int dasd_start_IO(struct dasd_ccw_req *cqr)
1361{
1362        struct dasd_device *device;
1363        int rc;
1364        char errorstring[ERRORLENGTH];
1365
1366        /* Check the cqr */
1367        rc = dasd_check_cqr(cqr);
1368        if (rc) {
1369                cqr->intrc = rc;
1370                return rc;
1371        }
1372        device = (struct dasd_device *) cqr->startdev;
1373        if (((cqr->block &&
1374              test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
1375             test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
1376            !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
1377                DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
1378                              "because of stolen lock", cqr);
1379                cqr->status = DASD_CQR_ERROR;
1380                cqr->intrc = -EPERM;
1381                return -EPERM;
1382        }
1383        if (cqr->retries < 0) {
1384                /* internal error 14 - start_IO run out of retries */
1385                sprintf(errorstring, "14 %p", cqr);
1386                dev_err(&device->cdev->dev, "An error occurred in the DASD "
1387                        "device driver, reason=%s\n", errorstring);
1388                cqr->status = DASD_CQR_ERROR;
1389                return -EIO;
1390        }
1391        cqr->startclk = get_tod_clock();
1392        cqr->starttime = jiffies;
1393        cqr->retries--;
1394        if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1395                cqr->lpm &= dasd_path_get_opm(device);
1396                if (!cqr->lpm)
1397                        cqr->lpm = dasd_path_get_opm(device);
1398        }
1399        if (cqr->cpmode == 1) {
1400                rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
1401                                         (long) cqr, cqr->lpm);
1402        } else {
1403                rc = ccw_device_start(device->cdev, cqr->cpaddr,
1404                                      (long) cqr, cqr->lpm, 0);
1405        }
1406        switch (rc) {
1407        case 0:
1408                cqr->status = DASD_CQR_IN_IO;
1409                break;
1410        case -EBUSY:
1411                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1412                              "start_IO: device busy, retry later");
1413                break;
1414        case -EACCES:
1415                /* -EACCES indicates that the request used only a subset of the
1416                 * available paths and all these paths are gone. If the lpm of
1417                 * this request was only a subset of the opm (e.g. the ppm) then
1418                 * we just do a retry with all available paths.
1419                 * If we already use the full opm, something is amiss, and we
1420                 * need a full path verification.
1421                 */
1422                if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1423                        DBF_DEV_EVENT(DBF_WARNING, device,
1424                                      "start_IO: selected paths gone (%x)",
1425                                      cqr->lpm);
1426                } else if (cqr->lpm != dasd_path_get_opm(device)) {
1427                        cqr->lpm = dasd_path_get_opm(device);
1428                        DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
1429                                      "start_IO: selected paths gone,"
1430                                      " retry on all paths");
1431                } else {
1432                        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1433                                      "start_IO: all paths in opm gone,"
1434                                      " do path verification");
1435                        dasd_generic_last_path_gone(device);
1436                        dasd_path_no_path(device);
1437                        dasd_path_set_tbvpm(device,
1438                                          ccw_device_get_path_mask(
1439                                                  device->cdev));
1440                }
1441                break;
1442        case -ENODEV:
1443                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1444                              "start_IO: -ENODEV device gone, retry");
1445                break;
1446        case -EIO:
1447                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1448                              "start_IO: -EIO device gone, retry");
1449                break;
1450        case -EINVAL:
1451                /* most likely caused in power management context */
1452                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1453                              "start_IO: -EINVAL device currently "
1454                              "not accessible");
1455                break;
1456        default:
1457                /* internal error 11 - unknown rc */
1458                snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
1459                dev_err(&device->cdev->dev,
1460                        "An error occurred in the DASD device driver, "
1461                        "reason=%s\n", errorstring);
1462                BUG();
1463                break;
1464        }
1465        cqr->intrc = rc;
1466        return rc;
1467}
1468EXPORT_SYMBOL(dasd_start_IO);
1469
1470/*
1471 * Timeout function for dasd devices. This is used for different purposes
1472 *  1) missing interrupt handler for normal operation
1473 *  2) delayed start of request where start_IO failed with -EBUSY
1474 *  3) timeout for missing state change interrupts
1475 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
1476 * DASD_CQR_QUEUED for 2) and 3).
1477 */
1478static void dasd_device_timeout(struct timer_list *t)
1479{
1480        unsigned long flags;
1481        struct dasd_device *device;
1482
1483        device = from_timer(device, t, timer);
1484        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1485        /* re-activate request queue */
1486        dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1487        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1488        dasd_schedule_device_bh(device);
1489}
1490
1491/*
1492 * Setup timeout for a device in jiffies.
1493 */
1494void dasd_device_set_timer(struct dasd_device *device, int expires)
1495{
1496        if (expires == 0)
1497                del_timer(&device->timer);
1498        else
1499                mod_timer(&device->timer, jiffies + expires);
1500}
1501EXPORT_SYMBOL(dasd_device_set_timer);
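
/*
 * Editor's sketch (illustrative only, not part of the driver source): a
 * caller typically arms the device timer with the request's expiry right
 * after a successful start, mirroring __dasd_device_start_head() below:
 *
 *	rc = device->discipline->start_IO(cqr);
 *	if (rc == 0)
 *		dasd_device_set_timer(device, cqr->expires);
 *
 * Passing an expiry of 0 deletes a pending timer again.
 */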
1502
1503/*
1504 * Clear timeout for a device.
1505 */
1506void dasd_device_clear_timer(struct dasd_device *device)
1507{
1508        del_timer(&device->timer);
1509}
1510EXPORT_SYMBOL(dasd_device_clear_timer);
1511
1512static void dasd_handle_killed_request(struct ccw_device *cdev,
1513                                       unsigned long intparm)
1514{
1515        struct dasd_ccw_req *cqr;
1516        struct dasd_device *device;
1517
1518        if (!intparm)
1519                return;
1520        cqr = (struct dasd_ccw_req *) intparm;
1521        if (cqr->status != DASD_CQR_IN_IO) {
1522                DBF_EVENT_DEVID(DBF_DEBUG, cdev,
1523                                "invalid status in handle_killed_request: "
1524                                "%02x", cqr->status);
1525                return;
1526        }
1527
1528        device = dasd_device_from_cdev_locked(cdev);
1529        if (IS_ERR(device)) {
1530                DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1531                                "unable to get device from cdev");
1532                return;
1533        }
1534
1535        if (!cqr->startdev ||
1536            device != cqr->startdev ||
1537            strncmp(cqr->startdev->discipline->ebcname,
1538                    (char *) &cqr->magic, 4)) {
1539                DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1540                                "invalid device in request");
1541                dasd_put_device(device);
1542                return;
1543        }
1544
1545        /* Schedule request to be retried. */
1546        cqr->status = DASD_CQR_QUEUED;
1547
1548        dasd_device_clear_timer(device);
1549        dasd_schedule_device_bh(device);
1550        dasd_put_device(device);
1551}
1552
1553void dasd_generic_handle_state_change(struct dasd_device *device)
1554{
1555        /* First of all start sense subsystem status request. */
1556        dasd_eer_snss(device);
1557
1558        dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1559        dasd_schedule_device_bh(device);
1560        if (device->block) {
1561                dasd_schedule_block_bh(device->block);
1562                if (device->block->request_queue)
1563                        blk_mq_run_hw_queues(device->block->request_queue,
1564                                             true);
1565        }
1566}
1567EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
1568
1569static int dasd_check_hpf_error(struct irb *irb)
1570{
1571        return (scsw_tm_is_valid_schxs(&irb->scsw) &&
1572            (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
1573             irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
1574}
1575
1576/*
1577 * Interrupt handler for "normal" ssch-io based dasd devices.
1578 */
1579void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1580                      struct irb *irb)
1581{
1582        struct dasd_ccw_req *cqr, *next;
1583        struct dasd_device *device;
1584        unsigned long now;
1585        int nrf_suppressed = 0;
1586        int fp_suppressed = 0;
1587        u8 *sense = NULL;
1588        int expires;
1589
1590        cqr = (struct dasd_ccw_req *) intparm;
1591        if (IS_ERR(irb)) {
1592                switch (PTR_ERR(irb)) {
1593                case -EIO:
1594                        if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
1595                                device = cqr->startdev;
1596                                cqr->status = DASD_CQR_CLEARED;
1597                                dasd_device_clear_timer(device);
1598                                wake_up(&dasd_flush_wq);
1599                                dasd_schedule_device_bh(device);
1600                                return;
1601                        }
1602                        break;
1603                case -ETIMEDOUT:
1604                        DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1605                                        "request timed out\n", __func__);
1606                        break;
1607                default:
1608                        DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1609                                        "unknown error %ld\n", __func__,
1610                                        PTR_ERR(irb));
1611                }
1612                dasd_handle_killed_request(cdev, intparm);
1613                return;
1614        }
1615
1616        now = get_tod_clock();
1617        /* check for conditions that should be handled immediately */
1618        if (!cqr ||
1619            !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1620              scsw_cstat(&irb->scsw) == 0)) {
1621                if (cqr)
1622                        memcpy(&cqr->irb, irb, sizeof(*irb));
1623                device = dasd_device_from_cdev_locked(cdev);
1624                if (IS_ERR(device))
1625                        return;
1626                /* ignore unsolicited interrupts for DIAG discipline */
1627                if (device->discipline == dasd_diag_discipline_pointer) {
1628                        dasd_put_device(device);
1629                        return;
1630                }
1631
1632                /*
1633                 * In some cases 'File Protected' or 'No Record Found' errors
1634                 * might be expected and debug log messages for the
1635                 * corresponding interrupts shouldn't be written then.
1636                 * Check if either of the corresponding suppress bits is set.
1637                 */
1638                sense = dasd_get_sense(irb);
1639                if (sense) {
1640                        fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
1641                                test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
1642                        nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
1643                                test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
1644                }
1645                if (!(fp_suppressed || nrf_suppressed))
1646                        device->discipline->dump_sense_dbf(device, irb, "int");
1647
1648                if (device->features & DASD_FEATURE_ERPLOG)
1649                        device->discipline->dump_sense(device, cqr, irb);
1650                device->discipline->check_for_device_change(device, cqr, irb);
1651                dasd_put_device(device);
1652        }
1653
1654        /* check for attention message */
1655        if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
1656                device = dasd_device_from_cdev_locked(cdev);
1657                if (!IS_ERR(device)) {
1658                        device->discipline->check_attention(device,
1659                                                            irb->esw.esw1.lpum);
1660                        dasd_put_device(device);
1661                }
1662        }
1663
1664        if (!cqr)
1665                return;
1666
1667        device = (struct dasd_device *) cqr->startdev;
1668        if (!device ||
1669            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1670                DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1671                                "invalid device in request");
1672                return;
1673        }
1674
1675        /* Check for clear pending */
1676        if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1677            scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1678                cqr->status = DASD_CQR_CLEARED;
1679                dasd_device_clear_timer(device);
1680                wake_up(&dasd_flush_wq);
1681                dasd_schedule_device_bh(device);
1682                return;
1683        }
1684
1685        /* check status - the request might have been killed by dyn detach */
1686        if (cqr->status != DASD_CQR_IN_IO) {
1687                DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1688                              "status %02x", dev_name(&cdev->dev), cqr->status);
1689                return;
1690        }
1691
1692        next = NULL;
1693        expires = 0;
1694        if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1695            scsw_cstat(&irb->scsw) == 0) {
1696                /* request was completed successfully */
1697                cqr->status = DASD_CQR_SUCCESS;
1698                cqr->stopclk = now;
1699                /* Start first request on queue if possible -> fast_io. */
1700                if (cqr->devlist.next != &device->ccw_queue) {
1701                        next = list_entry(cqr->devlist.next,
1702                                          struct dasd_ccw_req, devlist);
1703                }
1704        } else {  /* error */
1705                /* check for HPF error
1706                 * call discipline function to requeue all requests
1707                 * and disable HPF accordingly
1708                 */
1709                if (cqr->cpmode && dasd_check_hpf_error(irb) &&
1710                    device->discipline->handle_hpf_error)
1711                        device->discipline->handle_hpf_error(device, irb);
1712                /*
1713                 * If we don't want complex ERP for this request, then just
1714                 * reset this and retry it in the fastpath
1715                 */
1716                if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1717                    cqr->retries > 0) {
1718                        if (cqr->lpm == dasd_path_get_opm(device))
1719                                DBF_DEV_EVENT(DBF_DEBUG, device,
1720                                              "default ERP in fastpath "
1721                                              "(%i retries left)",
1722                                              cqr->retries);
1723                        if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
1724                                cqr->lpm = dasd_path_get_opm(device);
1725                        cqr->status = DASD_CQR_QUEUED;
1726                        next = cqr;
1727                } else
1728                        cqr->status = DASD_CQR_ERROR;
1729        }
1730        if (next && (next->status == DASD_CQR_QUEUED) &&
1731            (!device->stopped)) {
1732                if (device->discipline->start_IO(next) == 0)
1733                        expires = next->expires;
1734        }
1735        if (expires != 0)
1736                dasd_device_set_timer(device, expires);
1737        else
1738                dasd_device_clear_timer(device);
1739        dasd_schedule_device_bh(device);
1740}
1741EXPORT_SYMBOL(dasd_int_handler);
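
/*
 * Editor's sketch (illustrative only, not part of the driver source):
 * interrupts reach dasd_int_handler() through the common CCW device layer,
 * which invokes the handler hook of struct ccw_device, roughly:
 *
 *	cdev->handler = &dasd_int_handler;
 *
 * The intparm the channel subsystem hands back is the dasd_ccw_req pointer
 * that dasd_start_IO() passed to ccw_device_start(), which is why the
 * handler can recover the request with a simple cast.
 */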
1742
1743enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1744{
1745        struct dasd_device *device;
1746
1747        device = dasd_device_from_cdev_locked(cdev);
1748
1749        if (IS_ERR(device))
1750                goto out;
1751        if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1752           device->state != device->target ||
1753           !device->discipline->check_for_device_change){
1754                dasd_put_device(device);
1755                goto out;
1756        }
1757        if (device->discipline->dump_sense_dbf)
1758                device->discipline->dump_sense_dbf(device, irb, "uc");
1759        device->discipline->check_for_device_change(device, NULL, irb);
1760        dasd_put_device(device);
1761out:
1762        return UC_TODO_RETRY;
1763}
1764EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
1765
1766/*
1767 * If we have an error on a dasd_block layer request then we cancel
1768 * and return all further requests from the same dasd_block as well.
1769 */
1770static void __dasd_device_recovery(struct dasd_device *device,
1771                                   struct dasd_ccw_req *ref_cqr)
1772{
1773        struct list_head *l, *n;
1774        struct dasd_ccw_req *cqr;
1775
1776        /*
1777         * only requeue request that came from the dasd_block layer
1778         */
1779        if (!ref_cqr->block)
1780                return;
1781
1782        list_for_each_safe(l, n, &device->ccw_queue) {
1783                cqr = list_entry(l, struct dasd_ccw_req, devlist);
1784                if (cqr->status == DASD_CQR_QUEUED &&
1785                    ref_cqr->block == cqr->block) {
1786                        cqr->status = DASD_CQR_CLEARED;
1787                }
1788        }
1789}
1790
1791/*
1792 * Remove those ccw requests from the queue that need to be returned
1793 * to the upper layer.
1794 */
1795static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1796                                            struct list_head *final_queue)
1797{
1798        struct list_head *l, *n;
1799        struct dasd_ccw_req *cqr;
1800
1801        /* Process request with final status. */
1802        list_for_each_safe(l, n, &device->ccw_queue) {
1803                cqr = list_entry(l, struct dasd_ccw_req, devlist);
1804
1805                /* Skip any non-final request. */
1806                if (cqr->status == DASD_CQR_QUEUED ||
1807                    cqr->status == DASD_CQR_IN_IO ||
1808                    cqr->status == DASD_CQR_CLEAR_PENDING)
1809                        continue;
1810                if (cqr->status == DASD_CQR_ERROR) {
1811                        __dasd_device_recovery(device, cqr);
1812                }
1813                /* Rechain finished requests to final queue */
1814                list_move_tail(&cqr->devlist, final_queue);
1815        }
1816}
1817
1818static void __dasd_process_cqr(struct dasd_device *device,
1819                               struct dasd_ccw_req *cqr)
1820{
1821        char errorstring[ERRORLENGTH];
1822
1823        switch (cqr->status) {
1824        case DASD_CQR_SUCCESS:
1825                cqr->status = DASD_CQR_DONE;
1826                break;
1827        case DASD_CQR_ERROR:
1828                cqr->status = DASD_CQR_NEED_ERP;
1829                break;
1830        case DASD_CQR_CLEARED:
1831                cqr->status = DASD_CQR_TERMINATED;
1832                break;
1833        default:
1834                /* internal error 12 - wrong cqr status*/
1835                snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
1836                dev_err(&device->cdev->dev,
1837                        "An error occurred in the DASD device driver, "
1838                        "reason=%s\n", errorstring);
1839                BUG();
1840        }
1841        if (cqr->callback)
1842                cqr->callback(cqr, cqr->callback_data);
1843}
1844
1845/*
1846 * the cqrs from the final queue are returned to the upper layer
1847 * by setting a dasd_block state and calling the callback function
1848 */
1849static void __dasd_device_process_final_queue(struct dasd_device *device,
1850                                              struct list_head *final_queue)
1851{
1852        struct list_head *l, *n;
1853        struct dasd_ccw_req *cqr;
1854        struct dasd_block *block;
1855
1856        list_for_each_safe(l, n, final_queue) {
1857                cqr = list_entry(l, struct dasd_ccw_req, devlist);
1858                list_del_init(&cqr->devlist);
1859                block = cqr->block;
1860                if (!block) {
1861                        __dasd_process_cqr(device, cqr);
1862                } else {
1863                        spin_lock_bh(&block->queue_lock);
1864                        __dasd_process_cqr(device, cqr);
1865                        spin_unlock_bh(&block->queue_lock);
1866                }
1867        }
1868}
1869
1870/*
1871 * Take a look at the first request on the ccw queue and check
1872 * if it reached its expire time. If so, terminate the IO.
1873 */
1874static void __dasd_device_check_expire(struct dasd_device *device)
1875{
1876        struct dasd_ccw_req *cqr;
1877
1878        if (list_empty(&device->ccw_queue))
1879                return;
1880        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1881        if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1882            (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1883                if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
1884                        /*
1885                         * IO in safe offline processing should not
1886                         * run out of retries
1887                         */
1888                        cqr->retries++;
1889                }
1890                if (device->discipline->term_IO(cqr) != 0) {
1891                        /* Hmpf, try again in 5 sec */
1892                        dev_err(&device->cdev->dev,
1893                                "cqr %p timed out (%lus) but cannot be "
1894                                "ended, retrying in 5 s\n",
1895                                cqr, (cqr->expires/HZ));
1896                        cqr->expires += 5*HZ;
1897                        dasd_device_set_timer(device, 5*HZ);
1898                } else {
1899                        dev_err(&device->cdev->dev,
1900                                "cqr %p timed out (%lus), %i retries "
1901                                "remaining\n", cqr, (cqr->expires/HZ),
1902                                cqr->retries);
1903                }
1904        }
1905}
1906
1907/*
1908 * return 1 when device is not eligible for IO
1909 */
1910static int __dasd_device_is_unusable(struct dasd_device *device,
1911                                     struct dasd_ccw_req *cqr)
1912{
1913        int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
1914
1915        if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
1916            !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
1917                /*
1918                 * dasd is being set offline, but it is not a safe
1919                 * offline, where we would still have to allow I/O
1920                 */
1921                return 1;
1922        }
1923        if (device->stopped) {
1924                if (device->stopped & mask) {
1925                        /* stopped and CQR will not change that. */
1926                        return 1;
1927                }
1928                if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1929                        /* CQR is not able to change device to
1930                         * operational. */
1931                        return 1;
1932                }
1933                /* CQR required to get device operational. */
1934        }
1935        return 0;
1936}
1937
1938/*
1939 * Take a look at the first request on the ccw queue and check
1940 * if it needs to be started.
1941 */
1942static void __dasd_device_start_head(struct dasd_device *device)
1943{
1944        struct dasd_ccw_req *cqr;
1945        int rc;
1946
1947        if (list_empty(&device->ccw_queue))
1948                return;
1949        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1950        if (cqr->status != DASD_CQR_QUEUED)
1951                return;
1952        /* if device is not usable return request to upper layer */
1953        if (__dasd_device_is_unusable(device, cqr)) {
1954                cqr->intrc = -EAGAIN;
1955                cqr->status = DASD_CQR_CLEARED;
1956                dasd_schedule_device_bh(device);
1957                return;
1958        }
1959
1960        rc = device->discipline->start_IO(cqr);
1961        if (rc == 0)
1962                dasd_device_set_timer(device, cqr->expires);
1963        else if (rc == -EACCES) {
1964                dasd_schedule_device_bh(device);
1965        } else
1966                /* Hmpf, try again in 1/2 sec */
1967                dasd_device_set_timer(device, 50);
1968}
1969
1970static void __dasd_device_check_path_events(struct dasd_device *device)
1971{
1972        int rc;
1973
1974        if (!dasd_path_get_tbvpm(device))
1975                return;
1976
1977        if (device->stopped &
1978            ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
1979                return;
1980        rc = device->discipline->verify_path(device,
1981                                             dasd_path_get_tbvpm(device));
1982        if (rc)
1983                dasd_device_set_timer(device, 50);
1984        else
1985                dasd_path_clear_all_verify(device);
1986}
1987
1988/*
1989 * Go through all request on the dasd_device request queue,
1990 * terminate them on the cdev if necessary, and return them to the
1991 * submitting layer via callback.
1992 * Note:
1993 * Make sure that all 'submitting layers' still exist when
1994 * this function is called! In other words, when 'device' is a base
1995 * device, then all block layer requests must have been removed
1996 * beforehand via dasd_flush_block_queue.
1997 */
1998int dasd_flush_device_queue(struct dasd_device *device)
1999{
2000        struct dasd_ccw_req *cqr, *n;
2001        int rc;
2002        struct list_head flush_queue;
2003
2004        INIT_LIST_HEAD(&flush_queue);
2005        spin_lock_irq(get_ccwdev_lock(device->cdev));
2006        rc = 0;
2007        list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2008                /* Check status and move request to flush_queue */
2009                switch (cqr->status) {
2010                case DASD_CQR_IN_IO:
2011                        rc = device->discipline->term_IO(cqr);
2012                        if (rc) {
2013                                /* unable to terminate request */
2014                                dev_err(&device->cdev->dev,
2015                                        "Flushing the DASD request queue "
2016                                        "failed for request %p\n", cqr);
2017                                /* stop flush processing */
2018                                goto finished;
2019                        }
2020                        break;
2021                case DASD_CQR_QUEUED:
2022                        cqr->stopclk = get_tod_clock();
2023                        cqr->status = DASD_CQR_CLEARED;
2024                        break;
2025                default: /* no need to modify the others */
2026                        break;
2027                }
2028                list_move_tail(&cqr->devlist, &flush_queue);
2029        }
2030finished:
2031        spin_unlock_irq(get_ccwdev_lock(device->cdev));
2032        /*
2033         * After this point all requests must be in state CLEAR_PENDING,
2034         * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
2035         * one of the others.
2036         */
2037        list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
2038                wait_event(dasd_flush_wq,
2039                           (cqr->status != DASD_CQR_CLEAR_PENDING));
2040        /*
2041         * Now set each request back to TERMINATED, DONE or NEED_ERP
2042         * and call the callback function of flushed requests
2043         */
2044        __dasd_device_process_final_queue(device, &flush_queue);
2045        return rc;
2046}
2047EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
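
/*
 * Editor's sketch (illustrative only): for a base device with a dasd_block
 * on top, the ordering required by the comment above means teardown drains
 * the block queue before the device queue:
 *
 *	dasd_flush_block_queue(device->block);
 *	dasd_flush_device_queue(device);
 */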
2048
2049/*
2050 * Acquire the device lock and process queues for the device.
2051 */
2052static void dasd_device_tasklet(unsigned long data)
2053{
2054        struct dasd_device *device = (struct dasd_device *) data;
2055        struct list_head final_queue;
2056
2057        atomic_set (&device->tasklet_scheduled, 0);
2058        INIT_LIST_HEAD(&final_queue);
2059        spin_lock_irq(get_ccwdev_lock(device->cdev));
2060        /* Check expire time of first request on the ccw queue. */
2061        __dasd_device_check_expire(device);
2062        /* find final requests on ccw queue */
2063        __dasd_device_process_ccw_queue(device, &final_queue);
2064        __dasd_device_check_path_events(device);
2065        spin_unlock_irq(get_ccwdev_lock(device->cdev));
2066        /* Now call the callback function of requests with final status */
2067        __dasd_device_process_final_queue(device, &final_queue);
2068        spin_lock_irq(get_ccwdev_lock(device->cdev));
2069        /* Now check if the head of the ccw queue needs to be started. */
2070        __dasd_device_start_head(device);
2071        spin_unlock_irq(get_ccwdev_lock(device->cdev));
2072        if (waitqueue_active(&shutdown_waitq))
2073                wake_up(&shutdown_waitq);
2074        dasd_put_device(device);
2075}
2076
2077/*
2078 * Schedules a call to dasd_device_tasklet over the device tasklet.
2079 */
2080void dasd_schedule_device_bh(struct dasd_device *device)
2081{
2082        /* Protect against rescheduling. */
2083        if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
2084                return;
2085        dasd_get_device(device);
2086        tasklet_hi_schedule(&device->tasklet);
2087}
2088EXPORT_SYMBOL(dasd_schedule_device_bh);
2089
2090void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
2091{
2092        device->stopped |= bits;
2093}
2094EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
2095
2096void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
2097{
2098        device->stopped &= ~bits;
2099        if (!device->stopped)
2100                wake_up(&generic_waitq);
2101}
2102EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
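
/*
 * Editor's sketch (illustrative only): stop bits are manipulated under the
 * ccw device lock; clearing the last bit wakes waiters on generic_waitq,
 * and the bottom half is kicked to restart the queue, as in
 * dasd_device_timeout() above:
 *
 *	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 *	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
 *	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 *	dasd_schedule_device_bh(device);
 */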
2103
2104/*
2105 * Queue a request to the head of the device ccw_queue.
2106 * Start the I/O if possible.
2107 */
2108void dasd_add_request_head(struct dasd_ccw_req *cqr)
2109{
2110        struct dasd_device *device;
2111        unsigned long flags;
2112
2113        device = cqr->startdev;
2114        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2115        cqr->status = DASD_CQR_QUEUED;
2116        list_add(&cqr->devlist, &device->ccw_queue);
2117        /* let the bh start the requests to keep them in order */
2118        dasd_schedule_device_bh(device);
2119        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2120}
2121EXPORT_SYMBOL(dasd_add_request_head);
2122
2123/*
2124 * Queue a request to the tail of the device ccw_queue.
2125 * Start the I/O if possible.
2126 */
2127void dasd_add_request_tail(struct dasd_ccw_req *cqr)
2128{
2129        struct dasd_device *device;
2130        unsigned long flags;
2131
2132        device = cqr->startdev;
2133        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2134        cqr->status = DASD_CQR_QUEUED;
2135        list_add_tail(&cqr->devlist, &device->ccw_queue);
2136        /* let the bh start the requests to keep them in order */
2137        dasd_schedule_device_bh(device);
2138        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2139}
2140EXPORT_SYMBOL(dasd_add_request_tail);
2141
2142/*
2143 * Wakeup helper for the 'sleep_on' functions.
2144 */
2145void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
2146{
2147        spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2148        cqr->callback_data = DASD_SLEEPON_END_TAG;
2149        spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2150        wake_up(&generic_waitq);
2151}
2152EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
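
/*
 * Editor's sketch (illustrative only): the sleep_on family pairs this
 * callback with _wait_for_wakeup() to implement synchronous submission:
 *
 *	cqr->callback = dasd_wakeup_cb;
 *	cqr->callback_data = DASD_SLEEPON_START_TAG;
 *	dasd_add_request_tail(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 */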
2153
2154static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
2155{
2156        struct dasd_device *device;
2157        int rc;
2158
2159        device = cqr->startdev;
2160        spin_lock_irq(get_ccwdev_lock(device->cdev));
2161        rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
2162        spin_unlock_irq(get_ccwdev_lock(device->cdev));
2163        return rc;
2164}
2165
2166/*
2167 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
2168 */
2169static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
2170{
2171        struct dasd_device *device;
2172        dasd_erp_fn_t erp_fn;
2173
2174        if (cqr->status == DASD_CQR_FILLED)
2175                return 0;
2176        device = cqr->startdev;
2177        if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2178                if (cqr->status == DASD_CQR_TERMINATED) {
2179                        device->discipline->handle_terminated_request(cqr);
2180                        return 1;
2181                }
2182                if (cqr->status == DASD_CQR_NEED_ERP) {
2183                        erp_fn = device->discipline->erp_action(cqr);
2184                        erp_fn(cqr);
2185                        return 1;
2186                }
2187                if (cqr->status == DASD_CQR_FAILED)
2188                        dasd_log_sense(cqr, &cqr->irb);
2189                if (cqr->refers) {
2190                        __dasd_process_erp(device, cqr);
2191                        return 1;
2192                }
2193        }
2194        return 0;
2195}
2196
2197static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
2198{
2199        if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2200                if (cqr->refers) /* erp is not done yet */
2201                        return 1;
2202                return ((cqr->status != DASD_CQR_DONE) &&
2203                        (cqr->status != DASD_CQR_FAILED));
2204        } else
2205                return (cqr->status == DASD_CQR_FILLED);
2206}
2207
2208static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
2209{
2210        struct dasd_device *device;
2211        int rc;
2212        struct list_head ccw_queue;
2213        struct dasd_ccw_req *cqr;
2214
2215        INIT_LIST_HEAD(&ccw_queue);
2216        maincqr->status = DASD_CQR_FILLED;
2217        device = maincqr->startdev;
2218        list_add(&maincqr->blocklist, &ccw_queue);
2219        for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
2220             cqr = list_first_entry(&ccw_queue,
2221                                    struct dasd_ccw_req, blocklist)) {
2222
2223                if (__dasd_sleep_on_erp(cqr))
2224                        continue;
2225                if (cqr->status != DASD_CQR_FILLED) /* could be failed */
2226                        continue;
2227                if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2228                    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2229                        cqr->status = DASD_CQR_FAILED;
2230                        cqr->intrc = -EPERM;
2231                        continue;
2232                }
2233                /* Non-temporary stop condition will trigger fail fast */
2234                if (device->stopped & ~DASD_STOPPED_PENDING &&
2235                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2236                    (!dasd_eer_enabled(device))) {
2237                        cqr->status = DASD_CQR_FAILED;
2238                        cqr->intrc = -ENOLINK;
2239                        continue;
2240                }
2241                /*
2242                 * Don't try to start requests if device is in
2243                 * offline processing, it might wait forever
2244                 */
2245                if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2246                        cqr->status = DASD_CQR_FAILED;
2247                        cqr->intrc = -ENODEV;
2248                        continue;
2249                }
2250                /*
2251                 * Don't try to start requests if device is stopped
2252                 * except path verification requests
2253                 */
2254                if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2255                        if (interruptible) {
2256                                rc = wait_event_interruptible(
2257                                        generic_waitq, !(device->stopped));
2258                                if (rc == -ERESTARTSYS) {
2259                                        cqr->status = DASD_CQR_FAILED;
2260                                        maincqr->intrc = rc;
2261                                        continue;
2262                                }
2263                        } else
2264                                wait_event(generic_waitq, !(device->stopped));
2265                }
2266                if (!cqr->callback)
2267                        cqr->callback = dasd_wakeup_cb;
2268
2269                cqr->callback_data = DASD_SLEEPON_START_TAG;
2270                dasd_add_request_tail(cqr);
2271                if (interruptible) {
2272                        rc = wait_event_interruptible(
2273                                generic_waitq, _wait_for_wakeup(cqr));
2274                        if (rc == -ERESTARTSYS) {
2275                                dasd_cancel_req(cqr);
2276                                /* wait (non-interruptible) for final status */
2277                                wait_event(generic_waitq,
2278                                           _wait_for_wakeup(cqr));
2279                                cqr->status = DASD_CQR_FAILED;
2280                                maincqr->intrc = rc;
2281                                continue;
2282                        }
2283                } else
2284                        wait_event(generic_waitq, _wait_for_wakeup(cqr));
2285        }
2286
2287        maincqr->endclk = get_tod_clock();
2288        if ((maincqr->status != DASD_CQR_DONE) &&
2289            (maincqr->intrc != -ERESTARTSYS))
2290                dasd_log_sense(maincqr, &maincqr->irb);
2291        if (maincqr->status == DASD_CQR_DONE)
2292                rc = 0;
2293        else if (maincqr->intrc)
2294                rc = maincqr->intrc;
2295        else
2296                rc = -EIO;
2297        return rc;
2298}
2299
2300static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
2301{
2302        struct dasd_ccw_req *cqr;
2303
2304        list_for_each_entry(cqr, ccw_queue, blocklist) {
2305                if (cqr->callback_data != DASD_SLEEPON_END_TAG)
2306                        return 0;
2307        }
2308
2309        return 1;
2310}
2311
2312static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
2313{
2314        struct dasd_device *device;
2315        struct dasd_ccw_req *cqr, *n;
2316        u8 *sense = NULL;
2317        int rc;
2318
2319retry:
2320        list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2321                device = cqr->startdev;
2322                if (cqr->status != DASD_CQR_FILLED) /* could be failed */
2323                        continue;
2324
2325                if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2326                    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2327                        cqr->status = DASD_CQR_FAILED;
2328                        cqr->intrc = -EPERM;
2329                        continue;
2330                }
2331                /* Non-temporary stop condition will trigger fail fast */
2332                if (device->stopped & ~DASD_STOPPED_PENDING &&
2333                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2334                    !dasd_eer_enabled(device)) {
2335                        cqr->status = DASD_CQR_FAILED;
2336                        cqr->intrc = -EAGAIN;
2337                        continue;
2338                }
2339
2340                /* Don't try to start requests if device is stopped */
2341                if (interruptible) {
2342                        rc = wait_event_interruptible(
2343                                generic_waitq, !device->stopped);
2344                        if (rc == -ERESTARTSYS) {
2345                                cqr->status = DASD_CQR_FAILED;
2346                                cqr->intrc = rc;
2347                                continue;
2348                        }
2349                } else
2350                        wait_event(generic_waitq, !(device->stopped));
2351
2352                if (!cqr->callback)
2353                        cqr->callback = dasd_wakeup_cb;
2354                cqr->callback_data = DASD_SLEEPON_START_TAG;
2355                dasd_add_request_tail(cqr);
2356        }
2357
2358        wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
2359
2360        rc = 0;
2361        list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2362                /*
2363                 * In some cases the 'File Protected' or 'Incorrect Length'
2364                 * error might be expected and error recovery would then be
2365                 * unnecessary.  Check if the corresponding suppress bit is
2366                 * set.
2367                 */
2368                sense = dasd_get_sense(&cqr->irb);
2369                if (sense && sense[1] & SNS1_FILE_PROTECTED &&
2370                    test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
2371                        continue;
2372                if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
2373                    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
2374                        continue;
2375
2376                /*
2377                 * for alias devices simplify error recovery and
2378                 * return to upper layer
2379                 * do not skip ERP requests
2380                 */
2381                if (cqr->startdev != cqr->basedev && !cqr->refers &&
2382                    (cqr->status == DASD_CQR_TERMINATED ||
2383                     cqr->status == DASD_CQR_NEED_ERP))
2384                        return -EAGAIN;
2385
2386                /* normal recovery for basedev IO */
2387                if (__dasd_sleep_on_erp(cqr))
2388                        /* handle erp first */
2389                        goto retry;
2390        }
2391
2392        return 0;
2393}
2394
2395/*
2396 * Queue a request to the tail of the device ccw_queue and wait for
2397 * its completion.
2398 */
2399int dasd_sleep_on(struct dasd_ccw_req *cqr)
2400{
2401        return _dasd_sleep_on(cqr, 0);
2402}
2403EXPORT_SYMBOL(dasd_sleep_on);
2404
2405/*
2406 * Start requests from a ccw_queue and wait for their completion.
2407 */
2408int dasd_sleep_on_queue(struct list_head *ccw_queue)
2409{
2410        return _dasd_sleep_on_queue(ccw_queue, 0);
2411}
2412EXPORT_SYMBOL(dasd_sleep_on_queue);
2413
2414/*
2415 * Queue a request to the tail of the device ccw_queue and wait
2416 * interruptibly for its completion.
2417 */
2418int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
2419{
2420        return _dasd_sleep_on(cqr, 1);
2421}
2422EXPORT_SYMBOL(dasd_sleep_on_interruptible);
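
/*
 * Editor's sketch (illustrative only): a discipline submits a fully built
 * cqr synchronously like this; the return value is 0 when the request ends
 * in DASD_CQR_DONE, cqr->intrc if that is set, and -EIO otherwise:
 *
 *	rc = dasd_sleep_on(cqr);
 *	if (rc)
 *		return rc;
 *
 * dasd_sleep_on_interruptible() is used the same way where the wait may be
 * aborted by a signal.
 */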
2423
2424/*
2425 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
2426 * for eckd devices) the currently running request has to be terminated
2427 * and put back to status queued before the special request is added
2428 * to the head of the queue. Then the special request is waited on normally.
2429 */
2430static inline int _dasd_term_running_cqr(struct dasd_device *device)
2431{
2432        struct dasd_ccw_req *cqr;
2433        int rc;
2434
2435        if (list_empty(&device->ccw_queue))
2436                return 0;
2437        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2438        rc = device->discipline->term_IO(cqr);
2439        if (!rc)
2440                /*
2441                 * CQR terminated because a more important request is pending.
2442                 * Undo decreasing of retry counter because this is
2443                 * not an error case.
2444                 */
2445                cqr->retries++;
2446        return rc;
2447}
2448
2449int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
2450{
2451        struct dasd_device *device;
2452        int rc;
2453
2454        device = cqr->startdev;
2455        if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2456            !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2457                cqr->status = DASD_CQR_FAILED;
2458                cqr->intrc = -EPERM;
2459                return -EIO;
2460        }
2461        spin_lock_irq(get_ccwdev_lock(device->cdev));
2462        rc = _dasd_term_running_cqr(device);
2463        if (rc) {
2464                spin_unlock_irq(get_ccwdev_lock(device->cdev));
2465                return rc;
2466        }
2467        cqr->callback = dasd_wakeup_cb;
2468        cqr->callback_data = DASD_SLEEPON_START_TAG;
2469        cqr->status = DASD_CQR_QUEUED;
2470        /*
2471         * add new request as second
2472         * first the terminated cqr needs to be finished
2473         */
2474        list_add(&cqr->devlist, device->ccw_queue.next);
2475
2476        /* let the bh start the requests to keep them in order */
2477        dasd_schedule_device_bh(device);
2478
2479        spin_unlock_irq(get_ccwdev_lock(device->cdev));
2480
2481        wait_event(generic_waitq, _wait_for_wakeup(cqr));
2482
2483        if (cqr->status == DASD_CQR_DONE)
2484                rc = 0;
2485        else if (cqr->intrc)
2486                rc = cqr->intrc;
2487        else
2488                rc = -EIO;
2489
2490        /* kick tasklets */
2491        dasd_schedule_device_bh(device);
2492        if (device->block)
2493                dasd_schedule_block_bh(device->block);
2494
2495        return rc;
2496}
2497EXPORT_SYMBOL(dasd_sleep_on_immediatly);
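
/*
 * Editor's note (illustrative only): usage mirrors dasd_sleep_on(); the
 * difference is that a request running at the head of the queue is
 * terminated first, so the urgent cqr is started as soon as possible:
 *
 *	rc = dasd_sleep_on_immediatly(cqr);
 */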
2498
2499/*
2500 * Cancels a request that was started with dasd_sleep_on.
2501 * This is useful to time out requests. The request will be
2502 * terminated if it is currently in i/o.
2503 * Returns 0 if request termination was successful
2504 *         negative error code if termination failed
2505 * Cancellation of a request is an asynchronous operation! The calling
2506 * function has to wait until the request is properly returned via callback.
2507 */
2508static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
2509{
2510        struct dasd_device *device = cqr->startdev;
2511        int rc = 0;
2512
2513        switch (cqr->status) {
2514        case DASD_CQR_QUEUED:
2515                /* request was not started - just set to cleared */
2516                cqr->status = DASD_CQR_CLEARED;
2517                break;
2518        case DASD_CQR_IN_IO:
2519                /* request in IO - terminate IO and release again */
2520                rc = device->discipline->term_IO(cqr);
2521                if (rc) {
2522                        dev_err(&device->cdev->dev,
2523                                "Cancelling request %p failed with rc=%d\n",
2524                                cqr, rc);
2525                } else {
2526                        cqr->stopclk = get_tod_clock();
2527                }
2528                break;
2529        default: /* already finished or clear pending - do nothing */
2530                break;
2531        }
2532        dasd_schedule_device_bh(device);
2533        return rc;
2534}
2535
2536int dasd_cancel_req(struct dasd_ccw_req *cqr)
2537{
2538        struct dasd_device *device = cqr->startdev;
2539        unsigned long flags;
2540        int rc;
2541
2542        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2543        rc = __dasd_cancel_req(cqr);
2544        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2545        return rc;
2546}
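
/*
 * Editor's sketch (illustrative only): because cancellation is asynchronous,
 * a caller that times out a sleep_on request cancels it and then waits
 * (non-interruptibly) for the final callback, as _dasd_sleep_on() does:
 *
 *	dasd_cancel_req(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 */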
2547
2548/*
2549 * SECTION: Operations of the dasd_block layer.
2550 */
2551
2552/*
2553 * Timeout function for dasd_block. This is used when the block layer
2554 * is waiting for something that may not come reliably (e.g. a state
2555 * change interrupt)
2556 */
2557static void dasd_block_timeout(struct timer_list *t)
2558{
2559        unsigned long flags;
2560        struct dasd_block *block;
2561
2562        block = from_timer(block, t, timer);
2563        spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
2564        /* re-activate request queue */
2565        dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
2566        spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
2567        dasd_schedule_block_bh(block);
2568        blk_mq_run_hw_queues(block->request_queue, true);
2569}
2570
2571/*
2572 * Setup timeout for a dasd_block in jiffies.
2573 */
2574void dasd_block_set_timer(struct dasd_block *block, int expires)
2575{
2576        if (expires == 0)
2577                del_timer(&block->timer);
2578        else
2579                mod_timer(&block->timer, jiffies + expires);
2580}
2581EXPORT_SYMBOL(dasd_block_set_timer);
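
/*
 * Editor's sketch (illustrative only): like the device timer, the block
 * timer takes an expiry in jiffies and is deleted by passing 0:
 *
 *	dasd_block_set_timer(block, HZ);	(re-check in one second)
 *	dasd_block_set_timer(block, 0);		(cancel)
 */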
2582
2583/*
2584 * Clear timeout for a dasd_block.
2585 */
2586void dasd_block_clear_timer(struct dasd_block *block)
2587{
2588        del_timer(&block->timer);
2589}
2590EXPORT_SYMBOL(dasd_block_clear_timer);
2591
2592/*
2593 * Process finished error recovery ccw.
2594 */
2595static void __dasd_process_erp(struct dasd_device *device,
2596                               struct dasd_ccw_req *cqr)
2597{
2598        dasd_erp_fn_t erp_fn;
2599
2600        if (cqr->status == DASD_CQR_DONE)
2601                DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
2602        else
2603                dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
2604        erp_fn = device->discipline->erp_postaction(cqr);
2605        erp_fn(cqr);
2606}
2607
2608static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
2609{
2610        struct request *req;
2611        blk_status_t error = BLK_STS_OK;
2612        int status;
2613
2614        req = (struct request *) cqr->callback_data;
2615        dasd_profile_end(cqr->block, cqr, req);
2616
2617        status = cqr->block->base->discipline->free_cp(cqr, req);
2618        if (status < 0)
2619                error = errno_to_blk_status(status);
2620        else if (status == 0) {
2621                switch (cqr->intrc) {
2622                case -EPERM:
2623                        error = BLK_STS_NEXUS;
2624                        break;
2625                case -ENOLINK:
2626                        error = BLK_STS_TRANSPORT;
2627                        break;
2628                case -ETIMEDOUT:
2629                        error = BLK_STS_TIMEOUT;
2630                        break;
2631                default:
2632                        error = BLK_STS_IOERR;
2633                        break;
2634                }
2635        }
2636
2637        /*
2638         * We need to take care of ETIMEDOUT errors here since the
2639         * complete callback does not get called in this case.
2640         * Take care of all errors here and avoid additional code to
2641         * transfer the error value to the complete callback.
2642         */
2643        if (error) {
2644                blk_mq_end_request(req, error);
2645                blk_mq_run_hw_queues(req->q, true);
2646        } else {
2647                blk_mq_complete_request(req);
2648        }
2649}
2650
2651/*
2652 * Process ccw request queue.
2653 */
2654static void __dasd_process_block_ccw_queue(struct dasd_block *block,
2655                                           struct list_head *final_queue)
2656{
2657        struct list_head *l, *n;
2658        struct dasd_ccw_req *cqr;
2659        dasd_erp_fn_t erp_fn;
2660        unsigned long flags;
2661        struct dasd_device *base = block->base;
2662
2663restart:
2664        /* Process request with final status. */
2665        list_for_each_safe(l, n, &block->ccw_queue) {
2666                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2667                if (cqr->status != DASD_CQR_DONE &&
2668                    cqr->status != DASD_CQR_FAILED &&
2669                    cqr->status != DASD_CQR_NEED_ERP &&
2670                    cqr->status != DASD_CQR_TERMINATED)
2671                        continue;
2672
2673                if (cqr->status == DASD_CQR_TERMINATED) {
2674                        base->discipline->handle_terminated_request(cqr);
2675                        goto restart;
2676                }
2677
2678                /*  Process requests that may be recovered */
2679                if (cqr->status == DASD_CQR_NEED_ERP) {
2680                        erp_fn = base->discipline->erp_action(cqr);
2681                        if (IS_ERR(erp_fn(cqr)))
2682                                continue;
2683                        goto restart;
2684                }
2685
2686                /* log sense for fatal error */
2687                if (cqr->status == DASD_CQR_FAILED) {
2688                        dasd_log_sense(cqr, &cqr->irb);
2689                }
2690
2691                /* First of all call extended error reporting. */
2692                if (dasd_eer_enabled(base) &&
2693                    cqr->status == DASD_CQR_FAILED) {
2694                        dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
2695
2696                        /* restart request  */
2697                        cqr->status = DASD_CQR_FILLED;
2698                        cqr->retries = 255;
2699                        spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
2700                        dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
2701                        spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
2702                                               flags);
2703                        goto restart;
2704                }
2705
2706                /* Process finished ERP request. */
2707                if (cqr->refers) {
2708                        __dasd_process_erp(base, cqr);
2709                        goto restart;
2710                }
2711
2712                /* Rechain finished requests to final queue */
2713                cqr->endclk = get_tod_clock();
2714                list_move_tail(&cqr->blocklist, final_queue);
2715        }
2716}
2717
2718static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
2719{
2720        dasd_schedule_block_bh(cqr->block);
2721}
2722
2723static void __dasd_block_start_head(struct dasd_block *block)
2724{
2725        struct dasd_ccw_req *cqr;
2726
2727        if (list_empty(&block->ccw_queue))
2728                return;
2729        /* We always begin with the first request on the queue, as some
2730         * of the previously started requests have to be enqueued on a
2731         * dasd_device again for error recovery.
2732         */
2733        list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
2734                if (cqr->status != DASD_CQR_FILLED)
2735                        continue;
2736                if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
2737                    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2738                        cqr->status = DASD_CQR_FAILED;
2739                        cqr->intrc = -EPERM;
2740                        dasd_schedule_block_bh(block);
2741                        continue;
2742                }
2743                /* Non-temporary stop condition will trigger fail fast */
2744                if (block->base->stopped & ~DASD_STOPPED_PENDING &&
2745                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2746                    (!dasd_eer_enabled(block->base))) {
2747                        cqr->status = DASD_CQR_FAILED;
2748                        cqr->intrc = -ENOLINK;
2749                        dasd_schedule_block_bh(block);
2750                        continue;
2751                }
2752                /* Don't try to start requests if device is stopped */
2753                if (block->base->stopped)
2754                        return;
2755
2756                /* just a fail-safe check, should not happen */
2757                if (!cqr->startdev)
2758                        cqr->startdev = block->base;
2759
2760                /* make sure that the requests we submit find their way back */
2761                cqr->callback = dasd_return_cqr_cb;
2762
2763                dasd_add_request_tail(cqr);
2764        }
2765}
2766
2767/*
2768 * Central dasd_block layer routine. Takes requests from the generic
2769 * block layer request queue, creates ccw requests, enqueues them on
2770 * a dasd_device and processes ccw requests that have been returned.
2771 */
2772static void dasd_block_tasklet(unsigned long data)
2773{
2774        struct dasd_block *block = (struct dasd_block *) data;
2775        struct list_head final_queue;
2776        struct list_head *l, *n;
2777        struct dasd_ccw_req *cqr;
2778        struct dasd_queue *dq;
2779
2780        atomic_set(&block->tasklet_scheduled, 0);
2781        INIT_LIST_HEAD(&final_queue);
2782        spin_lock_irq(&block->queue_lock);
2783        /* Finish off requests on ccw queue */
2784        __dasd_process_block_ccw_queue(block, &final_queue);
2785        spin_unlock_irq(&block->queue_lock);
2786
2787        /* Now call the callback function of requests with final status */
2788        list_for_each_safe(l, n, &final_queue) {
2789                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2790                dq = cqr->dq;
2791                spin_lock_irq(&dq->lock);
2792                list_del_init(&cqr->blocklist);
2793                __dasd_cleanup_cqr(cqr);
2794                spin_unlock_irq(&dq->lock);
2795        }
2796
2797        spin_lock_irq(&block->queue_lock);
2798        /* Now check if the head of the ccw queue needs to be started. */
2799        __dasd_block_start_head(block);
2800        spin_unlock_irq(&block->queue_lock);
2801
2802        if (waitqueue_active(&shutdown_waitq))
2803                wake_up(&shutdown_waitq);
2804        dasd_put_device(block->base);
2805}
2806
2807static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
2808{
2809        wake_up(&dasd_flush_wq);
2810}
2811
2812/*
2813 * Requeue a request back to the block request queue
2814 * only works for block requests
2815 */
2816static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
2817{
2818        struct dasd_block *block = cqr->block;
2819        struct request *req;
2820
2821        if (!block)
2822                return -EINVAL;
2823        spin_lock_irq(&cqr->dq->lock);
2824        req = (struct request *) cqr->callback_data;
2825        blk_mq_requeue_request(req, false);
2826        spin_unlock_irq(&cqr->dq->lock);
2827
2828        return 0;
2829}
2830
2831/*
2832 * Go through all request on the dasd_block request queue, cancel them
2833 * on the respective dasd_device, and return them to the generic
2834 * block layer.
2835 */
2836static int dasd_flush_block_queue(struct dasd_block *block)
2837{
2838        struct dasd_ccw_req *cqr, *n;
2839        int rc, i;
2840        struct list_head flush_queue;
2841        unsigned long flags;
2842
2843        INIT_LIST_HEAD(&flush_queue);
2844        spin_lock_bh(&block->queue_lock);
2845        rc = 0;
2846restart:
2847        list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
2848                /* if this request is currently owned by a dasd_device, cancel it */
2849                if (cqr->status >= DASD_CQR_QUEUED)
2850                        rc = dasd_cancel_req(cqr);
2851                if (rc < 0)
2852                        break;
2853                /* Rechain request (including erp chain) so it won't be
2854                 * touched by the dasd_block_tasklet anymore.
2855                 * Replace the callback so we notice when the request
2856                 * is returned from the dasd_device layer.
2857                 */
2858                cqr->callback = _dasd_wake_block_flush_cb;
2859                for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
2860                        list_move_tail(&cqr->blocklist, &flush_queue);
2861                if (i > 1)
2862                        /* moved more than one request - need to restart */
2863                        goto restart;
2864        }
2865        spin_unlock_bh(&block->queue_lock);
2866        /* Now call the callback function of flushed requests */
2867restart_cb:
2868        list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
2869                wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
2870                /* Process finished ERP request. */
2871                if (cqr->refers) {
2872                        spin_lock_bh(&block->queue_lock);
2873                        __dasd_process_erp(block->base, cqr);
2874                        spin_unlock_bh(&block->queue_lock);
2875                        /* restart list_for_xx loop since dasd_process_erp
2876                         * might remove multiple elements */
2877                        goto restart_cb;
2878                }
2879                /* call the callback function */
2880                spin_lock_irqsave(&cqr->dq->lock, flags);
2881                cqr->endclk = get_tod_clock();
2882                list_del_init(&cqr->blocklist);
2883                __dasd_cleanup_cqr(cqr);
2884                spin_unlock_irqrestore(&cqr->dq->lock, flags);
2885        }
2886        return rc;
2887}
2888
2889/*
2890 * Schedules a call to dasd_block_tasklet on the block's tasklet.
2891 */
2892void dasd_schedule_block_bh(struct dasd_block *block)
2893{
2894        /* Protect against rescheduling. */
2895        if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
2896                return;
2897        /* life cycle of block is bound to its base device */
2898        dasd_get_device(block->base);
2899        tasklet_hi_schedule(&block->tasklet);
2900}
2901EXPORT_SYMBOL(dasd_schedule_block_bh);
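
/*
 * Note: dasd_block_tasklet() resets tasklet_scheduled to 0 before it
 * starts processing, so a wakeup that arrives while the tasklet is
 * running schedules it again and is not lost.
 */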
2902
2903
2904/*
2905 * SECTION: external block device operations
2906 * (request queue handling, open, release, etc.)
2907 */
2908
2909/*
2910 * Dasd request queue function, called by the blk-mq layer (queue_rq handler).
2911 */
2912static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
2913                                    const struct blk_mq_queue_data *qd)
2914{
2915        struct dasd_block *block = hctx->queue->queuedata;
2916        struct dasd_queue *dq = hctx->driver_data;
2917        struct request *req = qd->rq;
2918        struct dasd_device *basedev;
2919        struct dasd_ccw_req *cqr;
2920        blk_status_t rc = BLK_STS_OK;
2921
2922        basedev = block->base;
2923        spin_lock_irq(&dq->lock);
2924        if (basedev->state < DASD_STATE_READY) {
2925                DBF_DEV_EVENT(DBF_ERR, basedev,
2926                              "device not ready for request %p", req);
2927                rc = BLK_STS_IOERR;
2928                goto out;
2929        }
2930
2931        /*
2932         * if the device is stopped do not fetch new requests,
2933         * unless failfast is active, which lets requests fail
2934         * immediately in __dasd_block_start_head()
2935         */
2936        if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
2937                DBF_DEV_EVENT(DBF_ERR, basedev,
2938                              "device stopped request %p", req);
2939                rc = BLK_STS_RESOURCE;
2940                goto out;
2941        }
2942
2943        if (basedev->features & DASD_FEATURE_READONLY &&
2944            rq_data_dir(req) == WRITE) {
2945                DBF_DEV_EVENT(DBF_ERR, basedev,
2946                              "Rejecting write request %p", req);
2947                rc = BLK_STS_IOERR;
2948                goto out;
2949        }
2950
2951        if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
2952            (basedev->features & DASD_FEATURE_FAILFAST ||
2953             blk_noretry_request(req))) {
2954                DBF_DEV_EVENT(DBF_ERR, basedev,
2955                              "Rejecting failfast request %p", req);
2956                rc = BLK_STS_IOERR;
2957                goto out;
2958        }
2959
2960        cqr = basedev->discipline->build_cp(basedev, block, req);
2961        if (IS_ERR(cqr)) {
2962                if (PTR_ERR(cqr) == -EBUSY ||
2963                    PTR_ERR(cqr) == -ENOMEM ||
2964                    PTR_ERR(cqr) == -EAGAIN) {
2965                        rc = BLK_STS_RESOURCE;
2966                        goto out;
2967                }
2968                DBF_DEV_EVENT(DBF_ERR, basedev,
2969                              "CCW creation failed (rc=%ld) on request %p",
2970                              PTR_ERR(cqr), req);
2971                rc = BLK_STS_IOERR;
2972                goto out;
2973        }
2974        /*
2975         * Note: callback is set to dasd_return_cqr_cb in
2976         * __dasd_block_start_head to cover erp requests as well
2977         */
2978        cqr->callback_data = req;
2979        cqr->status = DASD_CQR_FILLED;
2980        cqr->dq = dq;
2981
2982        blk_mq_start_request(req);
2983        spin_lock(&block->queue_lock);
2984        list_add_tail(&cqr->blocklist, &block->ccw_queue);
2985        INIT_LIST_HEAD(&cqr->devlist);
2986        dasd_profile_start(block, cqr, req);
2987        dasd_schedule_block_bh(block);
2988        spin_unlock(&block->queue_lock);
2989
2990out:
2991        spin_unlock_irq(&dq->lock);
2992        return rc;
2993}
2994
2995/*
2996 * Block timeout callback, called from the block layer
2997 *
2998 * Return values:
2999 * BLK_EH_RESET_TIMER if the request should be left running
3000 * BLK_EH_DONE if the request is handled or terminated
3001 *                    by the driver.
3002 */
3003enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
3004{
3005        struct dasd_block *block = req->q->queuedata;
3006        struct dasd_device *device;
3007        struct dasd_ccw_req *cqr;
3008        unsigned long flags;
3009        int rc = 0;
3010
3011        cqr = blk_mq_rq_to_pdu(req);
3012        if (!cqr)
3013                return BLK_EH_DONE;
3014
3015        spin_lock_irqsave(&cqr->dq->lock, flags);
3016        device = cqr->startdev ? cqr->startdev : block->base;
3017        if (!device->blk_timeout) {
3018                spin_unlock_irqrestore(&cqr->dq->lock, flags);
3019                return BLK_EH_RESET_TIMER;
3020        }
3021        DBF_DEV_EVENT(DBF_WARNING, device,
3022                      " dasd_times_out cqr %p status %x",
3023                      cqr, cqr->status);
3024
3025        spin_lock(&block->queue_lock);
3026        spin_lock(get_ccwdev_lock(device->cdev));
3027        cqr->retries = -1;
3028        cqr->intrc = -ETIMEDOUT;
3029        if (cqr->status >= DASD_CQR_QUEUED) {
3030                rc = __dasd_cancel_req(cqr);
3031        } else if (cqr->status == DASD_CQR_FILLED ||
3032                   cqr->status == DASD_CQR_NEED_ERP) {
3033                cqr->status = DASD_CQR_TERMINATED;
3034        } else if (cqr->status == DASD_CQR_IN_ERP) {
3035                struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
3036
3037                list_for_each_entry_safe(searchcqr, nextcqr,
3038                                         &block->ccw_queue, blocklist) {
3039                        tmpcqr = searchcqr;
3040                        while (tmpcqr->refers)
3041                                tmpcqr = tmpcqr->refers;
3042                        if (tmpcqr != cqr)
3043                                continue;
3044                        /* searchcqr is an ERP request for cqr */
3045                        searchcqr->retries = -1;
3046                        searchcqr->intrc = -ETIMEDOUT;
3047                        if (searchcqr->status >= DASD_CQR_QUEUED) {
3048                                rc = __dasd_cancel_req(searchcqr);
3049                        } else if ((searchcqr->status == DASD_CQR_FILLED) ||
3050                                   (searchcqr->status == DASD_CQR_NEED_ERP)) {
3051                                searchcqr->status = DASD_CQR_TERMINATED;
3052                                rc = 0;
3053                        } else if (searchcqr->status == DASD_CQR_IN_ERP) {
3054                                /*
3055                                 * Shouldn't happen; most recent ERP
3056                                 * request is at the front of queue
3057                                 */
3058                                continue;
3059                        }
3060                        break;
3061                }
3062        }
3063        spin_unlock(get_ccwdev_lock(device->cdev));
3064        dasd_schedule_block_bh(block);
3065        spin_unlock(&block->queue_lock);
3066        spin_unlock_irqrestore(&cqr->dq->lock, flags);
3067
3068        return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
3069}
3070
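/*
 * Allocate the per-hctx dasd_queue. Its lock serializes request
 * submission in do_dasd_request() and the final request cleanup done
 * via __dasd_cleanup_cqr().
 */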
3071static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
3072                          unsigned int idx)
3073{
3074        struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
3075
3076        if (!dq)
3077                return -ENOMEM;
3078
3079        spin_lock_init(&dq->lock);
3080        hctx->driver_data = dq;
3081
3082        return 0;
3083}
3084
3085static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
3086{
3087        kfree(hctx->driver_data);
3088        hctx->driver_data = NULL;
3089}
3090
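/*
 * Completion callback: end the request and re-run the hardware queues
 * so that requests that were deferred with BLK_STS_RESOURCE are
 * fetched again.
 */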
3091static void dasd_request_done(struct request *req)
3092{
3093        blk_mq_end_request(req, 0);
3094        blk_mq_run_hw_queues(req->q, true);
3095}
3096
3097static struct blk_mq_ops dasd_mq_ops = {
3098        .queue_rq = do_dasd_request,
3099        .complete = dasd_request_done,
3100        .timeout = dasd_times_out,
3101        .init_hctx = dasd_init_hctx,
3102        .exit_hctx = dasd_exit_hctx,
3103};
3104
3105/*
3106 * Allocate and initialize request queue and default I/O scheduler.
3107 */
3108static int dasd_alloc_queue(struct dasd_block *block)
3109{
3110        int rc;
3111
3112        block->tag_set.ops = &dasd_mq_ops;
3113        block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
3114        block->tag_set.nr_hw_queues = nr_hw_queues;
3115        block->tag_set.queue_depth = queue_depth;
3116        block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
3117        block->tag_set.numa_node = NUMA_NO_NODE;
3118
3119        rc = blk_mq_alloc_tag_set(&block->tag_set);
3120        if (rc)
3121                return rc;
3122
3123        block->request_queue = blk_mq_init_queue(&block->tag_set);
3124        if (IS_ERR(block->request_queue)) {
3125                rc = PTR_ERR(block->request_queue);
                    /* do not keep the ERR_PTR around or leak the tag set */
                    block->request_queue = NULL;
                    blk_mq_free_tag_set(&block->tag_set);
                    return rc;
            }
3126
3127        block->request_queue->queuedata = block;
3128
3129        return 0;
3130}
3131
3132/*
3133 * Configure the request queue limits and flags.
3134 */
3135static void dasd_setup_queue(struct dasd_block *block)
3136{
3137        unsigned int logical_block_size = block->bp_block;
3138        struct request_queue *q = block->request_queue;
3139        unsigned int max_bytes, max_discard_sectors;
3140        int max;
3141
3142        if (block->base->features & DASD_FEATURE_USERAW) {
3143                /*
3144                 * the max_blocks value for raw_track access is 256;
3145                 * it is higher than the native ECKD value because we
3146                 * only need one ccw per track, so the resulting
3147                 * max_hw_sectors value is
3148                 * 2048 x 512B = 1024kB = 16 tracks
3149                 */
3150                max = 2048;
3151        } else {
3152                max = block->base->discipline->max_blocks << block->s2b_shift;
3153        }
3154        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
3155        q->limits.max_dev_sectors = max;
3156        blk_queue_logical_block_size(q, logical_block_size);
3157        blk_queue_max_hw_sectors(q, max);
3158        blk_queue_max_segments(q, USHRT_MAX);
3159        /* with page-sized segments we can translate each segment into
3160         * one idaw/tidaw
3161         */
3162        blk_queue_max_segment_size(q, PAGE_SIZE);
3163        blk_queue_segment_boundary(q, PAGE_SIZE - 1);
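        /* For example (illustrative): with 4 KiB pages, a 64 KiB request
         * is split into page-sized segments (16 for a page-aligned
         * buffer), each of which maps to one idaw/tidaw.
         */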
3164
3165        /* Only activate blocklayer discard support for devices that support it */
3166        if (block->base->features & DASD_FEATURE_DISCARD) {
3167                q->limits.discard_granularity = logical_block_size;
3168                q->limits.discard_alignment = PAGE_SIZE;
3169
3170                /* Calculate max_discard_sectors and make it PAGE aligned */
3171                max_bytes = USHRT_MAX * logical_block_size;
3172                max_bytes = ALIGN(max_bytes, PAGE_SIZE) - PAGE_SIZE;
3173                max_discard_sectors = max_bytes / logical_block_size;
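                /* Worked example (illustrative), 512 byte blocks:
                 * max_bytes = 65535 * 512 = 33553920, rounded up to the
                 * next page boundary (33554432) minus one page gives
                 * 33550336 bytes, i.e. 33550336 / 512 = 65528 blocks.
                 */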
3174
3175                blk_queue_max_discard_sectors(q, max_discard_sectors);
3176                blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
3177                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
3178        }
3179}
3180
3181/*
3182 * Deactivate and free request queue.
3183 */
3184static void dasd_free_queue(struct dasd_block *block)
3185{
3186        if (block->request_queue) {
3187                blk_cleanup_queue(block->request_queue);
3188                blk_mq_free_tag_set(&block->tag_set);
3189                block->request_queue = NULL;
3190        }
3191}
3192
3193static int dasd_open(struct block_device *bdev, fmode_t mode)
3194{
3195        struct dasd_device *base;
3196        int rc;
3197
3198        base = dasd_device_from_gendisk(bdev->bd_disk);
3199        if (!base)
3200                return -ENODEV;
3201
3202        atomic_inc(&base->block->open_count);
3203        if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
3204                rc = -ENODEV;
3205                goto unlock;
3206        }
3207
3208        if (!try_module_get(base->discipline->owner)) {
3209                rc = -EINVAL;
3210                goto unlock;
3211        }
3212
3213        if (dasd_probeonly) {
3214                dev_info(&base->cdev->dev,
3215                         "Accessing the DASD failed because it is in "
3216                         "probeonly mode\n");
3217                rc = -EPERM;
3218                goto out;
3219        }
3220
3221        if (base->state <= DASD_STATE_BASIC) {
3222                DBF_DEV_EVENT(DBF_ERR, base, " %s",
3223                              " Cannot open unrecognized device");
3224                rc = -ENODEV;
3225                goto out;
3226        }
3227
3228        if ((mode & FMODE_WRITE) &&
3229            (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
3230             (base->features & DASD_FEATURE_READONLY))) {
3231                rc = -EROFS;
3232                goto out;
3233        }
3234
3235        dasd_put_device(base);
3236        return 0;
3237
3238out:
3239        module_put(base->discipline->owner);
3240unlock:
3241        atomic_dec(&base->block->open_count);
3242        dasd_put_device(base);
3243        return rc;
3244}
3245
3246static void dasd_release(struct gendisk *disk, fmode_t mode)
3247{
3248        struct dasd_device *base = dasd_device_from_gendisk(disk);
3249        if (base) {
3250                atomic_dec(&base->block->open_count);
3251                module_put(base->discipline->owner);
3252                dasd_put_device(base);
3253        }
3254}
3255
3256/*
3257 * Return disk geometry.
3258 */
3259static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
3260{
3261        struct dasd_device *base;
3262
3263        base = dasd_device_from_gendisk(bdev->bd_disk);
3264        if (!base)
3265                return -ENODEV;
3266
3267        if (!base->discipline ||
3268            !base->discipline->fill_geometry) {
3269                dasd_put_device(base);
3270                return -EINVAL;
3271        }
3272        base->discipline->fill_geometry(base->block, geo);
3273        geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
3274        dasd_put_device(base);
3275        return 0;
3276}
3277
3278const struct block_device_operations
3279dasd_device_operations = {
3280        .owner          = THIS_MODULE,
3281        .open           = dasd_open,
3282        .release        = dasd_release,
3283        .ioctl          = dasd_ioctl,
3284        .compat_ioctl   = dasd_ioctl,
3285        .getgeo         = dasd_getgeo,
3286};
3287
3288/*******************************************************************************
3289 * end of block device operations
3290 */
3291
3292static void
3293dasd_exit(void)
3294{
3295#ifdef CONFIG_PROC_FS
3296        dasd_proc_exit();
3297#endif
3298        dasd_eer_exit();
3299        kmem_cache_destroy(dasd_page_cache);
3300        dasd_page_cache = NULL;
3301        dasd_gendisk_exit();
3302        dasd_devmap_exit();
3303        if (dasd_debug_area != NULL) {
3304                debug_unregister(dasd_debug_area);
3305                dasd_debug_area = NULL;
3306        }
3307        dasd_statistics_removeroot();
3308}
3309
3310/*
3311 * SECTION: common functions for ccw_driver use
3312 */
3313
3314/*
3315 * Is the device read-only?
3316 * Note that this function does not report the setting of the
3317 * readonly device attribute, but how it is configured in z/VM.
3318 */
3319int dasd_device_is_ro(struct dasd_device *device)
3320{
3321        struct ccw_dev_id dev_id;
3322        struct diag210 diag_data;
3323        int rc;
3324
3325        if (!MACHINE_IS_VM)
3326                return 0;
3327        ccw_device_get_id(device->cdev, &dev_id);
3328        memset(&diag_data, 0, sizeof(diag_data));
3329        diag_data.vrdcdvno = dev_id.devno;
3330        diag_data.vrdclen = sizeof(diag_data);
3331        rc = diag210(&diag_data);
3332        if (rc == 0 || rc == 2) {
3333                return diag_data.vrdcvfla & 0x80;
3334        } else {
3335                DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
3336                          dev_id.devno, rc);
3337                return 0;
3338        }
3339}
3340EXPORT_SYMBOL_GPL(dasd_device_is_ro);
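
/*
 * Illustrative use in a discipline's device checking code (a sketch,
 * not taken from a specific discipline):
 *
 *	if (dasd_device_is_ro(device))
 *		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
 */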
3341
3342static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
3343{
3344        struct ccw_device *cdev = data;
3345        int ret;
3346
3347        ret = ccw_device_set_online(cdev);
3348        if (ret)
3349                pr_warn("%s: Setting the DASD online failed with rc=%d\n",
3350                        dev_name(&cdev->dev), ret);
3351}
3352
3353/*
3354 * Initial attempt at a probe function. This can be simplified once
3355 * the other detection code is gone.
3356 */
3357int dasd_generic_probe(struct ccw_device *cdev,
3358                       struct dasd_discipline *discipline)
3359{
3360        int ret;
3361
3362        ret = dasd_add_sysfs_files(cdev);
3363        if (ret) {
3364                DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
3365                                "dasd_generic_probe: could not add "
3366                                "sysfs entries");
3367                return ret;
3368        }
3369        cdev->handler = &dasd_int_handler;
3370
3371        /*
3372         * Automatically online either all dasd devices (dasd_autodetect)
3373         * or all devices specified with dasd= parameters during
3374         * initial probe.
3375         */
3376        if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
3377            (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
3378                async_schedule(dasd_generic_auto_online, cdev);
3379        return 0;
3380}
3381EXPORT_SYMBOL_GPL(dasd_generic_probe);
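
/*
 * A discipline typically wires this up in its ccw driver's probe
 * callback, e.g. (sketch; the dasd_eckd names are assumptions):
 *
 *	static int dasd_eckd_probe(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_probe(cdev, &dasd_eckd_discipline);
 *	}
 */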
3382
3383void dasd_generic_free_discipline(struct dasd_device *device)
3384{
3385        /* Forget the discipline information. */
3386        if (device->discipline) {
3387                if (device->discipline->uncheck_device)
3388                        device->discipline->uncheck_device(device);
3389                module_put(device->discipline->owner);
3390                device->discipline = NULL;
3391        }
3392        if (device->base_discipline) {
3393                module_put(device->base_discipline->owner);
3394                device->base_discipline = NULL;
3395        }
3396}
3397EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
3398
3399/*
3400 * This will one day be called from a global not_oper handler.
3401 * It is also used by driver_unregister during module unload.
3402 */
3403void dasd_generic_remove(struct ccw_device *cdev)
3404{
3405        struct dasd_device *device;
3406        struct dasd_block *block;
3407
3408        cdev->handler = NULL;
3409
3410        device = dasd_device_from_cdev(cdev);
3411        if (IS_ERR(device)) {
3412                dasd_remove_sysfs_files(cdev);
3413                return;
3414        }
3415        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3416            !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3417                /* Already doing offline processing */
3418                dasd_put_device(device);
3419                dasd_remove_sysfs_files(cdev);
3420                return;
3421        }
3422        /*
3423         * This device is removed unconditionally. Set offline
3424         * flag to prevent dasd_open from opening it while it is
3425         * not quite down yet.
3426         */
3427        dasd_set_target_state(device, DASD_STATE_NEW);
3428        /* dasd_delete_device destroys the device reference. */
3429        block = device->block;
3430        dasd_delete_device(device);
3431        /*
3432         * the life cycle of the block is bound to the device, so delete it
3433         * after the device was safely removed
3434         */
3435        if (block)
3436                dasd_free_block(block);
3437
3438        dasd_remove_sysfs_files(cdev);
3439}
3440EXPORT_SYMBOL_GPL(dasd_generic_remove);
3441
3442/*
3443 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
3444 * the device is detected for the first time and is supposed to be used
3445 * or the user has started activation through sysfs.
3446 */
3447int dasd_generic_set_online(struct ccw_device *cdev,
3448                            struct dasd_discipline *base_discipline)
3449{
3450        struct dasd_discipline *discipline;
3451        struct dasd_device *device;
3452        int rc;
3453
3454        /* first online clears initial online feature flag */
3455        dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
3456        device = dasd_create_device(cdev);
3457        if (IS_ERR(device))
3458                return PTR_ERR(device);
3459
3460        discipline = base_discipline;
3461        if (device->features & DASD_FEATURE_USEDIAG) {
3462                if (!dasd_diag_discipline_pointer) {
3463                        /* Try to load the required module. */
3464                        rc = request_module(DASD_DIAG_MOD);
3465                        if (rc) {
3466                                pr_warn("%s Setting the DASD online failed "
3467                                        "because the required module %s "
3468                                        "could not be loaded (rc=%d)\n",
3469                                        dev_name(&cdev->dev), DASD_DIAG_MOD,
3470                                        rc);
3471                                dasd_delete_device(device);
3472                                return -ENODEV;
3473                        }
3474                }
3475                /* Module init could have failed, so check again here after
3476                 * request_module(). */
3477                if (!dasd_diag_discipline_pointer) {
3478                        pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
3479                                dev_name(&cdev->dev));
3480                        dasd_delete_device(device);
3481                        return -ENODEV;
3482                }
3483                discipline = dasd_diag_discipline_pointer;
3484        }
3485        if (!try_module_get(base_discipline->owner)) {
3486                dasd_delete_device(device);
3487                return -EINVAL;
3488        }
3489        if (!try_module_get(discipline->owner)) {
3490                module_put(base_discipline->owner);
3491                dasd_delete_device(device);
3492                return -EINVAL;
3493        }
3494        device->base_discipline = base_discipline;
3495        device->discipline = discipline;
3496
3497        /* check_device will allocate block device if necessary */
3498        rc = discipline->check_device(device);
3499        if (rc) {
3500                pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
3501                        dev_name(&cdev->dev), discipline->name, rc);
3502                module_put(discipline->owner);
3503                module_put(base_discipline->owner);
3504                dasd_delete_device(device);
3505                return rc;
3506        }
3507
3508        dasd_set_target_state(device, DASD_STATE_ONLINE);
3509        if (device->state <= DASD_STATE_KNOWN) {
3510                pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
3511                        dev_name(&cdev->dev));
3512                rc = -ENODEV;
3513                dasd_set_target_state(device, DASD_STATE_NEW);
3514                if (device->block)
3515                        dasd_free_block(device->block);
3516                dasd_delete_device(device);
3517        } else
3518                pr_debug("dasd_generic device %s found\n",
3519                                dev_name(&cdev->dev));
3520
3521        wait_event(dasd_init_waitq, _wait_for_device(device));
3522
3523        dasd_put_device(device);
3524        return rc;
3525}
3526EXPORT_SYMBOL_GPL(dasd_generic_set_online);
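
/*
 * Note: the module references taken on base_discipline and discipline
 * above are dropped again in dasd_generic_free_discipline().
 */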
3527
3528int dasd_generic_set_offline(struct ccw_device *cdev)
3529{
3530        struct dasd_device *device;
3531        struct dasd_block *block;
3532        int max_count, open_count, rc;
3533        unsigned long flags;
3534
3535        rc = 0;
3536        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3537        device = dasd_device_from_cdev_locked(cdev);
3538        if (IS_ERR(device)) {
3539                spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3540                return PTR_ERR(device);
3541        }
3542
3543        /*
3544         * We must make sure that this device is currently not in use.
3545         * The open_count is increased for every opener; that includes
3546         * the blkdev_get in dasd_scan_partitions. We are only interested
3547         * in the other openers.
3548         */
3549        if (device->block) {
3550                max_count = device->block->bdev ? 0 : -1;
3551                open_count = atomic_read(&device->block->open_count);
3552                if (open_count > max_count) {
3553                        if (open_count > 0)
3554                                pr_warn("%s: The DASD cannot be set offline with open count %i\n",
3555                                        dev_name(&cdev->dev), open_count);
3556                        else
3557                                pr_warn("%s: The DASD cannot be set offline while it is in use\n",
3558                                        dev_name(&cdev->dev));
3559                        rc = -EBUSY;
3560                        goto out_err;
3561                }
3562        }
3563
3564        /*
3565         * Test if the offline processing is already running and exit if so.
3566         * If a safe offline is being processed, this can only be a normal
3567         * offline that is allowed to overtake the safe offline and
3568         * cancel any I/O we no longer want to wait for.
3569         */
3570        if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
3571                if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3572                        clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
3573                                  &device->flags);
3574                } else {
3575                        rc = -EBUSY;
3576                        goto out_err;
3577                }
3578        }
3579        set_bit(DASD_FLAG_OFFLINE, &device->flags);
3580
3581        /*
3582         * if safe_offline was requested, set the safe_offline_running flag
3583         * and clear safe_offline so that a subsequent normal offline
3584         * can overtake the safe_offline processing
3585         */
3586        if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
3587            !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3588                /* need to unlock here to wait for outstanding I/O */
3589                spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3590                /*
3591                 * If we want to set the device safe offline, all I/O
3592                 * operations should be finished before continuing the
3593                 * offline process, so sync the bdev first and then wait
3594                 * for our queues to become empty
3595                 */
3596                if (device->block) {
3597                        rc = fsync_bdev(device->block->bdev);
3598                        if (rc != 0)
3599                                goto interrupted;
3600                }
3601                dasd_schedule_device_bh(device);
3602                rc = wait_event_interruptible(shutdown_waitq,
3603                                              _wait_for_empty_queues(device));
3604                if (rc != 0)
3605                        goto interrupted;
3606
3607                /*
3608                 * Check if a normal offline process overtook the offline
3609                 * processing; in this case simply do nothing besides
3610                 * returning that we got interrupted.
3611                 * Otherwise mark safe offline as not running any longer
3612                 * and continue with the normal offline.
3613                 */
3614                spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3615                if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3616                        rc = -ERESTARTSYS;
3617                        goto out_err;
3618                }
3619                clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3620        }
3621        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3622
3623        dasd_set_target_state(device, DASD_STATE_NEW);
3624        /* dasd_delete_device destroys the device reference. */
3625        block = device->block;
3626        dasd_delete_device(device);
3627        /*
3628         * the life cycle of the block is bound to the device, so delete it
3629         * after the device was safely removed
3630         */
3631        if (block)
3632                dasd_free_block(block);
3633
3634        return 0;
3635
3636interrupted:
3637        /* interrupted by signal */
3638        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3639        clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3640        clear_bit(DASD_FLAG_OFFLINE, &device->flags);
3641out_err:
3642        dasd_put_device(device);
3643        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3644        return rc;
3645}
3646EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
3647
3648int dasd_generic_last_path_gone(struct dasd_device *device)
3649{
3650        struct dasd_ccw_req *cqr;
3651
3652        dev_warn(&device->cdev->dev, "No operational channel path is left "
3653                 "for the device\n");
3654        DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
3655        /* First of all call extended error reporting. */
3656        dasd_eer_write(device, NULL, DASD_EER_NOPATH);
3657
3658        if (device->state < DASD_STATE_BASIC)
3659                return 0;
3660        /* Device is active. We want to keep it. */
3661        list_for_each_entry(cqr, &device->ccw_queue, devlist)
3662                if ((cqr->status == DASD_CQR_IN_IO) ||
3663                    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
3664                        cqr->status = DASD_CQR_QUEUED;
3665                        cqr->retries++;
3666                }
3667        dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
3668        dasd_device_clear_timer(device);
3669        dasd_schedule_device_bh(device);
3670        return 1;
3671}
3672EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
3673
3674int dasd_generic_path_operational(struct dasd_device *device)
3675{
3676        dev_info(&device->cdev->dev, "A channel path to the device has become "
3677                 "operational\n");
3678        DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
3679        dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
3680        if (device->stopped & DASD_UNRESUMED_PM) {
3681                dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
3682                dasd_restore_device(device);
3683                return 1;
3684        }
3685        dasd_schedule_device_bh(device);
3686        if (device->block) {
3687                dasd_schedule_block_bh(device->block);
3688                if (device->block->request_queue)
3689                        blk_mq_run_hw_queues(device->block->request_queue,
3690                                             true);
3691        }
3692
3693        if (!device->stopped)
3694                wake_up(&generic_waitq);
3695
3696        return 1;
3697}
3698EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
3699
3700int dasd_generic_notify(struct ccw_device *cdev, int event)
3701{
3702        struct dasd_device *device;
3703        int ret;
3704
3705        device = dasd_device_from_cdev_locked(cdev);
3706        if (IS_ERR(device))
3707                return 0;
3708        ret = 0;
3709        switch (event) {
3710        case CIO_GONE:
3711        case CIO_BOXED:
3712        case CIO_NO_PATH:
3713                dasd_path_no_path(device);
3714                ret = dasd_generic_last_path_gone(device);
3715                break;
3716        case CIO_OPER:
3717                ret = 1;
3718                if (dasd_path_get_opm(device))
3719                        ret = dasd_generic_path_operational(device);
3720                break;
3721        }
3722        dasd_put_device(device);
3723        return ret;
3724}
3725EXPORT_SYMBOL_GPL(dasd_generic_notify);
3726
3727void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
3728{
3729        struct dasd_device *device;
3730        int chp, oldopm, hpfpm, ifccpm;
3731
3732        device = dasd_device_from_cdev_locked(cdev);
3733        if (IS_ERR(device))
3734                return;
3735
3736        oldopm = dasd_path_get_opm(device);
3737        for (chp = 0; chp < 8; chp++) {
3738                if (path_event[chp] & PE_PATH_GONE) {
3739                        dasd_path_notoper(device, chp);
3740                }
3741                if (path_event[chp] & PE_PATH_AVAILABLE) {
3742                        dasd_path_available(device, chp);
3743                        dasd_schedule_device_bh(device);
3744                }
3745                if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
3746                        if (!dasd_path_is_operational(device, chp) &&
3747                            !dasd_path_need_verify(device, chp)) {
3748                                /*
3749                                 * we cannot establish a pathgroup on an
3750                                 * unavailable path, so trigger a path
3751                                 * verification first
3752                                 */
3753                                dasd_path_available(device, chp);
3754                                dasd_schedule_device_bh(device);
3755                        }
3756                        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3757                                      "Pathgroup re-established\n");
3758                        if (device->discipline->kick_validate)
3759                                device->discipline->kick_validate(device);
3760                }
3761        }
3762        hpfpm = dasd_path_get_hpfpm(device);
3763        ifccpm = dasd_path_get_ifccpm(device);
3764        if (!dasd_path_get_opm(device) && hpfpm) {
3765                /*
3766                 * device has no operational paths but at least one path is
3767                 * disabled due to HPF errors
3768                 * disable HPF altogether and use the path(s) again
3769                 */
3770                if (device->discipline->disable_hpf)
3771                        device->discipline->disable_hpf(device);
3772                dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
3773                dasd_path_set_tbvpm(device, hpfpm);
3774                dasd_schedule_device_bh(device);
3775                dasd_schedule_requeue(device);
3776        } else if (!dasd_path_get_opm(device) && ifccpm) {
3777                /*
3778                 * device has no operational paths but at least one path is
3779                 * disabled due to IFCC errors
3780                 * trigger path verification on paths with IFCC errors
3781                 */
3782                dasd_path_set_tbvpm(device, ifccpm);
3783                dasd_schedule_device_bh(device);
3784        }
3785        if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
3786                dev_warn(&device->cdev->dev,
3787                         "No verified channel paths remain for the device\n");
3788                DBF_DEV_EVENT(DBF_WARNING, device,
3789                              "%s", "last verified path gone");
3790                dasd_eer_write(device, NULL, DASD_EER_NOPATH);
3791                dasd_device_set_stop_bits(device,
3792                                          DASD_STOPPED_DC_WAIT);
3793        }
3794        dasd_put_device(device);
3795}
3796EXPORT_SYMBOL_GPL(dasd_generic_path_event);
3797
3798int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
3799{
3800        if (!dasd_path_get_opm(device) && lpm) {
3801                dasd_path_set_opm(device, lpm);
3802                dasd_generic_path_operational(device);
3803        } else
3804                dasd_path_add_opm(device, lpm);
3805        return 0;
3806}
3807EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
3808
3809/*
3810 * Clear active requests and requeue them to the block layer if possible.
3811 */
3812static int dasd_generic_requeue_all_requests(struct dasd_device *device)
3813{
3814        struct list_head requeue_queue;
3815        struct dasd_ccw_req *cqr, *n;
3816        struct dasd_ccw_req *refers;
3817        int rc;
3818
3819        INIT_LIST_HEAD(&requeue_queue);
3820        spin_lock_irq(get_ccwdev_lock(device->cdev));
3821        rc = 0;
3822        list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
3823                /* Check status and move request to requeue_queue */
3824                if (cqr->status == DASD_CQR_IN_IO) {
3825                        rc = device->discipline->term_IO(cqr);
3826                        if (rc) {
3827                                /* unable to terminate request */
3828                                dev_err(&device->cdev->dev,
3829                                        "Unable to terminate request %p "
3830                                        "on suspend\n", cqr);
3831                                spin_unlock_irq(get_ccwdev_lock(device->cdev));
3832                                dasd_put_device(device);
3833                                return rc;
3834                        }
3835                }
3836                list_move_tail(&cqr->devlist, &requeue_queue);
3837        }
3838        spin_unlock_irq(get_ccwdev_lock(device->cdev));
3839
3840        list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
3841                wait_event(dasd_flush_wq,
3842                           (cqr->status != DASD_CQR_CLEAR_PENDING));
3843
3844                /*
3845                 * requeueing requests to the block layer will only work
3846                 * for block device requests
3847                 */
3848                if (_dasd_requeue_request(cqr))
3849                        continue;
3850
3851                /* remove requests from device and block queue */
3852                list_del_init(&cqr->devlist);
3853                while (cqr->refers != NULL) {
3854                        refers = cqr->refers;
3855                        /* remove the request from the block queue */
3856                        list_del(&cqr->blocklist);
3857                        /* free the finished erp request */
3858                        dasd_free_erp_request(cqr, cqr->memdev);
3859                        cqr = refers;
3860                }
3861
3862                /*
3863                 * _dasd_requeue_request already checked for a valid
3864                 * block device, no need to check again;
3865                 * all ERP requests (cqr->refers) have a cqr->block
3866                 * pointer copied from the original cqr
3867                 */
3868                list_del_init(&cqr->blocklist);
3869                cqr->block->base->discipline->free_cp(
3870                        cqr, (struct request *) cqr->callback_data);
3871        }
3872
3873        /*
3874         * if requests remain then they are internal requests
3875         * and go back to the device queue
3876         */
3877        if (!list_empty(&requeue_queue)) {
3878                /* splice the requeue_queue back onto the ccw_queue */
3879                spin_lock_irq(get_ccwdev_lock(device->cdev));
3880                list_splice_tail(&requeue_queue, &device->ccw_queue);
3881                spin_unlock_irq(get_ccwdev_lock(device->cdev));
3882        }
3883        dasd_schedule_device_bh(device);
3884        return rc;
3885}
3886
3887static void do_requeue_requests(struct work_struct *work)
3888{
3889        struct dasd_device *device = container_of(work, struct dasd_device,
3890                                                  requeue_requests);
3891        dasd_generic_requeue_all_requests(device);
3892        dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
3893        if (device->block)
3894                dasd_schedule_block_bh(device->block);
3895        dasd_put_device(device);
3896}
3897
3898void dasd_schedule_requeue(struct dasd_device *device)
3899{
3900        dasd_get_device(device);
3901        /* queue call to do_requeue_requests to the kernel event daemon. */
3902        if (!schedule_work(&device->requeue_requests))
3903                dasd_put_device(device);
3904}
3905EXPORT_SYMBOL(dasd_schedule_requeue);
3906
3907int dasd_generic_pm_freeze(struct ccw_device *cdev)
3908{
3909        struct dasd_device *device = dasd_device_from_cdev(cdev);
3910
3911        if (IS_ERR(device))
3912                return PTR_ERR(device);
3913
3914        /* mark device as suspended */
3915        set_bit(DASD_FLAG_SUSPENDED, &device->flags);
3916
3917        if (device->discipline->freeze)
3918                device->discipline->freeze(device);
3919
3920        /* disallow new I/O  */
3921        dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
3922
3923        return dasd_generic_requeue_all_requests(device);
3924}
3925EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
3926
3927int dasd_generic_restore_device(struct ccw_device *cdev)
3928{
3929        struct dasd_device *device = dasd_device_from_cdev(cdev);
3930        int rc = 0;
3931
3932        if (IS_ERR(device))
3933                return PTR_ERR(device);
3934
3935        /* allow new IO again */
3936        dasd_device_remove_stop_bits(device,
3937                                     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
3938
3939        dasd_schedule_device_bh(device);
3940
3941        /*
3942         * call the discipline restore function;
3943         * if the device is stopped do nothing, e.g. for disconnected devices
3944         */
3945        if (device->discipline->restore && !(device->stopped))
3946                rc = device->discipline->restore(device);
3947        if (rc || device->stopped)
3948                /*
3949                 * if the resume failed for the DASD we put it in
3950                 * an UNRESUMED stop state
3951                 */
3952                device->stopped |= DASD_UNRESUMED_PM;
3953
3954        if (device->block) {
3955                dasd_schedule_block_bh(device->block);
3956                if (device->block->request_queue)
3957                        blk_mq_run_hw_queues(device->block->request_queue,
3958                                             true);
3959        }
3960
3961        clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
3962        dasd_put_device(device);
3963        return 0;
3964}
3965EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
3966
3967static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
3968                                                   int rdc_buffer_size,
3969                                                   int magic)
3970{
3971        struct dasd_ccw_req *cqr;
3972        struct ccw1 *ccw;
3973
3974        cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
3975                                   NULL);
3976
3977        if (IS_ERR(cqr)) {
3978                /* internal error 13 - Allocating the RDC request failed */
3979                dev_err(&device->cdev->dev,
3980                         "An error occurred in the DASD device driver, "
3981                         "reason=%s\n", "13");
3982                return cqr;
3983        }
3984
3985        ccw = cqr->cpaddr;
3986        ccw->cmd_code = CCW_CMD_RDC;
3987        ccw->cda = (__u32)(addr_t) cqr->data;
3988        ccw->flags = 0;
3989        ccw->count = rdc_buffer_size;
3990        cqr->startdev = device;
3991        cqr->memdev = device;
3992        cqr->expires = 10*HZ;
3993        cqr->retries = 256;
3994        cqr->buildclk = get_tod_clock();
3995        cqr->status = DASD_CQR_FILLED;
3996        return cqr;
3997}
3998
3999
4000int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
4001                                void *rdc_buffer, int rdc_buffer_size)
4002{
4003        int ret;
4004        struct dasd_ccw_req *cqr;
4005
4006        cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
4007        if (IS_ERR(cqr))
4008                return PTR_ERR(cqr);
4009
4010        ret = dasd_sleep_on(cqr);
4011        if (ret == 0)
4012                memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
4013        dasd_sfree_request(cqr, cqr->memdev);
4014        return ret;
4015}
4016EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
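
/*
 * Illustrative call (a sketch; buffer type and magic are assumptions):
 *
 *	struct dasd_rdc_data rdc;
 *	int rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
 *					     &rdc, sizeof(rdc));
 *
 * On rc == 0 the buffer holds the Read Device Characteristics data.
 */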
4017
4018/*
4019 *   In command mode and transport mode we need to look for sense
4020 *   data in different places. The sense data itself is always
4021 *   an array of 32 bytes, so we can unify the sense data access
4022 *   for both modes.
4023 */
4024char *dasd_get_sense(struct irb *irb)
4025{
4026        struct tsb *tsb = NULL;
4027        char *sense = NULL;
4028
4029        if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
4030                if (irb->scsw.tm.tcw)
4031                        tsb = tcw_get_tsb((struct tcw *)(unsigned long)
4032                                          irb->scsw.tm.tcw);
4033                if (tsb && tsb->length == 64 && tsb->flags)
4034                        switch (tsb->flags & 0x07) {
4035                        case 1: /* tsa_iostat */
4036                                sense = tsb->tsa.iostat.sense;
4037                                break;
4038                        case 2: /* tsa_ddpc */
4039                                sense = tsb->tsa.ddpc.sense;
4040                                break;
4041                        default:
4042                                /* currently we don't use interrogate data */
4043                                break;
4044                        }
4045        } else if (irb->esw.esw0.erw.cons) {
4046                sense = irb->ecw;
4047        }
4048        return sense;
4049}
4050EXPORT_SYMBOL_GPL(dasd_get_sense);
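
/*
 * Typical pattern in an interrupt handler (sketch):
 *
 *	sense = dasd_get_sense(irb);
 *	if (sense)
 *		... evaluate the 32 sense bytes ...
 *
 * A NULL return means the irb carries no sense data.
 */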
4051
4052void dasd_generic_shutdown(struct ccw_device *cdev)
4053{
4054        struct dasd_device *device;
4055
4056        device = dasd_device_from_cdev(cdev);
4057        if (IS_ERR(device))
4058                return;
4059
4060        if (device->block)
4061                dasd_schedule_block_bh(device->block);
4062
4063        dasd_schedule_device_bh(device);
4064
4065        wait_event(shutdown_waitq, _wait_for_empty_queues(device));
4066}
4067EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
4068
4069static int __init dasd_init(void)
4070{
4071        int rc;
4072
4073        init_waitqueue_head(&dasd_init_waitq);
4074        init_waitqueue_head(&dasd_flush_wq);
4075        init_waitqueue_head(&generic_waitq);
4076        init_waitqueue_head(&shutdown_waitq);
4077
4078        /* register 'common' DASD debug area, used for all DBF_XXX calls */
4079        dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
4080        if (dasd_debug_area == NULL) {
4081                rc = -ENOMEM;
4082                goto failed;
4083        }
4084        debug_register_view(dasd_debug_area, &debug_sprintf_view);
4085        debug_set_level(dasd_debug_area, DBF_WARNING);
4086
4087        DBF_EVENT(DBF_EMERG, "%s", "debug area created");
4088
4089        dasd_diag_discipline_pointer = NULL;
4090
4091        dasd_statistics_createroot();
4092
4093        rc = dasd_devmap_init();
4094        if (rc)
4095                goto failed;
4096        rc = dasd_gendisk_init();
4097        if (rc)
4098                goto failed;
4099        rc = dasd_parse();
4100        if (rc)
4101                goto failed;
4102        rc = dasd_eer_init();
4103        if (rc)
4104                goto failed;
4105#ifdef CONFIG_PROC_FS
4106        rc = dasd_proc_init();
4107        if (rc)
4108                goto failed;
4109#endif
4110
4111        return 0;
4112failed:
4113        pr_info("The DASD device driver could not be initialized\n");
4114        dasd_exit();
4115        return rc;
4116}
4117
4118module_init(dasd_init);
4119module_exit(dasd_exit);
4120