linux/drivers/s390/block/dasd.c
/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/async.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>
#include <asm/itcw.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
                   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int  dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
        struct dasd_device *device;

        device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
        if (!device)
                return ERR_PTR(-ENOMEM);

        /* Get two pages for normal block device operations. */
        device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
        if (!device->ccw_mem) {
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }
        /* Get one page for error recovery. */
        device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
        if (!device->erp_mem) {
                free_pages((unsigned long) device->ccw_mem, 1);
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }

        dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
        dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
        spin_lock_init(&device->mem_lock);
        atomic_set(&device->tasklet_scheduled, 0);
        tasklet_init(&device->tasklet,
                     (void (*)(unsigned long)) dasd_device_tasklet,
                     (unsigned long) device);
        INIT_LIST_HEAD(&device->ccw_queue);
        init_timer(&device->timer);
        device->timer.function = dasd_device_timeout;
        device->timer.data = (unsigned long) device;
        INIT_WORK(&device->kick_work, do_kick_device);
        INIT_WORK(&device->restore_device, do_restore_device);
        device->state = DASD_STATE_NEW;
        device->target = DASD_STATE_NEW;

        return device;
}
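
/*
 * A minimal usage sketch (hypothetical caller, not from this file):
 * dasd_alloc_device() reports failure as an ERR_PTR-encoded error
 * rather than NULL, so callers should test the result with IS_ERR():
 *
 *        struct dasd_device *device = dasd_alloc_device();
 *        if (IS_ERR(device))
 *                return PTR_ERR(device);
 *        ...
 *        dasd_free_device(device);
 */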

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
        kfree(device->private);
        free_page((unsigned long) device->erp_mem);
        free_pages((unsigned long) device->ccw_mem, 1);
        kfree(device);
}

/*
 * Allocate memory for a new block device structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
        struct dasd_block *block;

        block = kzalloc(sizeof(*block), GFP_ATOMIC);
        if (!block)
                return ERR_PTR(-ENOMEM);
        /* open_count = 0 means device online but not in use */
        atomic_set(&block->open_count, -1);

        spin_lock_init(&block->request_queue_lock);
        atomic_set(&block->tasklet_scheduled, 0);
        tasklet_init(&block->tasklet,
                     (void (*)(unsigned long)) dasd_block_tasklet,
                     (unsigned long) block);
        INIT_LIST_HEAD(&block->ccw_queue);
        spin_lock_init(&block->queue_lock);
        init_timer(&block->timer);
        block->timer.function = dasd_block_timeout;
        block->timer.data = (unsigned long) block;

        return block;
}

/*
 * Free memory of a block device structure.
 */
void dasd_free_block(struct dasd_block *block)
{
        kfree(block);
}

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
        int rc;

        /*
         * As long as the device is not in state DASD_STATE_NEW we want to
         * keep the reference count > 0.
         */
        dasd_get_device(device);

        if (device->block) {
                rc = dasd_alloc_queue(device->block);
                if (rc) {
                        dasd_put_device(device);
                        return rc;
                }
        }
        device->state = DASD_STATE_KNOWN;
        return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
        /* Disable extended error reporting for this device. */
        dasd_eer_disable(device);
        /* Forget the discipline information. */
        if (device->discipline) {
                if (device->discipline->uncheck_device)
                        device->discipline->uncheck_device(device);
                module_put(device->discipline->owner);
        }
        device->discipline = NULL;
        if (device->base_discipline)
                module_put(device->base_discipline->owner);
        device->base_discipline = NULL;
        device->state = DASD_STATE_NEW;

        if (device->block)
                dasd_free_queue(device->block);

        /* Give up reference we took in dasd_state_new_to_known. */
        dasd_put_device(device);
        return 0;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
        int rc;

        /* Allocate and register gendisk structure. */
        if (device->block) {
                rc = dasd_gendisk_alloc(device->block);
                if (rc)
                        return rc;
        }
        /* register 'device' debug area, used for all DBF_DEV_XXX calls */
        device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
                                            8 * sizeof(long));
        debug_register_view(device->debug_area, &debug_sprintf_view);
        debug_set_level(device->debug_area, DBF_WARNING);
        DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

        device->state = DASD_STATE_BASIC;
        return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
        int rc;
        if (device->block) {
                dasd_gendisk_free(device->block);
                dasd_block_clear_timer(device->block);
        }
        rc = dasd_flush_device_queue(device);
        if (rc)
                return rc;
        dasd_device_clear_timer(device);

        DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
        if (device->debug_area != NULL) {
                debug_unregister(device->debug_area);
                device->debug_area = NULL;
        }
        device->state = DASD_STATE_KNOWN;
        return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
        int rc;
        struct dasd_block *block;

        rc = 0;
        block = device->block;
        /* make disk known with correct capacity */
        if (block) {
                if (block->base->discipline->do_analysis != NULL)
                        rc = block->base->discipline->do_analysis(block);
                if (rc) {
                        if (rc != -EAGAIN)
                                device->state = DASD_STATE_UNFMT;
                        return rc;
                }
                dasd_setup_queue(block);
                set_capacity(block->gdp,
                             block->blocks << block->s2b_shift);
                device->state = DASD_STATE_READY;
                rc = dasd_scan_partitions(block);
                if (rc)
                        device->state = DASD_STATE_BASIC;
        } else {
                device->state = DASD_STATE_READY;
        }
        return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
        int rc;

        device->state = DASD_STATE_BASIC;
        if (device->block) {
                struct dasd_block *block = device->block;
                rc = dasd_flush_block_queue(block);
                if (rc) {
                        device->state = DASD_STATE_READY;
                        return rc;
                }
                dasd_destroy_partitions(block);
                dasd_flush_request_queue(block);
                block->blocks = 0;
                block->bp_block = 0;
                block->s2b_shift = 0;
        }
        return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
        device->state = DASD_STATE_BASIC;
        return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
        int rc;
        struct gendisk *disk;
        struct disk_part_iter piter;
        struct hd_struct *part;

        if (device->discipline->ready_to_online) {
                rc = device->discipline->ready_to_online(device);
                if (rc)
                        return rc;
        }
        device->state = DASD_STATE_ONLINE;
        if (device->block) {
                dasd_schedule_block_bh(device->block);
                disk = device->block->bdev->bd_disk;
                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
                while ((part = disk_part_iter_next(&piter)))
                        kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
                disk_part_iter_exit(&piter);
        }
        return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
        int rc;
        struct gendisk *disk;
        struct disk_part_iter piter;
        struct hd_struct *part;

        if (device->discipline->online_to_ready) {
                rc = device->discipline->online_to_ready(device);
                if (rc)
                        return rc;
        }
        device->state = DASD_STATE_READY;
        if (device->block) {
                disk = device->block->bdev->bd_disk;
                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
                while ((part = disk_part_iter_next(&piter)))
                        kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
                disk_part_iter_exit(&piter);
        }
        return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
        int rc;

        rc = 0;
        if (device->state == DASD_STATE_NEW &&
            device->target >= DASD_STATE_KNOWN)
                rc = dasd_state_new_to_known(device);

        if (!rc &&
            device->state == DASD_STATE_KNOWN &&
            device->target >= DASD_STATE_BASIC)
                rc = dasd_state_known_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_BASIC &&
            device->target >= DASD_STATE_READY)
                rc = dasd_state_basic_to_ready(device);

        if (!rc &&
            device->state == DASD_STATE_UNFMT &&
            device->target > DASD_STATE_UNFMT)
                rc = -EPERM;

        if (!rc &&
            device->state == DASD_STATE_READY &&
            device->target >= DASD_STATE_ONLINE)
                rc = dasd_state_ready_to_online(device);

        return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
        int rc;

        rc = 0;
        if (device->state == DASD_STATE_ONLINE &&
            device->target <= DASD_STATE_READY)
                rc = dasd_state_online_to_ready(device);

        if (!rc &&
            device->state == DASD_STATE_READY &&
            device->target <= DASD_STATE_BASIC)
                rc = dasd_state_ready_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_UNFMT &&
            device->target <= DASD_STATE_BASIC)
                rc = dasd_state_unfmt_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_BASIC &&
            device->target <= DASD_STATE_KNOWN)
                rc = dasd_state_basic_to_known(device);

        if (!rc &&
            device->state == DASD_STATE_KNOWN &&
            device->target <= DASD_STATE_NEW)
                rc = dasd_state_known_to_new(device);

        return rc;
}
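
/*
 * For orientation, a sketch of the state ladder that the two helpers
 * above walk (derived from the transition functions in this file, not
 * an authoritative diagram):
 *
 *        NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *                            |
 *                            +--> UNFMT  (analysis failed; may only
 *                                 drop back to BASIC, climbing higher
 *                                 yields -EPERM)
 *
 * dasd_increase_state() walks up toward device->target and
 * dasd_decrease_state() walks back down, one transition function
 * per rung.
 */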

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
        int rc;

        if (device->state == device->target)
                /* Already where we want to go today... */
                return;
        if (device->state < device->target)
                rc = dasd_increase_state(device);
        else
                rc = dasd_decrease_state(device);
        if (rc == -EAGAIN)
                return;
        if (rc)
                device->target = device->state;

        if (device->state == device->target) {
                wake_up(&dasd_init_waitq);
                dasd_put_device(device);
        }

        /* let user-space know that the device status changed */
        kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device on the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
        dasd_change_state(device);
        dasd_schedule_device_bh(device);
        dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to dasd_kick_device to the kernel event daemon. */
        schedule_work(&device->kick_work);
}

/*
 * dasd_restore_device will schedule a call to do_restore_device on the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device,
                                                  restore_device);
        device->cdev->drv->restore(device->cdev);
        dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to dasd_restore_device to the kernel event daemon. */
        schedule_work(&device->restore_device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
        dasd_get_device(device);
        /* If we are in probeonly mode stop at DASD_STATE_READY. */
        if (dasd_probeonly && target > DASD_STATE_READY)
                target = DASD_STATE_READY;
        if (device->target != target) {
                if (device->state == target) {
                        wake_up(&dasd_init_waitq);
                        dasd_put_device(device);
                }
                device->target = target;
        }
        if (device->state != device->target)
                dasd_change_state(device);
}

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
        return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN)
                /* No discipline for device found. */
                dasd_set_target_state(device, DASD_STATE_NEW);
        /* Now wait for the devices to come up. */
        wait_event(dasd_init_waitq, _wait_for_device(device));
}

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, block) \
{ \
        int index; \
        for (index = 0; index < 31 && value >> (2+index); index++); \
        dasd_global_profile.counter[index]++; \
        block->profile.counter[index]++; \
}
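
/*
 * A worked example of the histogram bucketing above (added for
 * illustration, not part of the original source): the loop stops at
 * the smallest index for which value >> (2 + index) becomes zero,
 * i.e. the smallest index with value < 2^(2+index), capped at 31.
 * So values 0..3 land in bucket 0, 4..7 in bucket 1, 8..15 in
 * bucket 2, and so on -- a logarithmic histogram whose bucket index
 * is floor(log2(value)) - 1 for value >= 4.
 */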

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
                               struct dasd_ccw_req *cqr,
                               struct request *req)
{
        struct list_head *l;
        unsigned int counter;

        if (dasd_profile_level != DASD_PROFILE_ON)
                return;

        /* count the length of the chanq for statistics */
        counter = 0;
        list_for_each(l, &block->ccw_queue)
                if (++counter >= 31)
                        break;
        dasd_global_profile.dasd_io_nr_req[counter]++;
        block->profile.dasd_io_nr_req[counter]++;
}

/*
 * Add profiling information for cqr after execution.
 */
static void dasd_profile_end(struct dasd_block *block,
                             struct dasd_ccw_req *cqr,
                             struct request *req)
{
        long strtime, irqtime, endtime, tottime;        /* in microseconds */
        long tottimeps, sectors;

        if (dasd_profile_level != DASD_PROFILE_ON)
                return;

        sectors = blk_rq_sectors(req);
        if (!cqr->buildclk || !cqr->startclk ||
            !cqr->stopclk || !cqr->endclk ||
            !sectors)
                return;

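        /*
         * Note (added for clarity): the TOD clock values taken with
         * get_clock() advance in units of 2^-12 microseconds, so the
         * 12-bit right shifts below convert clock differences into
         * microseconds.
         */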
        strtime = ((cqr->startclk - cqr->buildclk) >> 12);
        irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
        endtime = ((cqr->endclk - cqr->stopclk) >> 12);
        tottime = ((cqr->endclk - cqr->buildclk) >> 12);
        tottimeps = tottime / sectors;

        if (!dasd_global_profile.dasd_io_reqs)
                memset(&dasd_global_profile, 0,
                       sizeof(struct dasd_profile_info_t));
        dasd_global_profile.dasd_io_reqs++;
        dasd_global_profile.dasd_io_sects += sectors;

        if (!block->profile.dasd_io_reqs)
                memset(&block->profile, 0,
                       sizeof(struct dasd_profile_info_t));
        block->profile.dasd_io_reqs++;
        block->profile.dasd_io_sects += sectors;

        dasd_profile_counter(sectors, dasd_io_secs, block);
        dasd_profile_counter(tottime, dasd_io_times, block);
        dasd_profile_counter(tottimeps, dasd_io_timps, block);
        dasd_profile_counter(strtime, dasd_io_time1, block);
        dasd_profile_counter(irqtime, dasd_io_time2, block);
        dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
        dasd_profile_counter(endtime, dasd_io_time3, block);
}
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
#endif                          /* CONFIG_DASD_PROFILE */

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
                                          int datasize,
                                          struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        /* Sanity checks */
        BUG_ON(datasize > PAGE_SIZE ||
             (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

        cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
        if (cqr == NULL)
                return ERR_PTR(-ENOMEM);
        cqr->cpaddr = NULL;
        if (cplength > 0) {
                cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
                                      GFP_ATOMIC | GFP_DMA);
                if (cqr->cpaddr == NULL) {
                        kfree(cqr);
                        return ERR_PTR(-ENOMEM);
                }
        }
        cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
                if (cqr->data == NULL) {
                        kfree(cqr->cpaddr);
                        kfree(cqr);
                        return ERR_PTR(-ENOMEM);
                }
        }
        cqr->magic =  magic;
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
                                          int datasize,
                                          struct dasd_device *device)
{
        unsigned long flags;
        struct dasd_ccw_req *cqr;
        char *data;
        int size;

        /* Sanity checks */
        BUG_ON(datasize > PAGE_SIZE ||
             (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

        size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
        if (cplength > 0)
                size += cplength * sizeof(struct ccw1);
        if (datasize > 0)
                size += datasize;
        spin_lock_irqsave(&device->mem_lock, flags);
        cqr = (struct dasd_ccw_req *)
                dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        if (cqr == NULL)
                return ERR_PTR(-ENOMEM);
        memset(cqr, 0, sizeof(struct dasd_ccw_req));
        data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
        cqr->cpaddr = NULL;
        if (cplength > 0) {
                cqr->cpaddr = (struct ccw1 *) data;
                data += cplength*sizeof(struct ccw1);
                memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
        }
        cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = data;
                memset(cqr->data, 0, datasize);
        }
        cqr->magic = magic;
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}
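
/*
 * A quick worked example of the rounding idiom above (added for
 * clarity): (x + 7L) & -8L rounds x up to the next multiple of 8,
 * because -8L is the same bit pattern as ~7L. For instance, x = 13
 * gives (13 + 7) & ~7 = 20 & ~7 = 16, so the ccw1 array placed right
 * after the struct dasd_ccw_req header always starts on an 8-byte
 * boundary.
 */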

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
        struct ccw1 *ccw;

        /* Clear any idals used for the request. */
        ccw = cqr->cpaddr;
        do {
                clear_normalized_cda(ccw);
        } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
        kfree(cqr->cpaddr);
        kfree(cqr->data);
        kfree(cqr);
        dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
        unsigned long flags;

        spin_lock_irqsave(&device->mem_lock, flags);
        dasd_free_chunk(&device->ccw_chunks, cqr);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
}
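
/*
 * Note (added for clarity): the allocation and free routines come in
 * matched pairs. A request obtained from dasd_kmalloc_request must be
 * released with dasd_kfree_request, and one obtained from
 * dasd_smalloc_request with dasd_sfree_request; both free routines
 * also drop the device reference taken at allocation time.
 */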

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;

        if (cqr == NULL)
                return -EINVAL;
        device = cqr->startdev;
        if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
                DBF_DEV_EVENT(DBF_WARNING, device,
                            " dasd_ccw_req 0x%08x magic doesn't match"
                            " discipline 0x%08x",
                            cqr->magic,
                            *(unsigned int *) device->discipline->name);
                return -EINVAL;
        }
        return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int retries, rc;
        char errorstring[ERRORLENGTH];

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc)
                return rc;
        retries = 0;
        device = (struct dasd_device *) cqr->startdev;
        while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
                rc = ccw_device_clear(device->cdev, (long) cqr);
                switch (rc) {
                case 0: /* termination successful */
                        cqr->retries--;
                        cqr->status = DASD_CQR_CLEAR_PENDING;
                        cqr->stopclk = get_clock();
                        cqr->starttime = 0;
                        DBF_DEV_EVENT(DBF_DEBUG, device,
                                      "terminate cqr %p successful",
                                      cqr);
                        break;
                case -ENODEV:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device gone, retry");
                        break;
                case -EIO:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "I/O error, retry");
                        break;
                case -EINVAL:
                case -EBUSY:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device busy, retry later");
                        break;
                default:
                        /* internal error 10 - unknown rc*/
                        snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
                        dev_err(&device->cdev->dev, "An error occurred in the "
                                "DASD device driver, reason=%s\n", errorstring);
                        BUG();
                        break;
                }
                retries++;
        }
        dasd_schedule_device_bh(device);
        return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int rc;
        char errorstring[ERRORLENGTH];

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc) {
                cqr->intrc = rc;
                return rc;
        }
        device = (struct dasd_device *) cqr->startdev;
        if (cqr->retries < 0) {
                /* internal error 14 - start_IO run out of retries */
                sprintf(errorstring, "14 %p", cqr);
                dev_err(&device->cdev->dev, "An error occurred in the DASD "
                        "device driver, reason=%s\n", errorstring);
                cqr->status = DASD_CQR_ERROR;
                return -EIO;
        }
        cqr->startclk = get_clock();
        cqr->starttime = jiffies;
        cqr->retries--;
        if (cqr->cpmode == 1) {
                rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
                                         (long) cqr, cqr->lpm);
        } else {
                rc = ccw_device_start(device->cdev, cqr->cpaddr,
                                      (long) cqr, cqr->lpm, 0);
        }
        switch (rc) {
        case 0:
                cqr->status = DASD_CQR_IN_IO;
                break;
        case -EBUSY:
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: device busy, retry later");
                break;
        case -ETIMEDOUT:
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: request timeout, retry later");
                break;
        case -EACCES:
                /* -EACCES indicates that the request used only a
                 * subset of the available paths and all these
                 * paths are gone.
                 * Do a retry with all available paths.
                 */
                cqr->lpm = LPM_ANYPATH;
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: selected paths gone,"
                              " retry on all paths");
                break;
        case -ENODEV:
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: -ENODEV device gone, retry");
                break;
        case -EIO:
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: -EIO device gone, retry");
                break;
        case -EINVAL:
                /* most likely caused in power management context */
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: -EINVAL device currently "
                              "not accessible");
                break;
        default:
                /* internal error 11 - unknown rc */
                snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
                dev_err(&device->cdev->dev,
                        "An error occurred in the DASD device driver, "
                        "reason=%s\n", errorstring);
                BUG();
                break;
        }
        cqr->intrc = rc;
        return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes
 *  1) missing interrupt handler for normal operation
 *  2) delayed start of request where start_IO failed with -EBUSY
 *  3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
        unsigned long flags;
        struct dasd_device *device;

        device = (struct dasd_device *) ptr;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        /* re-activate request queue */
        device->stopped &= ~DASD_STOPPED_PENDING;
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
        if (expires == 0)
                del_timer(&device->timer);
        else
                mod_timer(&device->timer, jiffies + expires);
}
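
/*
 * Example (added for clarity): the expires argument is a relative
 * time in jiffies, so dasd_device_set_timer(device, 5 * HZ) arms the
 * timer for roughly five seconds from now, matching the retry
 * intervals used elsewhere in this file; assuming HZ == 100, as the
 * "1/2 sec" comment further below suggests, the bare value 50 means
 * half a second. An expires value of 0 cancels a pending timer.
 */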

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
        del_timer(&device->timer);
}

static void dasd_handle_killed_request(struct ccw_device *cdev,
                                       unsigned long intparm)
{
        struct dasd_ccw_req *cqr;
        struct dasd_device *device;

        if (!intparm)
                return;
        cqr = (struct dasd_ccw_req *) intparm;
        if (cqr->status != DASD_CQR_IN_IO) {
                DBF_EVENT(DBF_DEBUG,
                        "invalid status in handle_killed_request: "
                        "bus_id %s, status %02x",
                        dev_name(&cdev->dev), cqr->status);
                return;
        }

        device = (struct dasd_device *) cqr->startdev;
        if (device == NULL ||
            device != dasd_device_from_cdev_locked(cdev) ||
            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
                DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
                              "bus_id %s", dev_name(&cdev->dev));
                return;
        }

        /* Schedule request to be retried. */
        cqr->status = DASD_CQR_QUEUED;

        dasd_device_clear_timer(device);
        dasd_schedule_device_bh(device);
        dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
        /* First of all start sense subsystem status request. */
        dasd_eer_snss(device);

        device->stopped &= ~DASD_STOPPED_PENDING;
        dasd_schedule_device_bh(device);
        if (device->block)
                dasd_schedule_block_bh(device->block);
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb)
{
        struct dasd_ccw_req *cqr, *next;
        struct dasd_device *device;
        unsigned long long now;
        int expires;

        if (IS_ERR(irb)) {
                switch (PTR_ERR(irb)) {
                case -EIO:
                        break;
                case -ETIMEDOUT:
                        DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
                               __func__, dev_name(&cdev->dev));
                        break;
                default:
                        DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
                               __func__, dev_name(&cdev->dev), PTR_ERR(irb));
                }
                dasd_handle_killed_request(cdev, intparm);
                return;
        }

        now = get_clock();

        /* check for unsolicited interrupts */
        cqr = (struct dasd_ccw_req *) intparm;
        if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
                     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
                     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
                if (cqr && cqr->status == DASD_CQR_IN_IO)
                        cqr->status = DASD_CQR_QUEUED;
                device = dasd_device_from_cdev_locked(cdev);
                if (!IS_ERR(device)) {
                        dasd_device_clear_timer(device);
                        device->discipline->handle_unsolicited_interrupt(device,
                                                                         irb);
                        dasd_put_device(device);
                }
                return;
        }

        device = (struct dasd_device *) cqr->startdev;
        if (!device ||
            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
                DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
                              "bus_id %s", dev_name(&cdev->dev));
                return;
        }

        /* Check for clear pending */
        if (cqr->status == DASD_CQR_CLEAR_PENDING &&
            scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
                cqr->status = DASD_CQR_CLEARED;
                dasd_device_clear_timer(device);
                wake_up(&dasd_flush_wq);
                dasd_schedule_device_bh(device);
                return;
        }

        /* check status - the request might have been killed by dyn detach */
        if (cqr->status != DASD_CQR_IN_IO) {
                DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
                              "status %02x", dev_name(&cdev->dev), cqr->status);
                return;
        }

        next = NULL;
        expires = 0;
        if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
            scsw_cstat(&irb->scsw) == 0) {
                /* request was completed successfully */
                cqr->status = DASD_CQR_SUCCESS;
                cqr->stopclk = now;
                /* Start first request on queue if possible -> fast_io. */
                if (cqr->devlist.next != &device->ccw_queue) {
                        next = list_entry(cqr->devlist.next,
                                          struct dasd_ccw_req, devlist);
                }
        } else {  /* error */
                memcpy(&cqr->irb, irb, sizeof(struct irb));
                /* log sense for every failed I/O to s390 debugfeature */
                dasd_log_sense_dbf(cqr, irb);
                if (device->features & DASD_FEATURE_ERPLOG) {
                        dasd_log_sense(cqr, irb);
                }

                /*
                 * If we don't want complex ERP for this request, then just
                 * reset this and retry it in the fastpath
                 */
                if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
                    cqr->retries > 0) {
                        if (cqr->lpm == LPM_ANYPATH)
                                DBF_DEV_EVENT(DBF_DEBUG, device,
                                              "default ERP in fastpath "
                                              "(%i retries left)",
                                              cqr->retries);
                        cqr->lpm    = LPM_ANYPATH;
                        cqr->status = DASD_CQR_QUEUED;
                        next = cqr;
                } else
                        cqr->status = DASD_CQR_ERROR;
        }
        if (next && (next->status == DASD_CQR_QUEUED) &&
            (!device->stopped)) {
                if (device->discipline->start_IO(next) == 0)
                        expires = next->expires;
        }
        if (expires != 0)
                dasd_device_set_timer(device, expires);
        else
                dasd_device_clear_timer(device);
        dasd_schedule_device_bh(device);
}

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
                                   struct dasd_ccw_req *ref_cqr)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        /*
         * only requeue requests that came from the dasd_block layer
         */
        if (!ref_cqr->block)
                return;

        list_for_each_safe(l, n, &device->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                if (cqr->status == DASD_CQR_QUEUED &&
                    ref_cqr->block == cqr->block) {
                        cqr->status = DASD_CQR_CLEARED;
                }
        }
}

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
                                            struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        /* Process requests with final status. */
        list_for_each_safe(l, n, &device->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);

                /* Stop list processing at the first non-final request. */
                if (cqr->status == DASD_CQR_QUEUED ||
                    cqr->status == DASD_CQR_IN_IO ||
                    cqr->status == DASD_CQR_CLEAR_PENDING)
                        break;
                if (cqr->status == DASD_CQR_ERROR) {
                        __dasd_device_recovery(device, cqr);
                }
                /* Rechain finished requests to final queue */
                list_move_tail(&cqr->devlist, final_queue);
        }
}

/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
                                              struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        struct dasd_block *block;
        void (*callback)(struct dasd_ccw_req *, void *data);
        void *callback_data;
        char errorstring[ERRORLENGTH];

        list_for_each_safe(l, n, final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                list_del_init(&cqr->devlist);
                block = cqr->block;
                callback = cqr->callback;
                callback_data = cqr->callback_data;
                if (block)
                        spin_lock_bh(&block->queue_lock);
                switch (cqr->status) {
                case DASD_CQR_SUCCESS:
                        cqr->status = DASD_CQR_DONE;
                        break;
                case DASD_CQR_ERROR:
                        cqr->status = DASD_CQR_NEED_ERP;
                        break;
                case DASD_CQR_CLEARED:
                        cqr->status = DASD_CQR_TERMINATED;
                        break;
                default:
                        /* internal error 12 - wrong cqr status*/
                        snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
                        dev_err(&device->cdev->dev,
                                "An error occurred in the DASD device driver, "
                                "reason=%s\n", errorstring);
                        BUG();
                }
                if (cqr->callback != NULL)
                        (callback)(cqr, callback_data);
                if (block)
                        spin_unlock_bh(&block->queue_lock);
        }
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        if (list_empty(&device->ccw_queue))
                return;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
        if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
            (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
                if (device->discipline->term_IO(cqr) != 0) {
                        /* Hmpf, try again in 5 sec */
                        dev_err(&device->cdev->dev,
                                "cqr %p timed out (%is) but cannot be "
                                "ended, retrying in 5 s\n",
                                cqr, (cqr->expires/HZ));
                        cqr->expires += 5*HZ;
                        dasd_device_set_timer(device, 5*HZ);
                } else {
                        dev_err(&device->cdev->dev,
                                "cqr %p timed out (%is), %i retries "
                                "remaining\n", cqr, (cqr->expires/HZ),
                                cqr->retries);
                }
        }
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        int rc;

        if (list_empty(&device->ccw_queue))
                return;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
        if (cqr->status != DASD_CQR_QUEUED)
                return;
        /* when device is stopped, return request to previous layer */
        if (device->stopped) {
                cqr->status = DASD_CQR_CLEARED;
                dasd_schedule_device_bh(device);
                return;
        }

        rc = device->discipline->start_IO(cqr);
        if (rc == 0)
                dasd_device_set_timer(device, cqr->expires);
        else if (rc == -EACCES) {
                dasd_schedule_device_bh(device);
        } else
                /* Hmpf, try again in 1/2 sec */
                dasd_device_set_timer(device, 50);
}

/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr, *n;
        int rc;
        struct list_head flush_queue;

        INIT_LIST_HEAD(&flush_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = 0;
        list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
                /* Check status and move request to flush_queue */
                switch (cqr->status) {
                case DASD_CQR_IN_IO:
                        rc = device->discipline->term_IO(cqr);
                        if (rc) {
                                /* unable to terminate request */
                                dev_err(&device->cdev->dev,
                                        "Flushing the DASD request queue "
                                        "failed for request %p\n", cqr);
                                /* stop flush processing */
                                goto finished;
                        }
                        break;
                case DASD_CQR_QUEUED:
                        cqr->stopclk = get_clock();
                        cqr->status = DASD_CQR_CLEARED;
                        break;
                default: /* no need to modify the others */
                        break;
                }
                list_move_tail(&cqr->devlist, &flush_queue);
        }
finished:
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        /*
         * After this point all requests must be in state CLEAR_PENDING,
         * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
         * one of the others.
         */
        list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
                wait_event(dasd_flush_wq,
                           (cqr->status != DASD_CQR_CLEAR_PENDING));
        /*
         * Now set each request back to TERMINATED, DONE or NEED_ERP
         * and call the callback function of flushed requests
         */
        __dasd_device_process_final_queue(device, &flush_queue);
        return rc;
}

/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
        struct list_head final_queue;

        atomic_set (&device->tasklet_scheduled, 0);
        INIT_LIST_HEAD(&final_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Check expire time of first request on the ccw queue. */
        __dasd_device_check_expire(device);
        /* find final requests on ccw queue */
        __dasd_device_process_ccw_queue(device, &final_queue);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        /* Now call the callback function of requests with final status */
        __dasd_device_process_final_queue(device, &final_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_device_start_head(device);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        dasd_put_device(device);
}

/*
 * Schedules a call to dasd_device_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
        /* Protect against rescheduling. */
        if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
                return;
        dasd_get_device(device);
        tasklet_hi_schedule(&device->tasklet);
}

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        unsigned long flags;

        device = cqr->startdev;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        cqr->status = DASD_CQR_QUEUED;
        list_add(&cqr->devlist, &device->ccw_queue);
        /* let the bh start the request to keep them in order */
        dasd_schedule_device_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        unsigned long flags;

        device = cqr->startdev;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        cqr->status = DASD_CQR_QUEUED;
        list_add_tail(&cqr->devlist, &device->ccw_queue);
        /* let the bh start the request to keep them in order */
        dasd_schedule_device_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
        wake_up((wait_queue_head_t *) data);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int rc;

        device = cqr->startdev;
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = ((cqr->status == DASD_CQR_DONE ||
               cqr->status == DASD_CQR_NEED_ERP ||
               cqr->status == DASD_CQR_TERMINATED) &&
              list_empty(&cqr->devlist));
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        return rc;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
1471int dasd_sleep_on(struct dasd_ccw_req *cqr)
1472{
1473        struct dasd_device *device;
1474        int rc;
1475
1476        device = cqr->startdev;
1477
1478        cqr->callback = dasd_wakeup_cb;
1479        cqr->callback_data = (void *) &generic_waitq;
1480        dasd_add_request_tail(cqr);
1481        wait_event(generic_waitq, _wait_for_wakeup(cqr));
1482
1483        if (cqr->status == DASD_CQR_DONE)
1484                rc = 0;
1485        else if (cqr->intrc)
1486                rc = cqr->intrc;
1487        else
1488                rc = -EIO;
1489        return rc;
1490}
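
/*
 * Illustrative sketch with a hypothetical request builder: the usual
 * synchronous pattern around dasd_sleep_on(), mirroring what
 * dasd_generic_read_dev_chars() does further below.
 */
static struct dasd_ccw_req *my_build_cqr(struct dasd_device *); /* hypothetical */

static int my_sync_io(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        int rc;

        cqr = my_build_cqr(device);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);
        cqr->status = DASD_CQR_FILLED;          /* ready for the device queue */
        rc = dasd_sleep_on(cqr);                /* blocks until final status */
        dasd_sfree_request(cqr, cqr->memdev);
        return rc;
}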
1491
1492/*
1493 * Queue a request to the tail of the device ccw_queue and wait
1494 * interruptibly for its completion.
1495 */
1496int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1497{
1498        struct dasd_device *device;
1499        int rc;
1500
1501        device = cqr->startdev;
1502        cqr->callback = dasd_wakeup_cb;
1503        cqr->callback_data = (void *) &generic_waitq;
1504        dasd_add_request_tail(cqr);
1505        rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
1506        if (rc == -ERESTARTSYS) {
1507                dasd_cancel_req(cqr);
1508                /* wait (non-interruptible) for final status */
1509                wait_event(generic_waitq, _wait_for_wakeup(cqr));
1510                cqr->intrc = rc;
1511        }
1512
1513        if (cqr->status == DASD_CQR_DONE)
1514                rc = 0;
1515        else if (cqr->intrc)
1516                rc = cqr->intrc;
1517        else
1518                rc = -EIO;
1519        return rc;
1520}
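
/*
 * Illustrative sketch of a hypothetical ioctl-style caller: when a signal
 * interrupts the wait, dasd_sleep_on_interruptible() above has already
 * cancelled the request and waited (non-interruptibly) for its final
 * status, so the cqr can be freed unconditionally and -ERESTARTSYS can be
 * passed up to let the syscall restart.
 */
static int my_ioctl_io(struct dasd_ccw_req *cqr)
{
        int rc;

        rc = dasd_sleep_on_interruptible(cqr);
        dasd_sfree_request(cqr, cqr->memdev);
        return rc;
}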
1521
1522/*
1523 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
1524 * for ECKD devices) the currently running request has to be terminated
1525 * and put back to status queued before the special request is added
1526 * to the head of the queue. Then the special request is waited on normally.
1527 */
1528static inline int _dasd_term_running_cqr(struct dasd_device *device)
1529{
1530        struct dasd_ccw_req *cqr;
1531
1532        if (list_empty(&device->ccw_queue))
1533                return 0;
1534        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1535        return device->discipline->term_IO(cqr);
1536}
1537
1538int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1539{
1540        struct dasd_device *device;
1541        int rc;
1542
1543        device = cqr->startdev;
1544        spin_lock_irq(get_ccwdev_lock(device->cdev));
1545        rc = _dasd_term_running_cqr(device);
1546        if (rc) {
1547                spin_unlock_irq(get_ccwdev_lock(device->cdev));
1548                return rc;
1549        }
1550
1551        cqr->callback = dasd_wakeup_cb;
1552        cqr->callback_data = (void *) &generic_waitq;
1553        cqr->status = DASD_CQR_QUEUED;
1554        list_add(&cqr->devlist, &device->ccw_queue);
1555
1556        /* let the bh start the request to keep them in order */
1557        dasd_schedule_device_bh(device);
1558
1559        spin_unlock_irq(get_ccwdev_lock(device->cdev));
1560
1561        wait_event(generic_waitq, _wait_for_wakeup(cqr));
1562
1563        if (cqr->status == DASD_CQR_DONE)
1564                rc = 0;
1565        else if (cqr->intrc)
1566                rc = cqr->intrc;
1567        else
1568                rc = -EIO;
1569        return rc;
1570}
1571
1572/*
1573 * Cancels a request that was started with one of the dasd_sleep_on*
1574 * functions. This is useful for timing out requests. The request will
1575 * be terminated if it is currently in I/O.
1576 * Returns 1 if the request has been terminated.
1577 *         0 if there was no need to terminate the request (not started yet)
1578 *         negative error code if termination failed
1579 * Cancellation of a request is an asynchronous operation! The calling
1580 * function has to wait until the request is properly returned via callback.
1581 */
1582int dasd_cancel_req(struct dasd_ccw_req *cqr)
1583{
1584        struct dasd_device *device = cqr->startdev;
1585        unsigned long flags;
1586        int rc;
1587
1588        rc = 0;
1589        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1590        switch (cqr->status) {
1591        case DASD_CQR_QUEUED:
1592                /* request was not started - just set to cleared */
1593                cqr->status = DASD_CQR_CLEARED;
1594                break;
1595        case DASD_CQR_IN_IO:
1596                /* request in IO - terminate IO and release again */
1597                rc = device->discipline->term_IO(cqr);
1598                if (rc) {
1599                        dev_err(&device->cdev->dev,
1600                                "Cancelling request %p failed with rc=%d\n",
1601                                cqr, rc);
1602                } else {
1603                        cqr->stopclk = get_clock();
1604                        rc = 1;
1605                }
1606                break;
1607        default: /* already finished or clear pending - do nothing */
1608                break;
1609        }
1610        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1611        dasd_schedule_device_bh(device);
1612        return rc;
1613}
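
/*
 * Illustrative sketch, hypothetical caller: timing out a request with
 * dasd_cancel_req(). Since cancellation is asynchronous, the caller must
 * still wait for the callback-driven final status before touching the
 * cqr, just as dasd_sleep_on_interruptible() does above. This assumes the
 * cqr was set up with dasd_wakeup_cb/generic_waitq.
 */
static int my_timeout_cqr(struct dasd_ccw_req *cqr)
{
        int rc;

        rc = dasd_cancel_req(cqr);
        if (rc < 0)
                return rc;      /* terminate failed, cqr still in flight */
        /* rc == 0: never started; rc == 1: terminate was issued */
        wait_event(generic_waitq, _wait_for_wakeup(cqr));
        return 0;
}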
1614
1615
1616/*
1617 * SECTION: Operations of the dasd_block layer.
1618 */
1619
1620/*
1621 * Timeout function for dasd_block. This is used when the block layer
1622 * is waiting for something that may not come reliably (e.g. a state
1623 * change interrupt).
1624 */
1625static void dasd_block_timeout(unsigned long ptr)
1626{
1627        unsigned long flags;
1628        struct dasd_block *block;
1629
1630        block = (struct dasd_block *) ptr;
1631        spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
1632        /* re-activate request queue */
1633        block->base->stopped &= ~DASD_STOPPED_PENDING;
1634        spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
1635        dasd_schedule_block_bh(block);
1636}
1637
1638/*
1639 * Set up a timeout for a dasd_block, in jiffies.
1640 */
1641void dasd_block_set_timer(struct dasd_block *block, int expires)
1642{
1643        if (expires == 0)
1644                del_timer(&block->timer);
1645        else
1646                mod_timer(&block->timer, jiffies + expires);
1647}
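
/*
 * Illustrative, hypothetical helper: expires is a relative time in
 * jiffies, so callers scale from wall-clock units with HZ, e.g. the
 * half-second stop used by __dasd_process_request_queue() below.
 */
static inline void my_stop_block_briefly(struct dasd_block *block)
{
        dasd_block_set_timer(block, HZ / 2);    /* fire in 0.5 seconds */
}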
1648
1649/*
1650 * Clear timeout for a dasd_block.
1651 */
1652void dasd_block_clear_timer(struct dasd_block *block)
1653{
1654        del_timer(&block->timer);
1655}
1656
1657/*
1658 * Process finished error recovery ccw.
1659 */
1660static inline void __dasd_block_process_erp(struct dasd_block *block,
1661                                            struct dasd_ccw_req *cqr)
1662{
1663        dasd_erp_fn_t erp_fn;
1664        struct dasd_device *device = block->base;
1665
1666        if (cqr->status == DASD_CQR_DONE)
1667                DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1668        else
1669                dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
1670        erp_fn = device->discipline->erp_postaction(cqr);
1671        erp_fn(cqr);
1672}
1673
1674/*
1675 * Fetch requests from the block device queue.
1676 */
1677static void __dasd_process_request_queue(struct dasd_block *block)
1678{
1679        struct request_queue *queue;
1680        struct request *req;
1681        struct dasd_ccw_req *cqr;
1682        struct dasd_device *basedev;
1683        unsigned long flags;
1684        queue = block->request_queue;
1685        basedev = block->base;
1686        /* No queue? Then there is nothing to do. */
1687        if (queue == NULL)
1688                return;
1689
1690        /*
1691         * We requeue requests from the block device queue to the ccw
1692         * queue only in two states. In state DASD_STATE_READY the
1693         * partition detection is done and we need to requeue requests
1694         * for that. State DASD_STATE_ONLINE is normal block device
1695         * operation.
1696         */
1697        if (basedev->state < DASD_STATE_READY) {
1698                while ((req = blk_fetch_request(block->request_queue)))
1699                        __blk_end_request_all(req, -EIO);
1700                return;
1701        }
1702        /* Now we try to fetch requests from the request queue */
1703        while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
1704                if (basedev->features & DASD_FEATURE_READONLY &&
1705                    rq_data_dir(req) == WRITE) {
1706                        DBF_DEV_EVENT(DBF_ERR, basedev,
1707                                      "Rejecting write request %p",
1708                                      req);
1709                        blk_start_request(req);
1710                        __blk_end_request_all(req, -EIO);
1711                        continue;
1712                }
1713                cqr = basedev->discipline->build_cp(basedev, block, req);
1714                if (IS_ERR(cqr)) {
1715                        if (PTR_ERR(cqr) == -EBUSY)
1716                                break;  /* normal end condition */
1717                        if (PTR_ERR(cqr) == -ENOMEM)
1718                                break;  /* terminate request queue loop */
1719                        if (PTR_ERR(cqr) == -EAGAIN) {
1720                                /*
1721                                 * The current request cannot be built right
1722                                 * now, we have to try later. If this request
1723                                 * is the head-of-queue we stop the device
1724                                 * for 1/2 second.
1725                                 */
1726                                if (!list_empty(&block->ccw_queue))
1727                                        break;
1728                                spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
1729                                basedev->stopped |= DASD_STOPPED_PENDING;
1730                                spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
1731                                dasd_block_set_timer(block, HZ/2);
1732                                break;
1733                        }
1734                        DBF_DEV_EVENT(DBF_ERR, basedev,
1735                                      "CCW creation failed (rc=%ld) "
1736                                      "on request %p",
1737                                      PTR_ERR(cqr), req);
1738                        blk_start_request(req);
1739                        __blk_end_request_all(req, -EIO);
1740                        continue;
1741                }
1742                /*
1743                 * Note: callback is set to dasd_return_cqr_cb in
1744                 * __dasd_block_start_head to cover ERP requests as well.
1745                 */
1746                cqr->callback_data = (void *) req;
1747                cqr->status = DASD_CQR_FILLED;
1748                blk_start_request(req);
1749                list_add_tail(&cqr->blocklist, &block->ccw_queue);
1750                dasd_profile_start(block, cqr, req);
1751        }
1752}
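
/*
 * Illustrative sketch, hypothetical discipline internals: the ERR_PTR
 * protocol a discipline's build_cp() is expected to follow for the fetch
 * loop above. -EBUSY and -ENOMEM simply end the loop; -EAGAIN
 * additionally stops the device for half a second when the request is
 * head-of-queue; any other error fails the request with -EIO.
 */
static int my_no_internal_memory(void);         /* hypothetical */
static int my_resources_busy(void);             /* hypothetical */
static struct dasd_ccw_req *my_do_build(struct dasd_device *,
                                        struct dasd_block *,
                                        struct request *);

static struct dasd_ccw_req *my_build_cp(struct dasd_device *basedev,
                                        struct dasd_block *block,
                                        struct request *req)
{
        if (my_no_internal_memory())
                return ERR_PTR(-ENOMEM);        /* ends the fetch loop */
        if (my_resources_busy())
                return ERR_PTR(-EAGAIN);        /* retried in 0.5 seconds */
        return my_do_build(basedev, block, req);
}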
1753
1754static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1755{
1756        struct request *req;
1757        int status;
1758        int error = 0;
1759
1760        req = (struct request *) cqr->callback_data;
1761        dasd_profile_end(cqr->block, cqr, req);
1762        status = cqr->block->base->discipline->free_cp(cqr, req);
1763        if (status <= 0)
1764                error = status ? status : -EIO;
1765        __blk_end_request_all(req, error);
1766}
1767
1768/*
1769 * Process ccw request queue.
1770 */
1771static void __dasd_process_block_ccw_queue(struct dasd_block *block,
1772                                           struct list_head *final_queue)
1773{
1774        struct list_head *l, *n;
1775        struct dasd_ccw_req *cqr;
1776        dasd_erp_fn_t erp_fn;
1777        unsigned long flags;
1778        struct dasd_device *base = block->base;
1779
1780restart:
1781        /* Process requests with final status. */
1782        list_for_each_safe(l, n, &block->ccw_queue) {
1783                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1784                if (cqr->status != DASD_CQR_DONE &&
1785                    cqr->status != DASD_CQR_FAILED &&
1786                    cqr->status != DASD_CQR_NEED_ERP &&
1787                    cqr->status != DASD_CQR_TERMINATED)
1788                        continue;
1789
1790                if (cqr->status == DASD_CQR_TERMINATED) {
1791                        base->discipline->handle_terminated_request(cqr);
1792                        goto restart;
1793                }
1794
1795                /*  Process requests that may be recovered */
1796                if (cqr->status == DASD_CQR_NEED_ERP) {
1797                        erp_fn = base->discipline->erp_action(cqr);
1798                        erp_fn(cqr);
1799                        goto restart;
1800                }
1801
1802                /* log sense for fatal error */
1803                if (cqr->status == DASD_CQR_FAILED) {
1804                        dasd_log_sense(cqr, &cqr->irb);
1805                }
1806
1807                /* First of all call extended error reporting. */
1808                if (dasd_eer_enabled(base) &&
1809                    cqr->status == DASD_CQR_FAILED) {
1810                        dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
1811
1812                        /* restart request  */
1813                        cqr->status = DASD_CQR_FILLED;
1814                        cqr->retries = 255;
1815                        spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
1816                        base->stopped |= DASD_STOPPED_QUIESCE;
1817                        spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
1818                                               flags);
1819                        goto restart;
1820                }
1821
1822                /* Process finished ERP request. */
1823                if (cqr->refers) {
1824                        __dasd_block_process_erp(block, cqr);
1825                        goto restart;
1826                }
1827
1828                /* Rechain finished requests to final queue */
1829                cqr->endclk = get_clock();
1830                list_move_tail(&cqr->blocklist, final_queue);
1831        }
1832}
1833
1834static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
1835{
1836        dasd_schedule_block_bh(cqr->block);
1837}
1838
1839static void __dasd_block_start_head(struct dasd_block *block)
1840{
1841        struct dasd_ccw_req *cqr;
1842
1843        if (list_empty(&block->ccw_queue))
1844                return;
1845        /* We always begin with the first requests on the queue, as some
1846         * of the previously started requests have to be enqueued on a
1847         * dasd_device again for error recovery.
1848         */
1849        list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
1850                if (cqr->status != DASD_CQR_FILLED)
1851                        continue;
1852                /* Non-temporary stop condition will trigger fail fast */
1853                if (block->base->stopped & ~DASD_STOPPED_PENDING &&
1854                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1855                    (!dasd_eer_enabled(block->base))) {
1856                        cqr->status = DASD_CQR_FAILED;
1857                        dasd_schedule_block_bh(block);
1858                        continue;
1859                }
1860                /* Don't try to start requests if device is stopped */
1861                if (block->base->stopped)
1862                        return;
1863
1864                /* just a fail-safe check, should not happen */
1865                if (!cqr->startdev)
1866                        cqr->startdev = block->base;
1867
1868                /* make sure that the requests we submit find their way back */
1869                cqr->callback = dasd_return_cqr_cb;
1870
1871                dasd_add_request_tail(cqr);
1872        }
1873}
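
/*
 * Illustrative: a request that should fail immediately on a non-temporary
 * stop condition (see the check above) opts in when it is built; the
 * wrapper is hypothetical, the flag is real.
 */
static inline void my_mark_failfast(struct dasd_ccw_req *cqr)
{
        set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
}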
1874
1875/*
1876 * Central dasd_block layer routine. Takes requests from the generic
1877 * block layer request queue, creates ccw requests, enqueues them on
1878 * a dasd_device and processes ccw requests that have been returned.
1879 */
1880static void dasd_block_tasklet(struct dasd_block *block)
1881{
1882        struct list_head final_queue;
1883        struct list_head *l, *n;
1884        struct dasd_ccw_req *cqr;
1885
1886        atomic_set(&block->tasklet_scheduled, 0);
1887        INIT_LIST_HEAD(&final_queue);
1888        spin_lock(&block->queue_lock);
1889        /* Finish off requests on ccw queue */
1890        __dasd_process_block_ccw_queue(block, &final_queue);
1891        spin_unlock(&block->queue_lock);
1892        /* Now call the callback function of requests with final status */
1893        spin_lock_irq(&block->request_queue_lock);
1894        list_for_each_safe(l, n, &final_queue) {
1895                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1896                list_del_init(&cqr->blocklist);
1897                __dasd_cleanup_cqr(cqr);
1898        }
1899        spin_lock(&block->queue_lock);
1900        /* Get new request from the block device request queue */
1901        __dasd_process_request_queue(block);
1902        /* Now check if the head of the ccw queue needs to be started. */
1903        __dasd_block_start_head(block);
1904        spin_unlock(&block->queue_lock);
1905        spin_unlock_irq(&block->request_queue_lock);
1906        dasd_put_device(block->base);
1907}
1908
1909static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
1910{
1911        wake_up(&dasd_flush_wq);
1912}
1913
1914/*
1915 * Go through all requests on the dasd_block request queue, cancel them
1916 * on the respective dasd_device, and return them to the generic
1917 * block layer.
1918 */
1919static int dasd_flush_block_queue(struct dasd_block *block)
1920{
1921        struct dasd_ccw_req *cqr, *n;
1922        int rc, i;
1923        struct list_head flush_queue;
1924
1925        INIT_LIST_HEAD(&flush_queue);
1926        spin_lock_bh(&block->queue_lock);
1927        rc = 0;
1928restart:
1929        list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
1930                /* if this request is currently owned by a dasd_device, cancel it */
1931                if (cqr->status >= DASD_CQR_QUEUED)
1932                        rc = dasd_cancel_req(cqr);
1933                if (rc < 0)
1934                        break;
1935                /* Rechain request (including erp chain) so it won't be
1936                 * touched by the dasd_block_tasklet anymore.
1937                 * Replace the callback so we notice when the request
1938                 * is returned from the dasd_device layer.
1939                 */
1940                cqr->callback = _dasd_wake_block_flush_cb;
1941                for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
1942                        list_move_tail(&cqr->blocklist, &flush_queue);
1943                if (i > 1)
1944                        /* moved more than one request - need to restart */
1945                        goto restart;
1946        }
1947        spin_unlock_bh(&block->queue_lock);
1948        /* Now call the callback function of flushed requests */
1949restart_cb:
1950        list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
1951                wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
1952                /* Process finished ERP request. */
1953                if (cqr->refers) {
1954                        spin_lock_bh(&block->queue_lock);
1955                        __dasd_block_process_erp(block, cqr);
1956                        spin_unlock_bh(&block->queue_lock);
1957                        /* restart the list_for_each_entry_safe loop, since
1958                         * __dasd_block_process_erp might remove multiple elements */
1959                        goto restart_cb;
1960                }
1961                /* call the callback function */
1962                spin_lock_irq(&block->request_queue_lock);
1963                cqr->endclk = get_clock();
1964                list_del_init(&cqr->blocklist);
1965                __dasd_cleanup_cqr(cqr);
1966                spin_unlock_irq(&block->request_queue_lock);
1967        }
1968        return rc;
1969}
1970
1971/*
1972 * Schedules a call to dasd_tasklet over the device tasklet.
1973 */
1974void dasd_schedule_block_bh(struct dasd_block *block)
1975{
1976        /* Protect against rescheduling. */
1977        if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
1978                return;
1979        /* life cycle of block is bound to its base device */
1980        dasd_get_device(block->base);
1981        tasklet_hi_schedule(&block->tasklet);
1982}
1983
1984
1985/*
1986 * SECTION: external block device operations
1987 * (request queue handling, open, release, etc.)
1988 */
1989
1990/*
1991 * Dasd request queue function. Called from ll_rw_blk.c
1992 */
1993static void do_dasd_request(struct request_queue *queue)
1994{
1995        struct dasd_block *block;
1996
1997        block = queue->queuedata;
1998        spin_lock(&block->queue_lock);
1999        /* Get new request from the block device request queue */
2000        __dasd_process_request_queue(block);
2001        /* Now check if the head of the ccw queue needs to be started. */
2002        __dasd_block_start_head(block);
2003        spin_unlock(&block->queue_lock);
2004}
2005
2006/*
2007 * Allocate and initialize request queue and default I/O scheduler.
2008 */
2009static int dasd_alloc_queue(struct dasd_block *block)
2010{
2011        int rc;
2012
2013        block->request_queue = blk_init_queue(do_dasd_request,
2014                                               &block->request_queue_lock);
2015        if (block->request_queue == NULL)
2016                return -ENOMEM;
2017
2018        block->request_queue->queuedata = block;
2019
2020        elevator_exit(block->request_queue->elevator);
2021        block->request_queue->elevator = NULL;
2022        rc = elevator_init(block->request_queue, "deadline");
2023        if (rc) {
2024                blk_cleanup_queue(block->request_queue);
2025                return rc;
2026        }
2027        return 0;
2028}
2029
2030/*
2031 * Set up the request queue parameters.
2032 */
2033static void dasd_setup_queue(struct dasd_block *block)
2034{
2035        int max;
2036
2037        blk_queue_logical_block_size(block->request_queue, block->bp_block);
2038        max = block->base->discipline->max_blocks << block->s2b_shift;
2039        blk_queue_max_sectors(block->request_queue, max);
2040        blk_queue_max_phys_segments(block->request_queue, -1L);
2041        blk_queue_max_hw_segments(block->request_queue, -1L);
2042        /* with page-sized segments we can translate each segment into
2043         * one idaw/tidaw
2044         */
2045        blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
2046        blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
2047        blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
2048}
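
/*
 * Worked example with a hypothetical discipline limit: for 4096-byte
 * blocks each block covers 4096/512 = 8 sectors, so s2b_shift is 3; a
 * max_blocks of 256 then yields 256 << 3 = 2048 sectors, i.e. 1 MiB per
 * request.
 */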
2049
2050/*
2051 * Deactivate and free request queue.
2052 */
2053static void dasd_free_queue(struct dasd_block *block)
2054{
2055        if (block->request_queue) {
2056                blk_cleanup_queue(block->request_queue);
2057                block->request_queue = NULL;
2058        }
2059}
2060
2061/*
2062 * Flush request on the request queue.
2063 */
2064static void dasd_flush_request_queue(struct dasd_block *block)
2065{
2066        struct request *req;
2067
2068        if (!block->request_queue)
2069                return;
2070
2071        spin_lock_irq(&block->request_queue_lock);
2072        while ((req = blk_fetch_request(block->request_queue)))
2073                __blk_end_request_all(req, -EIO);
2074        spin_unlock_irq(&block->request_queue_lock);
2075}
2076
2077static int dasd_open(struct block_device *bdev, fmode_t mode)
2078{
2079        struct dasd_block *block = bdev->bd_disk->private_data;
2080        struct dasd_device *base = block->base;
2081        int rc;
2082
2083        atomic_inc(&block->open_count);
2084        if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
2085                rc = -ENODEV;
2086                goto unlock;
2087        }
2088
2089        if (!try_module_get(base->discipline->owner)) {
2090                rc = -EINVAL;
2091                goto unlock;
2092        }
2093
2094        if (dasd_probeonly) {
2095                dev_info(&base->cdev->dev,
2096                         "Accessing the DASD failed because it is in "
2097                         "probeonly mode\n");
2098                rc = -EPERM;
2099                goto out;
2100        }
2101
2102        if (base->state <= DASD_STATE_BASIC) {
2103                DBF_DEV_EVENT(DBF_ERR, base, " %s",
2104                              " Cannot open unrecognized device");
2105                rc = -ENODEV;
2106                goto out;
2107        }
2108
2109        return 0;
2110
2111out:
2112        module_put(base->discipline->owner);
2113unlock:
2114        atomic_dec(&block->open_count);
2115        return rc;
2116}
2117
2118static int dasd_release(struct gendisk *disk, fmode_t mode)
2119{
2120        struct dasd_block *block = disk->private_data;
2121
2122        atomic_dec(&block->open_count);
2123        module_put(block->base->discipline->owner);
2124        return 0;
2125}
2126
2127/*
2128 * Return disk geometry.
2129 */
2130static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
2131{
2132        struct dasd_block *block;
2133        struct dasd_device *base;
2134
2135        block = bdev->bd_disk->private_data;
2136        if (!block)
2137                return -ENODEV;
2138        base = block->base;
2139
2140        if (!base->discipline ||
2141            !base->discipline->fill_geometry)
2142                return -EINVAL;
2143
2144        base->discipline->fill_geometry(block, geo);
2145        geo->start = get_start_sect(bdev) >> block->s2b_shift;
2146        return 0;
2147}
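
/*
 * Worked example: get_start_sect() counts 512-byte sectors while
 * geo->start is in device blocks, so with 4096-byte blocks (s2b_shift = 3)
 * a partition starting at sector 24 reports geo->start = 24 >> 3 = 3.
 */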
2148
2149const struct block_device_operations
2150dasd_device_operations = {
2151        .owner          = THIS_MODULE,
2152        .open           = dasd_open,
2153        .release        = dasd_release,
2154        .ioctl          = dasd_ioctl,
2155        .compat_ioctl   = dasd_ioctl,
2156        .getgeo         = dasd_getgeo,
2157};
2158
2159/*******************************************************************************
2160 * end of block device operations
2161 */
2162
2163static void
2164dasd_exit(void)
2165{
2166#ifdef CONFIG_PROC_FS
2167        dasd_proc_exit();
2168#endif
2169        dasd_eer_exit();
2170        if (dasd_page_cache != NULL) {
2171                kmem_cache_destroy(dasd_page_cache);
2172                dasd_page_cache = NULL;
2173        }
2174        dasd_gendisk_exit();
2175        dasd_devmap_exit();
2176        if (dasd_debug_area != NULL) {
2177                debug_unregister(dasd_debug_area);
2178                dasd_debug_area = NULL;
2179        }
2180}
2181
2182/*
2183 * SECTION: common functions for ccw_driver use
2184 */
2185
2186static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
2187{
2188        struct ccw_device *cdev = data;
2189        int ret;
2190
2191        ret = ccw_device_set_online(cdev);
2192        if (ret)
2193                pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2194                           dev_name(&cdev->dev), ret);
2195        else {
2196                struct dasd_device *device = dasd_device_from_cdev(cdev);
2197                wait_event(dasd_init_waitq, _wait_for_device(device));
2198                dasd_put_device(device);
2199        }
2200}
2201
2202/*
2203 * Initial attempt at a probe function. This can be simplified once
2204 * the other detection code is gone.
2205 */
2206int dasd_generic_probe(struct ccw_device *cdev,
2207                       struct dasd_discipline *discipline)
2208{
2209        int ret;
2210
2211        ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
2212        if (ret) {
2213                DBF_EVENT(DBF_WARNING,
2214                       "dasd_generic_probe: could not set ccw-device options "
2215                       "for %s\n", dev_name(&cdev->dev));
2216                return ret;
2217        }
2218        ret = dasd_add_sysfs_files(cdev);
2219        if (ret) {
2220                DBF_EVENT(DBF_WARNING,
2221                       "dasd_generic_probe: could not add sysfs entries "
2222                       "for %s\n", dev_name(&cdev->dev));
2223                return ret;
2224        }
2225        cdev->handler = &dasd_int_handler;
2226
2227        /*
2228         * Automatically online either all dasd devices (dasd_autodetect)
2229         * or all devices specified with dasd= parameters during
2230         * initial probe.
2231         */
2232        if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
2233            (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
2234                async_schedule(dasd_generic_auto_online, cdev);
2235        return 0;
2236}
2237
2238/*
2239 * This will one day be called from a global not_oper handler.
2240 * It is also used by driver_unregister during module unload.
2241 */
2242void dasd_generic_remove(struct ccw_device *cdev)
2243{
2244        struct dasd_device *device;
2245        struct dasd_block *block;
2246
2247        cdev->handler = NULL;
2248
2249        dasd_remove_sysfs_files(cdev);
2250        device = dasd_device_from_cdev(cdev);
2251        if (IS_ERR(device))
2252                return;
2253        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2254                /* Already doing offline processing */
2255                dasd_put_device(device);
2256                return;
2257        }
2258        /*
2259         * This device is removed unconditionally. Set offline
2260         * flag to prevent dasd_open from opening it while it is
2261         * not quite down yet.
2262         */
2263        dasd_set_target_state(device, DASD_STATE_NEW);
2264        /* dasd_delete_device destroys the device reference. */
2265        block = device->block;
2266        device->block = NULL;
2267        dasd_delete_device(device);
2268        /*
2269         * life cycle of block is bound to device, so delete it after
2270         * device was safely removed
2271         */
2272        if (block)
2273                dasd_free_block(block);
2274}
2275
2276/*
2277 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
2278 * the device is detected for the first time and is supposed to be used
2279 * or the user has started activation through sysfs.
2280 */
2281int dasd_generic_set_online(struct ccw_device *cdev,
2282                            struct dasd_discipline *base_discipline)
2283{
2284        struct dasd_discipline *discipline;
2285        struct dasd_device *device;
2286        int rc;
2287
2288        /* first online clears initial online feature flag */
2289        dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
2290        device = dasd_create_device(cdev);
2291        if (IS_ERR(device))
2292                return PTR_ERR(device);
2293
2294        discipline = base_discipline;
2295        if (device->features & DASD_FEATURE_USEDIAG) {
2296                if (!dasd_diag_discipline_pointer) {
2297                        pr_warning("%s Setting the DASD online failed because "
2298                                   "of missing DIAG discipline\n",
2299                                   dev_name(&cdev->dev));
2300                        dasd_delete_device(device);
2301                        return -ENODEV;
2302                }
2303                discipline = dasd_diag_discipline_pointer;
2304        }
2305        if (!try_module_get(base_discipline->owner)) {
2306                dasd_delete_device(device);
2307                return -EINVAL;
2308        }
2309        if (!try_module_get(discipline->owner)) {
2310                module_put(base_discipline->owner);
2311                dasd_delete_device(device);
2312                return -EINVAL;
2313        }
2314        device->base_discipline = base_discipline;
2315        device->discipline = discipline;
2316
2317        /* check_device will allocate block device if necessary */
2318        rc = discipline->check_device(device);
2319        if (rc) {
2320                pr_warning("%s Setting the DASD online with discipline %s "
2321                           "failed with rc=%i\n",
2322                           dev_name(&cdev->dev), discipline->name, rc);
2323                module_put(discipline->owner);
2324                module_put(base_discipline->owner);
2325                dasd_delete_device(device);
2326                return rc;
2327        }
2328
2329        dasd_set_target_state(device, DASD_STATE_ONLINE);
2330        if (device->state <= DASD_STATE_KNOWN) {
2331                pr_warning("%s Setting the DASD online failed because of a "
2332                           "missing discipline\n", dev_name(&cdev->dev));
2333                rc = -ENODEV;
2334                dasd_set_target_state(device, DASD_STATE_NEW);
2335                if (device->block)
2336                        dasd_free_block(device->block);
2337                dasd_delete_device(device);
2338        } else
2339                pr_debug("dasd_generic device %s found\n",
2340                                dev_name(&cdev->dev));
2341        dasd_put_device(device);
2342        return rc;
2343}
2344
2345int dasd_generic_set_offline(struct ccw_device *cdev)
2346{
2347        struct dasd_device *device;
2348        struct dasd_block *block;
2349        int max_count, open_count;
2350
2351        device = dasd_device_from_cdev(cdev);
2352        if (IS_ERR(device))
2353                return PTR_ERR(device);
2354        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2355                /* Already doing offline processing */
2356                dasd_put_device(device);
2357                return 0;
2358        }
2359        /*
2360         * We must make sure that this device is currently not in use.
2361         * The open_count is increased for every opener; that includes
2362         * the blkdev_get in dasd_scan_partitions. We are only interested
2363         * in the other openers.
2364         */
2365        if (device->block) {
2366                max_count = device->block->bdev ? 0 : -1;
2367                open_count = atomic_read(&device->block->open_count);
2368                if (open_count > max_count) {
2369                        if (open_count > 0)
2370                                pr_warning("%s: The DASD cannot be set offline "
2371                                           "with open count %i\n",
2372                                           dev_name(&cdev->dev), open_count);
2373                        else
2374                                pr_warning("%s: The DASD cannot be set offline "
2375                                           "while it is in use\n",
2376                                           dev_name(&cdev->dev));
2377                        clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2378                        dasd_put_device(device);
2379                        return -EBUSY;
2380                }
2381        }
2382        dasd_set_target_state(device, DASD_STATE_NEW);
2383        /* dasd_delete_device destroys the device reference. */
2384        block = device->block;
2385        device->block = NULL;
2386        dasd_delete_device(device);
2387        /*
2388         * life cycle of block is bound to device, so delete it after
2389         * device was safely removed
2390         */
2391        if (block)
2392                dasd_free_block(block);
2393        return 0;
2394}
2395
2396int dasd_generic_notify(struct ccw_device *cdev, int event)
2397{
2398        struct dasd_device *device;
2399        struct dasd_ccw_req *cqr;
2400        int ret;
2401
2402        device = dasd_device_from_cdev_locked(cdev);
2403        if (IS_ERR(device))
2404                return 0;
2405        ret = 0;
2406        switch (event) {
2407        case CIO_GONE:
2408        case CIO_BOXED:
2409        case CIO_NO_PATH:
2410                /* First of all call extended error reporting. */
2411                dasd_eer_write(device, NULL, DASD_EER_NOPATH);
2412
2413                if (device->state < DASD_STATE_BASIC)
2414                        break;
2415                /* Device is active. We want to keep it. */
2416                list_for_each_entry(cqr, &device->ccw_queue, devlist)
2417                        if (cqr->status == DASD_CQR_IN_IO) {
2418                                cqr->status = DASD_CQR_QUEUED;
2419                                cqr->retries++;
2420                        }
2421                device->stopped |= DASD_STOPPED_DC_WAIT;
2422                dasd_device_clear_timer(device);
2423                dasd_schedule_device_bh(device);
2424                ret = 1;
2425                break;
2426        case CIO_OPER:
2427                /* FIXME: add a sanity check. */
2428                device->stopped &= ~DASD_STOPPED_DC_WAIT;
2429                if (device->stopped & DASD_UNRESUMED_PM) {
2430                        device->stopped &= ~DASD_UNRESUMED_PM;
2431                        dasd_restore_device(device);
2432                        ret = 1;
2433                        break;
2434                }
2435                dasd_schedule_device_bh(device);
2436                if (device->block)
2437                        dasd_schedule_block_bh(device->block);
2438                ret = 1;
2439                break;
2440        }
2441        dasd_put_device(device);
2442        return ret;
2443}
2444
2445int dasd_generic_pm_freeze(struct ccw_device *cdev)
2446{
2447        struct dasd_ccw_req *cqr, *n;
2448        int rc;
2449        struct list_head freeze_queue;
2450        struct dasd_device *device = dasd_device_from_cdev(cdev);
2451
2452        if (IS_ERR(device))
2453                return PTR_ERR(device);
2454        /* disallow new I/O */
2455        device->stopped |= DASD_STOPPED_PM;
2456        /* clear active requests */
2457        INIT_LIST_HEAD(&freeze_queue);
2458        spin_lock_irq(get_ccwdev_lock(cdev));
2459        rc = 0;
2460        list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2461                /* Check status and move request to freeze_queue */
2462                if (cqr->status == DASD_CQR_IN_IO) {
2463                        rc = device->discipline->term_IO(cqr);
2464                        if (rc) {
2465                                /* unable to terminate request */
2466                                dev_err(&device->cdev->dev,
2467                                        "Unable to terminate request %p "
2468                                        "on suspend\n", cqr);
2469                                spin_unlock_irq(get_ccwdev_lock(cdev));
2470                                dasd_put_device(device);
2471                                return rc;
2472                        }
2473                }
2474                list_move_tail(&cqr->devlist, &freeze_queue);
2475        }
2476
2477        spin_unlock_irq(get_ccwdev_lock(cdev));
2478
2479        list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
2480                wait_event(dasd_flush_wq,
2481                           (cqr->status != DASD_CQR_CLEAR_PENDING));
2482                if (cqr->status == DASD_CQR_CLEARED)
2483                        cqr->status = DASD_CQR_QUEUED;
2484        }
2485        /* move freeze_queue to start of the ccw_queue */
2486        spin_lock_irq(get_ccwdev_lock(cdev));
2487        list_splice_tail(&freeze_queue, &device->ccw_queue);
2488        spin_unlock_irq(get_ccwdev_lock(cdev));
2489
2490        if (device->discipline->freeze)
2491                rc = device->discipline->freeze(device);
2492
2493        dasd_put_device(device);
2494        return rc;
2495}
2496EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
2497
2498int dasd_generic_restore_device(struct ccw_device *cdev)
2499{
2500        struct dasd_device *device = dasd_device_from_cdev(cdev);
2501        int rc = 0;
2502
2503        if (IS_ERR(device))
2504                return PTR_ERR(device);
2505
2506        /* allow new IO again */
2507        device->stopped &= ~DASD_STOPPED_PM;
2508        device->stopped &= ~DASD_UNRESUMED_PM;
2509
2510        dasd_schedule_device_bh(device);
2511
2512        if (device->discipline->restore)
2513                rc = device->discipline->restore(device);
2514        if (rc)
2515                /*
2516                 * if the resume failed for the DASD we put it in
2517                 * an UNRESUMED stop state
2518                 */
2519                device->stopped |= DASD_UNRESUMED_PM;
2520
2521        if (device->block)
2522                dasd_schedule_block_bh(device->block);
2523
2524        dasd_put_device(device);
2525        return 0;
2526}
2527EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
2528
2529static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2530                                                   void *rdc_buffer,
2531                                                   int rdc_buffer_size,
2532                                                   int magic)
2533{
2534        struct dasd_ccw_req *cqr;
2535        struct ccw1 *ccw;
2536        unsigned long *idaw;
2537
2538        cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
2539
2540        if (IS_ERR(cqr)) {
2541                /* internal error 13 - Allocating the RDC request failed */
2542                dev_err(&device->cdev->dev,
2543                         "An error occurred in the DASD device driver, "
2544                         "reason=%s\n", "13");
2545                return cqr;
2546        }
2547
2548        ccw = cqr->cpaddr;
2549        ccw->cmd_code = CCW_CMD_RDC;
2550        if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
2551                idaw = (unsigned long *) (cqr->data);
2552                ccw->cda = (__u32)(addr_t) idaw;
2553                ccw->flags = CCW_FLAG_IDA;
2554                idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
2555        } else {
2556                ccw->cda = (__u32)(addr_t) rdc_buffer;
2557                ccw->flags = 0;
2558        }
2559
2560        ccw->count = rdc_buffer_size;
2561        cqr->startdev = device;
2562        cqr->memdev = device;
2563        cqr->expires = 10*HZ;
2564        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2565        cqr->retries = 2;
2566        cqr->buildclk = get_clock();
2567        cqr->status = DASD_CQR_FILLED;
2568        return cqr;
2569}
2570
2571
2572int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
2573                                void *rdc_buffer, int rdc_buffer_size)
2574{
2575        int ret;
2576        struct dasd_ccw_req *cqr;
2577
2578        cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
2579                                     magic);
2580        if (IS_ERR(cqr))
2581                return PTR_ERR(cqr);
2582
2583        ret = dasd_sleep_on(cqr);
2584        dasd_sfree_request(cqr, cqr->memdev);
2585        return ret;
2586}
2587EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
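
/*
 * Illustrative sketch, hypothetical magic and buffer layout: how a
 * discipline's check_device() might use dasd_generic_read_dev_chars() to
 * fill its private RDC data.
 */
static int my_read_rdc(struct dasd_device *device)
{
        struct {
                char data[64];                  /* hypothetical RDC layout */
        } rdc;

        return dasd_generic_read_dev_chars(device, 0x4d594443 /* hypothetical magic */,
                                           &rdc, sizeof(rdc));
}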
2588
2589/*
2590 *   In command mode and transport mode we need to look for sense
2591 *   data in different places. The sense data itself is always
2592 *   an array of 32 bytes, so we can unify the sense data access
2593 *   for both modes.
2594 */
2595char *dasd_get_sense(struct irb *irb)
2596{
2597        struct tsb *tsb = NULL;
2598        char *sense = NULL;
2599
2600        if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
2601                if (irb->scsw.tm.tcw)
2602                        tsb = tcw_get_tsb((struct tcw *)(unsigned long)
2603                                          irb->scsw.tm.tcw);
2604                if (tsb && tsb->length == 64 && tsb->flags)
2605                        switch (tsb->flags & 0x07) {
2606                        case 1: /* tsa_iostat */
2607                                sense = tsb->tsa.iostat.sense;
2608                                break;
2609                        case 2: /* tsa_ddpc */
2610                                sense = tsb->tsa.ddpc.sense;
2611                                break;
2612                        default:
2613                                /* currently we don't use interrogate data */
2614                                break;
2615                        }
2616        } else if (irb->esw.esw0.erw.cons) {
2617                sense = irb->ecw;
2618        }
2619        return sense;
2620}
2621EXPORT_SYMBOL_GPL(dasd_get_sense);
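
/*
 * Illustrative sketch: typical use of dasd_get_sense() in an interrupt
 * path. Only the NULL check is generic; interpreting individual sense
 * bytes is discipline specific, and the bit tested here is hypothetical.
 */
static int my_sense_is_fatal(struct irb *irb)
{
        char *sense = dasd_get_sense(irb);

        return sense && (sense[0] & 0x80);      /* hypothetical fatal bit */
}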
2622
2623static int __init dasd_init(void)
2624{
2625        int rc;
2626
2627        init_waitqueue_head(&dasd_init_waitq);
2628        init_waitqueue_head(&dasd_flush_wq);
2629        init_waitqueue_head(&generic_waitq);
2630
2631        /* register 'common' DASD debug area, used for all DBF_XXX calls */
2632        dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
2633        if (dasd_debug_area == NULL) {
2634                rc = -ENOMEM;
2635                goto failed;
2636        }
2637        debug_register_view(dasd_debug_area, &debug_sprintf_view);
2638        debug_set_level(dasd_debug_area, DBF_WARNING);
2639
2640        DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2641
2642        dasd_diag_discipline_pointer = NULL;
2643
2644        rc = dasd_devmap_init();
2645        if (rc)
2646                goto failed;
2647        rc = dasd_gendisk_init();
2648        if (rc)
2649                goto failed;
2650        rc = dasd_parse();
2651        if (rc)
2652                goto failed;
2653        rc = dasd_eer_init();
2654        if (rc)
2655                goto failed;
2656#ifdef CONFIG_PROC_FS
2657        rc = dasd_proc_init();
2658        if (rc)
2659                goto failed;
2660#endif
2661
2662        return 0;
2663failed:
2664        pr_info("The DASD device driver could not be initialized\n");
2665        dasd_exit();
2666        return rc;
2667}
2668
2669module_init(dasd_init);
2670module_exit(dasd_exit);
2671
2672EXPORT_SYMBOL(dasd_debug_area);
2673EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2674
2675EXPORT_SYMBOL(dasd_add_request_head);
2676EXPORT_SYMBOL(dasd_add_request_tail);
2677EXPORT_SYMBOL(dasd_cancel_req);
2678EXPORT_SYMBOL(dasd_device_clear_timer);
2679EXPORT_SYMBOL(dasd_block_clear_timer);
2680EXPORT_SYMBOL(dasd_enable_device);
2681EXPORT_SYMBOL(dasd_int_handler);
2682EXPORT_SYMBOL(dasd_kfree_request);
2683EXPORT_SYMBOL(dasd_kick_device);
2684EXPORT_SYMBOL(dasd_kmalloc_request);
2685EXPORT_SYMBOL(dasd_schedule_device_bh);
2686EXPORT_SYMBOL(dasd_schedule_block_bh);
2687EXPORT_SYMBOL(dasd_set_target_state);
2688EXPORT_SYMBOL(dasd_device_set_timer);
2689EXPORT_SYMBOL(dasd_block_set_timer);
2690EXPORT_SYMBOL(dasd_sfree_request);
2691EXPORT_SYMBOL(dasd_sleep_on);
2692EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2693EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2694EXPORT_SYMBOL(dasd_smalloc_request);
2695EXPORT_SYMBOL(dasd_start_IO);
2696EXPORT_SYMBOL(dasd_term_IO);
2697
2698EXPORT_SYMBOL_GPL(dasd_generic_probe);
2699EXPORT_SYMBOL_GPL(dasd_generic_remove);
2700EXPORT_SYMBOL_GPL(dasd_generic_notify);
2701EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2702EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2703EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
2704EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2705EXPORT_SYMBOL_GPL(dasd_alloc_block);
2706EXPORT_SYMBOL_GPL(dasd_free_block);
2707