   1/*
   2 *  drivers/s390/char/tape_block.c
   3 *    block device frontend for tape device driver
   4 *
   5 *  S390 and zSeries version
   6 *    Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
   7 *    Author(s): Carsten Otte <cotte@de.ibm.com>
   8 *               Tuan Ngo-Anh <ngoanh@de.ibm.com>
   9 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
  10 *               Stefan Bader <shbader@de.ibm.com>
  11 */
  12
  13#define KMSG_COMPONENT "tape"
  14
  15#include <linux/fs.h>
  16#include <linux/module.h>
  17#include <linux/blkdev.h>
  18#include <linux/interrupt.h>
  19#include <linux/buffer_head.h>
  20#include <linux/kernel.h>
  21
  22#include <asm/debug.h>
  23
  24#define TAPE_DBF_AREA   tape_core_dbf
  25
  26#include "tape.h"
  27
  28#define TAPEBLOCK_MAX_SEC       100
  29#define TAPEBLOCK_MIN_REQUEUE   3
  30
  31/*
  32 * 2003/11/25  Stefan Bader <shbader@de.ibm.com>
  33 *
  34 * In 2.5/2.6 the block device request function is very likely to be called
  35 * with disabled interrupts (e.g. generic_unplug_device). So the driver can't
  36 * just call any function that tries to allocate CCW requests from that con-
  37 * text since it might sleep. There are two choices to work around this:
  38 *      a) do not allocate with kmalloc but use its own memory pool
  39 *      b) take requests from the queue outside that context, knowing that
  40 *         allocation might sleep
  41 */
  42
  43/*
  44 * file operation structure for tape block frontend
  45 */
  46static int tapeblock_open(struct block_device *, fmode_t);
  47static int tapeblock_release(struct gendisk *, fmode_t);
  48static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
  49                                unsigned long);
  50static int tapeblock_medium_changed(struct gendisk *);
  51static int tapeblock_revalidate_disk(struct gendisk *);
  52
/* Block-layer operations exported by the tape block frontend. */
static const struct block_device_operations tapeblock_fops = {
	.owner		 = THIS_MODULE,
	.open		 = tapeblock_open,
	.release	 = tapeblock_release,
	.locked_ioctl	 = tapeblock_ioctl,	/* called with the BKL held */
	.media_changed	 = tapeblock_medium_changed,
	.revalidate_disk = tapeblock_revalidate_disk,
};
  61
  62static int tapeblock_major = 0;
  63
  64static void
  65tapeblock_trigger_requeue(struct tape_device *device)
  66{
  67        /* Protect against rescheduling. */
  68        if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
  69                return;
  70        schedule_work(&device->blk_data.requeue_task);
  71}
  72
  73/*
  74 * Post finished request.
  75 */
  76static void
  77__tapeblock_end_request(struct tape_request *ccw_req, void *data)
  78{
  79        struct tape_device *device;
  80        struct request *req;
  81
  82        DBF_LH(6, "__tapeblock_end_request()\n");
  83
  84        device = ccw_req->device;
  85        req = (struct request *) data;
  86        blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
  87        if (ccw_req->rc == 0)
  88                /* Update position. */
  89                device->blk_data.block_position =
  90                  (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
  91        else
  92                /* We lost the position information due to an error. */
  93                device->blk_data.block_position = -1;
  94        device->discipline->free_bread(ccw_req);
  95        if (!list_empty(&device->req_queue) ||
  96            blk_peek_request(device->blk_data.request_queue))
  97                tapeblock_trigger_requeue(device);
  98}
  99
 100/*
 101 * Feed the tape device CCW queue with requests supplied in a list.
 102 */
 103static int
 104tapeblock_start_request(struct tape_device *device, struct request *req)
 105{
 106        struct tape_request *   ccw_req;
 107        int                     rc;
 108
 109        DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);
 110
 111        ccw_req = device->discipline->bread(device, req);
 112        if (IS_ERR(ccw_req)) {
 113                DBF_EVENT(1, "TBLOCK: bread failed\n");
 114                blk_end_request_all(req, -EIO);
 115                return PTR_ERR(ccw_req);
 116        }
 117        ccw_req->callback = __tapeblock_end_request;
 118        ccw_req->callback_data = (void *) req;
 119        ccw_req->retries = TAPEBLOCK_RETRIES;
 120
 121        rc = tape_do_io_async(device, ccw_req);
 122        if (rc) {
 123                /*
 124                 * Start/enqueueing failed. No retries in
 125                 * this case.
 126                 */
 127                blk_end_request_all(req, -EIO);
 128                device->discipline->free_bread(ccw_req);
 129        }
 130
 131        return rc;
 132}
 133
 134/*
 135 * Move requests from the block device request queue to the tape device ccw
 136 * queue.
 137 */
 138static void
 139tapeblock_requeue(struct work_struct *work) {
 140        struct tape_blk_data *  blkdat;
 141        struct tape_device *    device;
 142        struct request_queue *  queue;
 143        int                     nr_queued;
 144        struct request *        req;
 145        struct list_head *      l;
 146        int                     rc;
 147
 148        blkdat = container_of(work, struct tape_blk_data, requeue_task);
 149        device = blkdat->device;
 150        if (!device)
 151                return;
 152
 153        spin_lock_irq(get_ccwdev_lock(device->cdev));
 154        queue  = device->blk_data.request_queue;
 155
 156        /* Count number of requests on ccw queue. */
 157        nr_queued = 0;
 158        list_for_each(l, &device->req_queue)
 159                nr_queued++;
 160        spin_unlock(get_ccwdev_lock(device->cdev));
 161
 162        spin_lock_irq(&device->blk_data.request_queue_lock);
 163        while (
 164                !blk_queue_plugged(queue) &&
 165                blk_peek_request(queue) &&
 166                nr_queued < TAPEBLOCK_MIN_REQUEUE
 167        ) {
 168                req = blk_fetch_request(queue);
 169                if (rq_data_dir(req) == WRITE) {
 170                        DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
 171                        spin_unlock_irq(&device->blk_data.request_queue_lock);
 172                        blk_end_request_all(req, -EIO);
 173                        spin_lock_irq(&device->blk_data.request_queue_lock);
 174                        continue;
 175                }
 176                nr_queued++;
 177                spin_unlock_irq(&device->blk_data.request_queue_lock);
 178                rc = tapeblock_start_request(device, req);
 179                spin_lock_irq(&device->blk_data.request_queue_lock);
 180        }
 181        spin_unlock_irq(&device->blk_data.request_queue_lock);
 182        atomic_set(&device->blk_data.requeue_scheduled, 0);
 183}
 184
 185/*
 186 * Tape request queue function. Called from ll_rw_blk.c
 187 */
 188static void
 189tapeblock_request_fn(struct request_queue *queue)
 190{
 191        struct tape_device *device;
 192
 193        device = (struct tape_device *) queue->queuedata;
 194        DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
 195        BUG_ON(device == NULL);
 196        tapeblock_trigger_requeue(device);
 197}
 198
 199/*
 200 * This function is called for every new tapedevice
 201 */
 202int
 203tapeblock_setup_device(struct tape_device * device)
 204{
 205        struct tape_blk_data *  blkdat;
 206        struct gendisk *        disk;
 207        int                     rc;
 208
 209        blkdat = &device->blk_data;
 210        blkdat->device = device;
 211        spin_lock_init(&blkdat->request_queue_lock);
 212        atomic_set(&blkdat->requeue_scheduled, 0);
 213
 214        blkdat->request_queue = blk_init_queue(
 215                tapeblock_request_fn,
 216                &blkdat->request_queue_lock
 217        );
 218        if (!blkdat->request_queue)
 219                return -ENOMEM;
 220
 221        elevator_exit(blkdat->request_queue->elevator);
 222        rc = elevator_init(blkdat->request_queue, "noop");
 223        if (rc)
 224                goto cleanup_queue;
 225
 226        blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
 227        blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
 228        blk_queue_max_phys_segments(blkdat->request_queue, -1L);
 229        blk_queue_max_hw_segments(blkdat->request_queue, -1L);
 230        blk_queue_max_segment_size(blkdat->request_queue, -1L);
 231        blk_queue_segment_boundary(blkdat->request_queue, -1L);
 232
 233        disk = alloc_disk(1);
 234        if (!disk) {
 235                rc = -ENOMEM;
 236                goto cleanup_queue;
 237        }
 238
 239        disk->major = tapeblock_major;
 240        disk->first_minor = device->first_minor;
 241        disk->fops = &tapeblock_fops;
 242        disk->private_data = tape_get_device_reference(device);
 243        disk->queue = blkdat->request_queue;
 244        set_capacity(disk, 0);
 245        sprintf(disk->disk_name, "btibm%d",
 246                device->first_minor / TAPE_MINORS_PER_DEV);
 247
 248        blkdat->disk = disk;
 249        blkdat->medium_changed = 1;
 250        blkdat->request_queue->queuedata = tape_get_device_reference(device);
 251
 252        add_disk(disk);
 253
 254        tape_get_device_reference(device);
 255        INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
 256
 257        return 0;
 258
 259cleanup_queue:
 260        blk_cleanup_queue(blkdat->request_queue);
 261        blkdat->request_queue = NULL;
 262
 263        return rc;
 264}
 265
/*
 * Tear down the block frontend of a tape device, dropping the disk,
 * queue and work-item references taken in tapeblock_setup_device().
 */
void
tapeblock_cleanup_device(struct tape_device *device)
{
	/* Make sure a pending requeue work item has finished. */
	flush_scheduled_work();
	/* Drop the reference held for the requeue work item. */
	tape_put_device(device);

	if (!device->blk_data.disk) {
		goto cleanup_queue;
	}

	del_gendisk(device->blk_data.disk);
	/* Drop the gendisk's reference (private_data). */
	device->blk_data.disk->private_data =
		tape_put_device(device->blk_data.disk->private_data);
	put_disk(device->blk_data.disk);

	device->blk_data.disk = NULL;
cleanup_queue:
	/* Drop the request queue's reference (queuedata). */
	device->blk_data.request_queue->queuedata = tape_put_device(device);

	blk_cleanup_queue(device->blk_data.request_queue);
	device->blk_data.request_queue = NULL;
}
 288
 289/*
 290 * Detect number of blocks of the tape.
 291 * FIXME: can we extent this to detect the blocks size as well ?
 292 */
 293static int
 294tapeblock_revalidate_disk(struct gendisk *disk)
 295{
 296        struct tape_device *    device;
 297        unsigned int            nr_of_blks;
 298        int                     rc;
 299
 300        device = (struct tape_device *) disk->private_data;
 301        BUG_ON(!device);
 302
 303        if (!device->blk_data.medium_changed)
 304                return 0;
 305
 306        rc = tape_mtop(device, MTFSFM, 1);
 307        if (rc)
 308                return rc;
 309
 310        rc = tape_mtop(device, MTTELL, 1);
 311        if (rc < 0)
 312                return rc;
 313
 314        pr_info("%s: Determining the size of the recorded area...\n",
 315                dev_name(&device->cdev->dev));
 316        DBF_LH(3, "Image file ends at %d\n", rc);
 317        nr_of_blks = rc;
 318
 319        /* This will fail for the first file. Catch the error by checking the
 320         * position. */
 321        tape_mtop(device, MTBSF, 1);
 322
 323        rc = tape_mtop(device, MTTELL, 1);
 324        if (rc < 0)
 325                return rc;
 326
 327        if (rc > nr_of_blks)
 328                return -EINVAL;
 329
 330        DBF_LH(3, "Image file starts at %d\n", rc);
 331        device->bof = rc;
 332        nr_of_blks -= rc;
 333
 334        pr_info("%s: The size of the recorded area is %i blocks\n",
 335                dev_name(&device->cdev->dev), nr_of_blks);
 336        set_capacity(device->blk_data.disk,
 337                nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
 338
 339        device->blk_data.block_position = 0;
 340        device->blk_data.medium_changed = 0;
 341        return 0;
 342}
 343
 344static int
 345tapeblock_medium_changed(struct gendisk *disk)
 346{
 347        struct tape_device *device;
 348
 349        device = (struct tape_device *) disk->private_data;
 350        DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
 351                device, device->blk_data.medium_changed);
 352
 353        return device->blk_data.medium_changed;
 354}
 355
 356/*
 357 * Block frontend tape device open function.
 358 */
 359static int
 360tapeblock_open(struct block_device *bdev, fmode_t mode)
 361{
 362        struct gendisk *        disk = bdev->bd_disk;
 363        struct tape_device *    device;
 364        int                     rc;
 365
 366        device = tape_get_device_reference(disk->private_data);
 367
 368        if (device->required_tapemarks) {
 369                DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
 370                pr_warning("%s: Opening the tape failed because of missing "
 371                           "end-of-file marks\n", dev_name(&device->cdev->dev));
 372                rc = -EPERM;
 373                goto put_device;
 374        }
 375
 376        rc = tape_open(device);
 377        if (rc)
 378                goto put_device;
 379
 380        rc = tapeblock_revalidate_disk(disk);
 381        if (rc)
 382                goto release;
 383
 384        /*
 385         * Note: The reference to <device> is hold until the release function
 386         *       is called.
 387         */
 388        tape_state_set(device, TS_BLKUSE);
 389        return 0;
 390
 391release:
 392        tape_release(device);
 393 put_device:
 394        tape_put_device(device);
 395        return rc;
 396}
 397
 398/*
 399 * Block frontend tape device release function.
 400 *
 401 * Note: One reference to the tape device was made by the open function. So
 402 *       we just get the pointer here and release the reference.
 403 */
 404static int
 405tapeblock_release(struct gendisk *disk, fmode_t mode)
 406{
 407        struct tape_device *device = disk->private_data;
 408
 409        tape_state_set(device, TS_IN_USE);
 410        tape_release(device);
 411        tape_put_device(device);
 412
 413        return 0;
 414}
 415
 416/*
 417 * Support of some generic block device IOCTLs.
 418 */
 419static int
 420tapeblock_ioctl(
 421        struct block_device *   bdev,
 422        fmode_t                 mode,
 423        unsigned int            command,
 424        unsigned long           arg
 425) {
 426        int rc;
 427        int minor;
 428        struct gendisk *disk = bdev->bd_disk;
 429        struct tape_device *device;
 430
 431        rc     = 0;
 432        BUG_ON(!disk);
 433        device = disk->private_data;
 434        BUG_ON(!device);
 435        minor  = MINOR(bdev->bd_dev);
 436
 437        DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
 438        DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
 439
 440        switch (command) {
 441                /* Refuse some IOCTL calls without complaining (mount). */
 442                case 0x5310:            /* CDROMMULTISESSION */
 443                        rc = -EINVAL;
 444                        break;
 445                default:
 446                        rc = -EINVAL;
 447        }
 448
 449        return rc;
 450}
 451
 452/*
 453 * Initialize block device frontend.
 454 */
 455int
 456tapeblock_init(void)
 457{
 458        int rc;
 459
 460        /* Register the tape major number to the kernel */
 461        rc = register_blkdev(tapeblock_major, "tBLK");
 462        if (rc < 0)
 463                return rc;
 464
 465        if (tapeblock_major == 0)
 466                tapeblock_major = rc;
 467        return 0;
 468}
 469
 470/*
 471 * Deregister major for block device frontend
 472 */
 473void
 474tapeblock_exit(void)
 475{
 476        unregister_blkdev(tapeblock_major, "tBLK");
 477}
 478