linux/drivers/s390/block/dasd_eckd.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
   4 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
   5 *                  Carsten Otte <Cotte@de.ibm.com>
   6 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
   7 * Bugreports.to..: <Linux390@de.ibm.com>
   8 * Copyright IBM Corp. 1999, 2009
   9 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
  10 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
  11 */
  12
  13#define KMSG_COMPONENT "dasd-eckd"
  14
  15#include <linux/stddef.h>
  16#include <linux/kernel.h>
  17#include <linux/slab.h>
  18#include <linux/hdreg.h>        /* HDIO_GETGEO                      */
  19#include <linux/bio.h>
  20#include <linux/module.h>
  21#include <linux/compat.h>
  22#include <linux/init.h>
  23#include <linux/seq_file.h>
  24
  25#include <asm/css_chars.h>
  26#include <asm/debug.h>
  27#include <asm/idals.h>
  28#include <asm/ebcdic.h>
  29#include <asm/io.h>
  30#include <linux/uaccess.h>
  31#include <asm/cio.h>
  32#include <asm/ccwdev.h>
  33#include <asm/itcw.h>
  34#include <asm/schid.h>
  35#include <asm/chpid.h>
  36
  37#include "dasd_int.h"
  38#include "dasd_eckd.h"
  39
  40#ifdef PRINTK_HEADER
  41#undef PRINTK_HEADER
  42#endif                          /* PRINTK_HEADER */
  43#define PRINTK_HEADER "dasd(eckd):"
  44
  45/*
   46 * raw track access always maps to 64k in memory,
   47 * so it maps to 16 blocks of 4k per track
  48 */
  49#define DASD_RAW_BLOCK_PER_TRACK 16
  50#define DASD_RAW_BLOCKSIZE 4096
  51/* 64k are 128 x 512 byte sectors  */
  52#define DASD_RAW_SECTORS_PER_TRACK 128
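
/*
 * A quick check of these constants (illustrative arithmetic only):
 * DASD_RAW_BLOCK_PER_TRACK * DASD_RAW_BLOCKSIZE = 16 * 4096 = 65536 bytes
 * per track, which is the same 64k as DASD_RAW_SECTORS_PER_TRACK * 512.
 */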
  53
  54MODULE_LICENSE("GPL");
  55
  56static struct dasd_discipline dasd_eckd_discipline;
  57
  58/* The ccw bus type uses this table to find devices that it sends to
  59 * dasd_eckd_probe */
  60static struct ccw_device_id dasd_eckd_ids[] = {
  61        { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
  62        { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
  63        { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
  64        { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
  65        { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
  66        { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
  67        { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
  68        { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
  69        { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
  70        { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
  71        { /* end of list */ },
  72};
  73
  74MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
  75
  76static struct ccw_driver dasd_eckd_driver; /* see below */
  77
  78static void *rawpadpage;
  79
  80#define INIT_CQR_OK 0
  81#define INIT_CQR_UNFORMATTED 1
  82#define INIT_CQR_ERROR 2
  83
  84/* emergency request for reserve/release */
  85static struct {
  86        struct dasd_ccw_req cqr;
  87        struct ccw1 ccw;
  88        char data[32];
  89} *dasd_reserve_req;
  90static DEFINE_MUTEX(dasd_reserve_mutex);
  91
  92static struct {
  93        struct dasd_ccw_req cqr;
  94        struct ccw1 ccw[2];
  95        char data[40];
  96} *dasd_vol_info_req;
  97static DEFINE_MUTEX(dasd_vol_info_mutex);
  98
  99struct ext_pool_exhaust_work_data {
 100        struct work_struct worker;
 101        struct dasd_device *device;
 102        struct dasd_device *base;
 103};
 104
 105/* definitions for the path verification worker */
 106struct pe_handler_work_data {
 107        struct work_struct worker;
 108        struct dasd_device *device;
 109        struct dasd_ccw_req cqr;
 110        struct ccw1 ccw;
 111        __u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
 112        int isglobal;
 113        __u8 tbvpm;
 114        __u8 fcsecpm;
 115};
 116static struct pe_handler_work_data *pe_handler_worker;
 117static DEFINE_MUTEX(dasd_pe_handler_mutex);
 118
 119struct check_attention_work_data {
 120        struct work_struct worker;
 121        struct dasd_device *device;
 122        __u8 lpum;
 123};
 124
 125static int dasd_eckd_ext_pool_id(struct dasd_device *);
 126static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
 127                        struct dasd_device *, struct dasd_device *,
 128                        unsigned int, int, unsigned int, unsigned int,
 129                        unsigned int, unsigned int);
 130
 131/* initial attempt at a probe function. this can be simplified once
 132 * the other detection code is gone */
 133static int
 134dasd_eckd_probe (struct ccw_device *cdev)
 135{
 136        int ret;
 137
 138        /* set ECKD specific ccw-device options */
 139        ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
 140                                     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
 141        if (ret) {
 142                DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
 143                                "dasd_eckd_probe: could not set "
 144                                "ccw-device options");
 145                return ret;
 146        }
 147        ret = dasd_generic_probe(cdev);
 148        return ret;
 149}
 150
 151static int
 152dasd_eckd_set_online(struct ccw_device *cdev)
 153{
 154        return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
 155}
 156
 157static const int sizes_trk0[] = { 28, 148, 84 };
 158#define LABEL_SIZE 140
 159
 160/* head and record addresses of count_area read in analysis ccw */
 161static const int count_area_head[] = { 0, 0, 0, 0, 1 };
 162static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
 163
 164static inline unsigned int
 165ceil_quot(unsigned int d1, unsigned int d2)
 166{
 167        return (d1 + (d2 - 1)) / d2;
 168}
 169
 170static unsigned int
 171recs_per_track(struct dasd_eckd_characteristics * rdc,
 172               unsigned int kl, unsigned int dl)
 173{
 174        int dn, kn;
 175
 176        switch (rdc->dev_type) {
 177        case 0x3380:
 178                if (kl)
 179                        return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
 180                                       ceil_quot(dl + 12, 32));
 181                else
 182                        return 1499 / (15 + ceil_quot(dl + 12, 32));
 183        case 0x3390:
 184                dn = ceil_quot(dl + 6, 232) + 1;
 185                if (kl) {
 186                        kn = ceil_quot(kl + 6, 232) + 1;
 187                        return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
 188                                       9 + ceil_quot(dl + 6 * dn, 34));
 189                } else
 190                        return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
 191        case 0x9345:
 192                dn = ceil_quot(dl + 6, 232) + 1;
 193                if (kl) {
 194                        kn = ceil_quot(kl + 6, 232) + 1;
 195                        return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
 196                                       ceil_quot(dl + 6 * dn, 34));
 197                } else
 198                        return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
 199        }
 200        return 0;
 201}
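
/*
 * Worked example for recs_per_track() above (illustrative only): for a 3390
 * with no key (kl == 0) and a data length of 4096, dn = ceil_quot(4102, 232)
 * + 1 = 19, so the function returns
 * 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34)) = 1729 / (19 + 124) = 12
 * records per track.  With a data length of 512, dn = 4 and the result is
 * 1729 / (19 + ceil_quot(536, 34)) = 1729 / 35 = 49.
 */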
 202
 203static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
 204{
 205        geo->cyl = (__u16) cyl;
 206        geo->head = cyl >> 16;
 207        geo->head <<= 4;
 208        geo->head |= head;
 209}
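
/*
 * set_ch_t() above packs a (possibly large) cylinder number and a 4-bit head
 * number into the 16-bit cyl/head pair used in the channel program data: the
 * low 16 cylinder bits go into geo->cyl and the remaining high cylinder bits
 * are stored in the upper 12 bits of geo->head.  Illustrative example with
 * hypothetical values: cyl = 0x12345, head = 6 gives geo->cyl = 0x2345 and
 * geo->head = 0x0016.
 */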
 210
 211/*
  212 * calculate failing track from sense data, depending on whether
  213 * it is an EAV device or not
 214 */
 215static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
 216                                    sector_t *track)
 217{
 218        struct dasd_eckd_private *private = device->private;
 219        u8 *sense = NULL;
 220        u32 cyl;
 221        u8 head;
 222
 223        sense = dasd_get_sense(irb);
 224        if (!sense) {
 225                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 226                              "ESE error no sense data\n");
 227                return -EINVAL;
 228        }
 229        if (!(sense[27] & DASD_SENSE_BIT_2)) {
 230                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 231                              "ESE error no valid track data\n");
 232                return -EINVAL;
 233        }
 234
 235        if (sense[27] & DASD_SENSE_BIT_3) {
 236                /* enhanced addressing */
 237                cyl = sense[30] << 20;
 238                cyl |= (sense[31] & 0xF0) << 12;
 239                cyl |= sense[28] << 8;
 240                cyl |= sense[29];
 241        } else {
 242                cyl = sense[29] << 8;
 243                cyl |= sense[30];
 244        }
 245        head = sense[31] & 0x0F;
 246        *track = cyl * private->rdc_data.trk_per_cyl + head;
 247        return 0;
 248}
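
/*
 * Illustrative example for the enhanced addressing case in
 * dasd_eckd_track_from_irb() above (hypothetical sense bytes):
 * sense[28] = 0x23, sense[29] = 0x45, sense[30] = 0x01, sense[31] = 0x26
 * yield cyl = 0x01 << 20 | 0x20 << 12 | 0x23 << 8 | 0x45 = 0x122345 and
 * head = 6; the failing track is then cyl * trk_per_cyl + head.
 */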
 249
 250static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
 251                     struct dasd_device *device)
 252{
 253        struct dasd_eckd_private *private = device->private;
 254        int rc;
 255
 256        rc = get_phys_clock(&data->ep_sys_time);
 257        /*
 258         * Ignore return code if XRC is not supported or
 259         * sync clock is switched off
 260         */
 261        if ((rc && !private->rdc_data.facilities.XRC_supported) ||
 262            rc == -EOPNOTSUPP || rc == -EACCES)
 263                return 0;
 264
 265        /* switch on System Time Stamp - needed for XRC Support */
 266        data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
 267        data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
 268
 269        if (ccw) {
 270                ccw->count = sizeof(struct DE_eckd_data);
 271                ccw->flags |= CCW_FLAG_SLI;
 272        }
 273
 274        return rc;
 275}
 276
 277static int
 278define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
 279              unsigned int totrk, int cmd, struct dasd_device *device,
 280              int blksize)
 281{
 282        struct dasd_eckd_private *private = device->private;
 283        u16 heads, beghead, endhead;
 284        u32 begcyl, endcyl;
 285        int rc = 0;
 286
 287        if (ccw) {
 288                ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
 289                ccw->flags = 0;
 290                ccw->count = 16;
 291                ccw->cda = (__u32)__pa(data);
 292        }
 293
 294        memset(data, 0, sizeof(struct DE_eckd_data));
 295        switch (cmd) {
 296        case DASD_ECKD_CCW_READ_HOME_ADDRESS:
 297        case DASD_ECKD_CCW_READ_RECORD_ZERO:
 298        case DASD_ECKD_CCW_READ:
 299        case DASD_ECKD_CCW_READ_MT:
 300        case DASD_ECKD_CCW_READ_CKD:
 301        case DASD_ECKD_CCW_READ_CKD_MT:
 302        case DASD_ECKD_CCW_READ_KD:
 303        case DASD_ECKD_CCW_READ_KD_MT:
 304                data->mask.perm = 0x1;
 305                data->attributes.operation = private->attrib.operation;
 306                break;
 307        case DASD_ECKD_CCW_READ_COUNT:
 308                data->mask.perm = 0x1;
 309                data->attributes.operation = DASD_BYPASS_CACHE;
 310                break;
 311        case DASD_ECKD_CCW_READ_TRACK:
 312        case DASD_ECKD_CCW_READ_TRACK_DATA:
 313                data->mask.perm = 0x1;
 314                data->attributes.operation = private->attrib.operation;
 315                data->blk_size = 0;
 316                break;
 317        case DASD_ECKD_CCW_WRITE:
 318        case DASD_ECKD_CCW_WRITE_MT:
 319        case DASD_ECKD_CCW_WRITE_KD:
 320        case DASD_ECKD_CCW_WRITE_KD_MT:
 321                data->mask.perm = 0x02;
 322                data->attributes.operation = private->attrib.operation;
 323                rc = set_timestamp(ccw, data, device);
 324                break;
 325        case DASD_ECKD_CCW_WRITE_CKD:
 326        case DASD_ECKD_CCW_WRITE_CKD_MT:
 327                data->attributes.operation = DASD_BYPASS_CACHE;
 328                rc = set_timestamp(ccw, data, device);
 329                break;
 330        case DASD_ECKD_CCW_ERASE:
 331        case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
 332        case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
 333                data->mask.perm = 0x3;
 334                data->mask.auth = 0x1;
 335                data->attributes.operation = DASD_BYPASS_CACHE;
 336                rc = set_timestamp(ccw, data, device);
 337                break;
 338        case DASD_ECKD_CCW_WRITE_FULL_TRACK:
 339                data->mask.perm = 0x03;
 340                data->attributes.operation = private->attrib.operation;
 341                data->blk_size = 0;
 342                break;
 343        case DASD_ECKD_CCW_WRITE_TRACK_DATA:
 344                data->mask.perm = 0x02;
 345                data->attributes.operation = private->attrib.operation;
 346                data->blk_size = blksize;
 347                rc = set_timestamp(ccw, data, device);
 348                break;
 349        default:
 350                dev_err(&device->cdev->dev,
 351                        "0x%x is not a known command\n", cmd);
 352                break;
 353        }
 354
 355        data->attributes.mode = 0x3;    /* ECKD */
 356
 357        if ((private->rdc_data.cu_type == 0x2105 ||
 358             private->rdc_data.cu_type == 0x2107 ||
 359             private->rdc_data.cu_type == 0x1750)
 360            && !(private->uses_cdl && trk < 2))
 361                data->ga_extended |= 0x40; /* Regular Data Format Mode */
 362
 363        heads = private->rdc_data.trk_per_cyl;
 364        begcyl = trk / heads;
 365        beghead = trk % heads;
 366        endcyl = totrk / heads;
 367        endhead = totrk % heads;
 368
 369        /* check for sequential prestage - enhance cylinder range */
 370        if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
 371            data->attributes.operation == DASD_SEQ_ACCESS) {
 372
 373                if (endcyl + private->attrib.nr_cyl < private->real_cyl)
 374                        endcyl += private->attrib.nr_cyl;
 375                else
 376                        endcyl = (private->real_cyl - 1);
 377        }
 378
 379        set_ch_t(&data->beg_ext, begcyl, beghead);
 380        set_ch_t(&data->end_ext, endcyl, endhead);
 381        return rc;
 382}
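
/*
 * Usage sketch for define_extent() (illustrative, not a verbatim call site
 * from this file; the variable names are placeholders): when a command mode
 * channel program is built, the first CCW of the chain is typically filled
 * with
 *
 *	rc = define_extent(ccw++, DE_data, first_trk, last_trk,
 *			   DASD_ECKD_CCW_READ_MT, basedev, blksize);
 *
 * and followed by a locate record CCW for the records to be transferred.
 */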
 383
 384
 385static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
 386                              unsigned int trk, unsigned int rec_on_trk,
 387                              int count, int cmd, struct dasd_device *device,
 388                              unsigned int reclen, unsigned int tlf)
 389{
 390        struct dasd_eckd_private *private = device->private;
 391        int sector;
 392        int dn, d;
 393
 394        if (ccw) {
 395                ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
 396                ccw->flags = 0;
 397                if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
 398                        ccw->count = 22;
 399                else
 400                        ccw->count = 20;
 401                ccw->cda = (__u32)__pa(data);
 402        }
 403
 404        memset(data, 0, sizeof(*data));
 405        sector = 0;
 406        if (rec_on_trk) {
 407                switch (private->rdc_data.dev_type) {
 408                case 0x3390:
 409                        dn = ceil_quot(reclen + 6, 232);
 410                        d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
 411                        sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
 412                        break;
 413                case 0x3380:
 414                        d = 7 + ceil_quot(reclen + 12, 32);
 415                        sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
 416                        break;
 417                }
 418        }
 419        data->sector = sector;
  420        /* note: the meaning of count depends on the operation:
  421         *       for record-based I/O it's the number of records, but for
  422         *       track-based I/O it's the number of tracks
 423         */
 424        data->count = count;
 425        switch (cmd) {
 426        case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
 427                data->operation.orientation = 0x3;
 428                data->operation.operation = 0x03;
 429                break;
 430        case DASD_ECKD_CCW_READ_HOME_ADDRESS:
 431                data->operation.orientation = 0x3;
 432                data->operation.operation = 0x16;
 433                break;
 434        case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
 435                data->operation.orientation = 0x1;
 436                data->operation.operation = 0x03;
 437                data->count++;
 438                break;
 439        case DASD_ECKD_CCW_READ_RECORD_ZERO:
 440                data->operation.orientation = 0x3;
 441                data->operation.operation = 0x16;
 442                data->count++;
 443                break;
 444        case DASD_ECKD_CCW_WRITE:
 445        case DASD_ECKD_CCW_WRITE_MT:
 446        case DASD_ECKD_CCW_WRITE_KD:
 447        case DASD_ECKD_CCW_WRITE_KD_MT:
 448                data->auxiliary.length_valid = 0x1;
 449                data->length = reclen;
 450                data->operation.operation = 0x01;
 451                break;
 452        case DASD_ECKD_CCW_WRITE_CKD:
 453        case DASD_ECKD_CCW_WRITE_CKD_MT:
 454                data->auxiliary.length_valid = 0x1;
 455                data->length = reclen;
 456                data->operation.operation = 0x03;
 457                break;
 458        case DASD_ECKD_CCW_WRITE_FULL_TRACK:
 459                data->operation.orientation = 0x0;
 460                data->operation.operation = 0x3F;
 461                data->extended_operation = 0x11;
 462                data->length = 0;
 463                data->extended_parameter_length = 0x02;
 464                if (data->count > 8) {
 465                        data->extended_parameter[0] = 0xFF;
 466                        data->extended_parameter[1] = 0xFF;
 467                        data->extended_parameter[1] <<= (16 - count);
 468                } else {
 469                        data->extended_parameter[0] = 0xFF;
 470                        data->extended_parameter[0] <<= (8 - count);
 471                        data->extended_parameter[1] = 0x00;
 472                }
 473                data->sector = 0xFF;
 474                break;
 475        case DASD_ECKD_CCW_WRITE_TRACK_DATA:
 476                data->auxiliary.length_valid = 0x1;
 477                data->length = reclen;  /* not tlf, as one might think */
 478                data->operation.operation = 0x3F;
 479                data->extended_operation = 0x23;
 480                break;
 481        case DASD_ECKD_CCW_READ:
 482        case DASD_ECKD_CCW_READ_MT:
 483        case DASD_ECKD_CCW_READ_KD:
 484        case DASD_ECKD_CCW_READ_KD_MT:
 485                data->auxiliary.length_valid = 0x1;
 486                data->length = reclen;
 487                data->operation.operation = 0x06;
 488                break;
 489        case DASD_ECKD_CCW_READ_CKD:
 490        case DASD_ECKD_CCW_READ_CKD_MT:
 491                data->auxiliary.length_valid = 0x1;
 492                data->length = reclen;
 493                data->operation.operation = 0x16;
 494                break;
 495        case DASD_ECKD_CCW_READ_COUNT:
 496                data->operation.operation = 0x06;
 497                break;
 498        case DASD_ECKD_CCW_READ_TRACK:
 499                data->operation.orientation = 0x1;
 500                data->operation.operation = 0x0C;
 501                data->extended_parameter_length = 0;
 502                data->sector = 0xFF;
 503                break;
 504        case DASD_ECKD_CCW_READ_TRACK_DATA:
 505                data->auxiliary.length_valid = 0x1;
 506                data->length = tlf;
 507                data->operation.operation = 0x0C;
 508                break;
 509        case DASD_ECKD_CCW_ERASE:
 510                data->length = reclen;
 511                data->auxiliary.length_valid = 0x1;
 512                data->operation.operation = 0x0b;
 513                break;
 514        default:
 515                DBF_DEV_EVENT(DBF_ERR, device,
 516                            "fill LRE unknown opcode 0x%x", cmd);
 517                BUG();
 518        }
 519        set_ch_t(&data->seek_addr,
 520                 trk / private->rdc_data.trk_per_cyl,
 521                 trk % private->rdc_data.trk_per_cyl);
 522        data->search_arg.cyl = data->seek_addr.cyl;
 523        data->search_arg.head = data->seek_addr.head;
 524        data->search_arg.record = rec_on_trk;
 525}
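
/*
 * Worked example for the 3390 sector estimate in locate_record_ext() above
 * (illustrative only): with reclen = 4096 and rec_on_trk = 2,
 * dn = ceil_quot(4102, 232) = 18, d = 9 + ceil_quot(4096 + 6 * 19, 34) = 133
 * and sector = (49 + 1 * (10 + 133)) / 8 = 24.
 */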
 526
 527static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
 528                      unsigned int trk, unsigned int totrk, int cmd,
 529                      struct dasd_device *basedev, struct dasd_device *startdev,
 530                      unsigned int format, unsigned int rec_on_trk, int count,
 531                      unsigned int blksize, unsigned int tlf)
 532{
 533        struct dasd_eckd_private *basepriv, *startpriv;
 534        struct LRE_eckd_data *lredata;
 535        struct DE_eckd_data *dedata;
 536        int rc = 0;
 537
 538        basepriv = basedev->private;
 539        startpriv = startdev->private;
 540        dedata = &pfxdata->define_extent;
 541        lredata = &pfxdata->locate_record;
 542
 543        ccw->cmd_code = DASD_ECKD_CCW_PFX;
 544        ccw->flags = 0;
 545        if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
 546                ccw->count = sizeof(*pfxdata) + 2;
 547                ccw->cda = (__u32) __pa(pfxdata);
 548                memset(pfxdata, 0, sizeof(*pfxdata) + 2);
 549        } else {
 550                ccw->count = sizeof(*pfxdata);
 551                ccw->cda = (__u32) __pa(pfxdata);
 552                memset(pfxdata, 0, sizeof(*pfxdata));
 553        }
 554
 555        /* prefix data */
 556        if (format > 1) {
 557                DBF_DEV_EVENT(DBF_ERR, basedev,
 558                              "PFX LRE unknown format 0x%x", format);
 559                BUG();
 560                return -EINVAL;
 561        }
 562        pfxdata->format = format;
 563        pfxdata->base_address = basepriv->ned->unit_addr;
 564        pfxdata->base_lss = basepriv->ned->ID;
 565        pfxdata->validity.define_extent = 1;
 566
 567        /* private uid is kept up to date, conf_data may be outdated */
 568        if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
 569                pfxdata->validity.verify_base = 1;
 570
 571        if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
 572                pfxdata->validity.verify_base = 1;
 573                pfxdata->validity.hyper_pav = 1;
 574        }
 575
 576        rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
 577
 578        /*
 579         * For some commands the System Time Stamp is set in the define extent
 580         * data when XRC is supported. The validity of the time stamp must be
 581         * reflected in the prefix data as well.
 582         */
 583        if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
 584                pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid'   */
 585
 586        if (format == 1) {
 587                locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
 588                                  basedev, blksize, tlf);
 589        }
 590
 591        return rc;
 592}
 593
 594static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
 595                  unsigned int trk, unsigned int totrk, int cmd,
 596                  struct dasd_device *basedev, struct dasd_device *startdev)
 597{
 598        return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
 599                          0, 0, 0, 0, 0);
 600}
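
/*
 * prefix() is the format 0 shorthand: only the define extent part of the PFX
 * data is filled in.  Callers that also need the embedded locate record
 * extended data pass format 1 to prefix_LRE() together with the record and
 * track counts.
 */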
 601
 602static void
 603locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
 604              unsigned int rec_on_trk, int no_rec, int cmd,
 605              struct dasd_device * device, int reclen)
 606{
 607        struct dasd_eckd_private *private = device->private;
 608        int sector;
 609        int dn, d;
 610
 611        DBF_DEV_EVENT(DBF_INFO, device,
 612                  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
 613                  trk, rec_on_trk, no_rec, cmd, reclen);
 614
 615        ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
 616        ccw->flags = 0;
 617        ccw->count = 16;
 618        ccw->cda = (__u32) __pa(data);
 619
 620        memset(data, 0, sizeof(struct LO_eckd_data));
 621        sector = 0;
 622        if (rec_on_trk) {
 623                switch (private->rdc_data.dev_type) {
 624                case 0x3390:
 625                        dn = ceil_quot(reclen + 6, 232);
 626                        d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
 627                        sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
 628                        break;
 629                case 0x3380:
 630                        d = 7 + ceil_quot(reclen + 12, 32);
 631                        sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
 632                        break;
 633                }
 634        }
 635        data->sector = sector;
 636        data->count = no_rec;
 637        switch (cmd) {
 638        case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
 639                data->operation.orientation = 0x3;
 640                data->operation.operation = 0x03;
 641                break;
 642        case DASD_ECKD_CCW_READ_HOME_ADDRESS:
 643                data->operation.orientation = 0x3;
 644                data->operation.operation = 0x16;
 645                break;
 646        case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
 647                data->operation.orientation = 0x1;
 648                data->operation.operation = 0x03;
 649                data->count++;
 650                break;
 651        case DASD_ECKD_CCW_READ_RECORD_ZERO:
 652                data->operation.orientation = 0x3;
 653                data->operation.operation = 0x16;
 654                data->count++;
 655                break;
 656        case DASD_ECKD_CCW_WRITE:
 657        case DASD_ECKD_CCW_WRITE_MT:
 658        case DASD_ECKD_CCW_WRITE_KD:
 659        case DASD_ECKD_CCW_WRITE_KD_MT:
 660                data->auxiliary.last_bytes_used = 0x1;
 661                data->length = reclen;
 662                data->operation.operation = 0x01;
 663                break;
 664        case DASD_ECKD_CCW_WRITE_CKD:
 665        case DASD_ECKD_CCW_WRITE_CKD_MT:
 666                data->auxiliary.last_bytes_used = 0x1;
 667                data->length = reclen;
 668                data->operation.operation = 0x03;
 669                break;
 670        case DASD_ECKD_CCW_READ:
 671        case DASD_ECKD_CCW_READ_MT:
 672        case DASD_ECKD_CCW_READ_KD:
 673        case DASD_ECKD_CCW_READ_KD_MT:
 674                data->auxiliary.last_bytes_used = 0x1;
 675                data->length = reclen;
 676                data->operation.operation = 0x06;
 677                break;
 678        case DASD_ECKD_CCW_READ_CKD:
 679        case DASD_ECKD_CCW_READ_CKD_MT:
 680                data->auxiliary.last_bytes_used = 0x1;
 681                data->length = reclen;
 682                data->operation.operation = 0x16;
 683                break;
 684        case DASD_ECKD_CCW_READ_COUNT:
 685                data->operation.operation = 0x06;
 686                break;
 687        case DASD_ECKD_CCW_ERASE:
 688                data->length = reclen;
 689                data->auxiliary.last_bytes_used = 0x1;
 690                data->operation.operation = 0x0b;
 691                break;
 692        default:
 693                DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
 694                              "opcode 0x%x", cmd);
 695        }
 696        set_ch_t(&data->seek_addr,
 697                 trk / private->rdc_data.trk_per_cyl,
 698                 trk % private->rdc_data.trk_per_cyl);
 699        data->search_arg.cyl = data->seek_addr.cyl;
 700        data->search_arg.head = data->seek_addr.head;
 701        data->search_arg.record = rec_on_trk;
 702}
 703
 704/*
 705 * Returns 1 if the block is one of the special blocks that needs
 706 * to get read/written with the KD variant of the command.
 707 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 708 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 709 * Luckily the KD variants differ only by one bit (0x08) from the
 710 * normal variant. So don't wonder about code like:
 711 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 712 *         ccw->cmd_code |= 0x8;
 713 */
 714static inline int
 715dasd_eckd_cdl_special(int blk_per_trk, int recid)
 716{
 717        if (recid < 3)
 718                return 1;
 719        if (recid < blk_per_trk)
 720                return 0;
 721        if (recid < 2 * blk_per_trk)
 722                return 1;
 723        return 0;
 724}
 725
 726/*
 727 * Returns the record size for the special blocks of the cdl format.
 728 * Only returns something useful if dasd_eckd_cdl_special is true
 729 * for the recid.
 730 */
 731static inline int
 732dasd_eckd_cdl_reclen(int recid)
 733{
 734        if (recid < 3)
 735                return sizes_trk0[recid];
 736        return LABEL_SIZE;
 737}
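
/*
 * Illustrative CDL layout for 4k blocks on a 3390 (blk_per_trk = 12): records
 * 0, 1 and 2 of track 0 are special with lengths 28, 148 and 84 (sizes_trk0),
 * records 3..11 are regular blocks, records 12..23 (all of track 1) are
 * special again with LABEL_SIZE (140) bytes each, and from record 24 on only
 * regular blocks follow.
 */
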
 738/* create unique id from private structure. */
 739static void create_uid(struct dasd_eckd_private *private)
 740{
 741        int count;
 742        struct dasd_uid *uid;
 743
 744        uid = &private->uid;
 745        memset(uid, 0, sizeof(struct dasd_uid));
 746        memcpy(uid->vendor, private->ned->HDA_manufacturer,
 747               sizeof(uid->vendor) - 1);
 748        EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
 749        memcpy(uid->serial, private->ned->HDA_location,
 750               sizeof(uid->serial) - 1);
 751        EBCASC(uid->serial, sizeof(uid->serial) - 1);
 752        uid->ssid = private->gneq->subsystemID;
 753        uid->real_unit_addr = private->ned->unit_addr;
 754        if (private->sneq) {
 755                uid->type = private->sneq->sua_flags;
 756                if (uid->type == UA_BASE_PAV_ALIAS)
 757                        uid->base_unit_addr = private->sneq->base_unit_addr;
 758        } else {
 759                uid->type = UA_BASE_DEVICE;
 760        }
 761        if (private->vdsneq) {
 762                for (count = 0; count < 16; count++) {
 763                        sprintf(uid->vduit+2*count, "%02x",
 764                                private->vdsneq->uit[count]);
 765                }
 766        }
 767}
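
/*
 * The UID assembled here is what the error messages further down in this
 * file print as "vendor.serial.ssid.unit_addr", optionally followed by
 * ".vduit" (see the "%s.%s.%04x.%02x" snprintf calls below).
 */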
 768
 769/*
 770 * Generate device unique id that specifies the physical device.
 771 */
 772static int dasd_eckd_generate_uid(struct dasd_device *device)
 773{
 774        struct dasd_eckd_private *private = device->private;
 775        unsigned long flags;
 776
 777        if (!private)
 778                return -ENODEV;
 779        if (!private->ned || !private->gneq)
 780                return -ENODEV;
 781        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 782        create_uid(private);
 783        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 784        return 0;
 785}
 786
 787static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
 788{
 789        struct dasd_eckd_private *private = device->private;
 790        unsigned long flags;
 791
 792        if (private) {
 793                spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 794                *uid = private->uid;
 795                spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 796                return 0;
 797        }
 798        return -EINVAL;
 799}
 800
 801/*
 802 * compare device UID with data of a given dasd_eckd_private structure
 803 * return 0 for match
 804 */
 805static int dasd_eckd_compare_path_uid(struct dasd_device *device,
 806                                      struct dasd_eckd_private *private)
 807{
 808        struct dasd_uid device_uid;
 809
 810        create_uid(private);
 811        dasd_eckd_get_uid(device, &device_uid);
 812
 813        return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
 814}
 815
 816static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
 817                                   struct dasd_ccw_req *cqr,
 818                                   __u8 *rcd_buffer,
 819                                   __u8 lpm)
 820{
 821        struct ccw1 *ccw;
 822        /*
 823         * buffer has to start with EBCDIC "V1.0" to show
 824         * support for virtual device SNEQ
 825         */
 826        rcd_buffer[0] = 0xE5;
 827        rcd_buffer[1] = 0xF1;
 828        rcd_buffer[2] = 0x4B;
 829        rcd_buffer[3] = 0xF0;
 830
 831        ccw = cqr->cpaddr;
 832        ccw->cmd_code = DASD_ECKD_CCW_RCD;
 833        ccw->flags = 0;
 834        ccw->cda = (__u32)(addr_t)rcd_buffer;
 835        ccw->count = DASD_ECKD_RCD_DATA_SIZE;
 836        cqr->magic = DASD_ECKD_MAGIC;
 837
 838        cqr->startdev = device;
 839        cqr->memdev = device;
 840        cqr->block = NULL;
 841        cqr->expires = 10*HZ;
 842        cqr->lpm = lpm;
 843        cqr->retries = 256;
 844        cqr->buildclk = get_tod_clock();
 845        cqr->status = DASD_CQR_FILLED;
 846        set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
 847}
 848
 849/*
  850 * Wakeup helper for read_conf.
  851 * If the cqr is not done and needs some error recovery,
  852 * the buffer has to be re-initialized with the EBCDIC "V1.0"
  853 * to show support for the virtual device SNEQ
 854 */
 855static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
 856{
 857        struct ccw1 *ccw;
 858        __u8 *rcd_buffer;
 859
 860        if (cqr->status !=  DASD_CQR_DONE) {
 861                ccw = cqr->cpaddr;
 862                rcd_buffer = (__u8 *)((addr_t) ccw->cda);
 863                memset(rcd_buffer, 0, sizeof(*rcd_buffer));
 864
 865                rcd_buffer[0] = 0xE5;
 866                rcd_buffer[1] = 0xF1;
 867                rcd_buffer[2] = 0x4B;
 868                rcd_buffer[3] = 0xF0;
 869        }
 870        dasd_wakeup_cb(cqr, data);
 871}
 872
 873static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
 874                                           struct dasd_ccw_req *cqr,
 875                                           __u8 *rcd_buffer,
 876                                           __u8 lpm)
 877{
 878        struct ciw *ciw;
 879        int rc;
 880        /*
  881         * sanity check: scan for RCD command in extended SenseID data;
  882         * some devices do not support RCD
 883         */
 884        ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
 885        if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
 886                return -EOPNOTSUPP;
 887
 888        dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
 889        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 890        set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
 891        cqr->retries = 5;
 892        cqr->callback = read_conf_cb;
 893        rc = dasd_sleep_on_immediatly(cqr);
 894        return rc;
 895}
 896
 897static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
 898                                   void **rcd_buffer,
 899                                   int *rcd_buffer_size, __u8 lpm)
 900{
 901        struct ciw *ciw;
 902        char *rcd_buf = NULL;
 903        int ret;
 904        struct dasd_ccw_req *cqr;
 905
 906        /*
  907         * sanity check: scan for RCD command in extended SenseID data;
  908         * some devices do not support RCD
 909         */
 910        ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
 911        if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
 912                ret = -EOPNOTSUPP;
 913                goto out_error;
 914        }
 915        rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
 916        if (!rcd_buf) {
 917                ret = -ENOMEM;
 918                goto out_error;
 919        }
 920        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
  921                                   0, /* use rcd_buf as data area */
 922                                   device, NULL);
 923        if (IS_ERR(cqr)) {
 924                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 925                              "Could not allocate RCD request");
 926                ret = -ENOMEM;
 927                goto out_error;
 928        }
 929        dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
 930        cqr->callback = read_conf_cb;
 931        ret = dasd_sleep_on(cqr);
 932        /*
 933         * on success we update the user input parms
 934         */
 935        dasd_sfree_request(cqr, cqr->memdev);
 936        if (ret)
 937                goto out_error;
 938
 939        *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
 940        *rcd_buffer = rcd_buf;
 941        return 0;
 942out_error:
 943        kfree(rcd_buf);
 944        *rcd_buffer = NULL;
 945        *rcd_buffer_size = 0;
 946        return ret;
 947}
 948
 949static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
 950{
 951
 952        struct dasd_sneq *sneq;
 953        int i, count;
 954
 955        private->ned = NULL;
 956        private->sneq = NULL;
 957        private->vdsneq = NULL;
 958        private->gneq = NULL;
 959        count = private->conf_len / sizeof(struct dasd_sneq);
 960        sneq = (struct dasd_sneq *)private->conf_data;
 961        for (i = 0; i < count; ++i) {
 962                if (sneq->flags.identifier == 1 && sneq->format == 1)
 963                        private->sneq = sneq;
 964                else if (sneq->flags.identifier == 1 && sneq->format == 4)
 965                        private->vdsneq = (struct vd_sneq *)sneq;
 966                else if (sneq->flags.identifier == 2)
 967                        private->gneq = (struct dasd_gneq *)sneq;
 968                else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
 969                        private->ned = (struct dasd_ned *)sneq;
 970                sneq++;
 971        }
 972        if (!private->ned || !private->gneq) {
 973                private->ned = NULL;
 974                private->sneq = NULL;
 975                private->vdsneq = NULL;
 976                private->gneq = NULL;
 977                return -EINVAL;
 978        }
 979        return 0;
 980
  981}
 982
 983static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
 984{
 985        struct dasd_gneq *gneq;
 986        int i, count, found;
 987
 988        count = conf_len / sizeof(*gneq);
 989        gneq = (struct dasd_gneq *)conf_data;
 990        found = 0;
 991        for (i = 0; i < count; ++i) {
 992                if (gneq->flags.identifier == 2) {
 993                        found = 1;
 994                        break;
 995                }
 996                gneq++;
 997        }
 998        if (found)
 999                return ((char *)gneq)[18] & 0x07;
1000        else
1001                return 0;
1002}
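
/*
 * The value returned by dasd_eckd_path_access() above is the path access
 * field taken from byte 18 of the GNEQ (low three bits).  The callers below
 * map 0x02 to dasd_path_add_nppm() and 0x03 to dasd_path_add_ppm().
 */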
1003
1004static void dasd_eckd_store_conf_data(struct dasd_device *device,
1005                                      struct dasd_conf_data *conf_data, int chp)
1006{
1007        struct channel_path_desc_fmt0 *chp_desc;
1008        struct subchannel_id sch_id;
1009
1010        ccw_device_get_schid(device->cdev, &sch_id);
1011        /*
 1012         * path handling and read_conf allocate data;
 1013         * free it before replacing the pointer
1014         */
1015        kfree(device->path[chp].conf_data);
1016        device->path[chp].conf_data = conf_data;
1017        device->path[chp].cssid = sch_id.cssid;
1018        device->path[chp].ssid = sch_id.ssid;
1019        chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
1020        if (chp_desc)
1021                device->path[chp].chpid = chp_desc->chpid;
1022        kfree(chp_desc);
1023}
1024
1025static void dasd_eckd_clear_conf_data(struct dasd_device *device)
1026{
1027        struct dasd_eckd_private *private = device->private;
1028        int i;
1029
1030        private->conf_data = NULL;
1031        private->conf_len = 0;
1032        for (i = 0; i < 8; i++) {
1033                kfree(device->path[i].conf_data);
1034                device->path[i].conf_data = NULL;
1035                device->path[i].cssid = 0;
1036                device->path[i].ssid = 0;
1037                device->path[i].chpid = 0;
1038                dasd_path_notoper(device, i);
1039        }
1040}
1041
1042static void dasd_eckd_read_fc_security(struct dasd_device *device)
1043{
1044        struct dasd_eckd_private *private = device->private;
1045        u8 esm_valid;
1046        u8 esm[8];
1047        int chp;
1048        int rc;
1049
1050        rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
1051        if (rc) {
1052                for (chp = 0; chp < 8; chp++)
1053                        device->path[chp].fc_security = 0;
1054                return;
1055        }
1056
1057        for (chp = 0; chp < 8; chp++) {
1058                if (esm_valid & (0x80 >> chp))
1059                        device->path[chp].fc_security = esm[chp];
1060                else
1061                        device->path[chp].fc_security = 0;
1062        }
1063}
1064
1065static int dasd_eckd_read_conf(struct dasd_device *device)
1066{
1067        void *conf_data;
1068        int conf_len, conf_data_saved;
1069        int rc, path_err, pos;
1070        __u8 lpm, opm;
1071        struct dasd_eckd_private *private, path_private;
1072        struct dasd_uid *uid;
1073        char print_path_uid[60], print_device_uid[60];
1074
1075        private = device->private;
1076        opm = ccw_device_get_path_mask(device->cdev);
1077        conf_data_saved = 0;
1078        path_err = 0;
1079        /* get configuration data per operational path */
1080        for (lpm = 0x80; lpm; lpm>>= 1) {
1081                if (!(lpm & opm))
1082                        continue;
1083                rc = dasd_eckd_read_conf_lpm(device, &conf_data,
1084                                             &conf_len, lpm);
1085                if (rc && rc != -EOPNOTSUPP) {  /* -EOPNOTSUPP is ok */
1086                        DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1087                                        "Read configuration data returned "
1088                                        "error %d", rc);
1089                        return rc;
1090                }
1091                if (conf_data == NULL) {
1092                        DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1093                                        "No configuration data "
1094                                        "retrieved");
1095                        /* no further analysis possible */
1096                        dasd_path_add_opm(device, opm);
1097                        continue;       /* no error */
1098                }
1099                /* save first valid configuration data */
1100                if (!conf_data_saved) {
1101                        /* initially clear previously stored conf_data */
1102                        dasd_eckd_clear_conf_data(device);
1103                        private->conf_data = conf_data;
1104                        private->conf_len = conf_len;
1105                        if (dasd_eckd_identify_conf_parts(private)) {
1106                                private->conf_data = NULL;
1107                                private->conf_len = 0;
1108                                kfree(conf_data);
1109                                continue;
1110                        }
1111                        /*
 1112                         * build the device UID so that other path data
 1113                         * can be compared to it
1114                         */
1115                        dasd_eckd_generate_uid(device);
1116                        conf_data_saved++;
1117                } else {
1118                        path_private.conf_data = conf_data;
1119                        path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
1120                        if (dasd_eckd_identify_conf_parts(
1121                                    &path_private)) {
1122                                path_private.conf_data = NULL;
1123                                path_private.conf_len = 0;
1124                                kfree(conf_data);
1125                                continue;
1126                        }
1127                        if (dasd_eckd_compare_path_uid(
1128                                    device, &path_private)) {
1129                                uid = &path_private.uid;
1130                                if (strlen(uid->vduit) > 0)
1131                                        snprintf(print_path_uid,
1132                                                 sizeof(print_path_uid),
1133                                                 "%s.%s.%04x.%02x.%s",
1134                                                 uid->vendor, uid->serial,
1135                                                 uid->ssid, uid->real_unit_addr,
1136                                                 uid->vduit);
1137                                else
1138                                        snprintf(print_path_uid,
1139                                                 sizeof(print_path_uid),
1140                                                 "%s.%s.%04x.%02x",
1141                                                 uid->vendor, uid->serial,
1142                                                 uid->ssid,
1143                                                 uid->real_unit_addr);
1144                                uid = &private->uid;
1145                                if (strlen(uid->vduit) > 0)
1146                                        snprintf(print_device_uid,
1147                                                 sizeof(print_device_uid),
1148                                                 "%s.%s.%04x.%02x.%s",
1149                                                 uid->vendor, uid->serial,
1150                                                 uid->ssid, uid->real_unit_addr,
1151                                                 uid->vduit);
1152                                else
1153                                        snprintf(print_device_uid,
1154                                                 sizeof(print_device_uid),
1155                                                 "%s.%s.%04x.%02x",
1156                                                 uid->vendor, uid->serial,
1157                                                 uid->ssid,
1158                                                 uid->real_unit_addr);
1159                                dev_err(&device->cdev->dev,
1160                                        "Not all channel paths lead to "
1161                                        "the same device, path %02X leads to "
1162                                        "device %s instead of %s\n", lpm,
1163                                        print_path_uid, print_device_uid);
1164                                path_err = -EINVAL;
1165                                dasd_path_add_cablepm(device, lpm);
1166                                continue;
1167                        }
1168                        path_private.conf_data = NULL;
1169                        path_private.conf_len = 0;
1170                }
1171
1172                pos = pathmask_to_pos(lpm);
1173                dasd_eckd_store_conf_data(device, conf_data, pos);
1174
1175                switch (dasd_eckd_path_access(conf_data, conf_len)) {
1176                case 0x02:
1177                        dasd_path_add_nppm(device, lpm);
1178                        break;
1179                case 0x03:
1180                        dasd_path_add_ppm(device, lpm);
1181                        break;
1182                }
1183                if (!dasd_path_get_opm(device)) {
1184                        dasd_path_set_opm(device, lpm);
1185                        dasd_generic_path_operational(device);
1186                } else {
1187                        dasd_path_add_opm(device, lpm);
1188                }
1189        }
1190
1191        dasd_eckd_read_fc_security(device);
1192
1193        return path_err;
1194}
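
/*
 * Note on the loop in dasd_eckd_read_conf() above (illustrative): lpm walks
 * the eight possible channel paths as single-bit masks 0x80, 0x40, ..., 0x01,
 * and pathmask_to_pos() maps 0x80 to array index 0 through 0x01 to index 7,
 * so the configuration data ends up stored per path position in
 * device->path[].
 */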
1195
1196static u32 get_fcx_max_data(struct dasd_device *device)
1197{
1198        struct dasd_eckd_private *private = device->private;
1199        int fcx_in_css, fcx_in_gneq, fcx_in_features;
1200        unsigned int mdc;
1201        int tpm;
1202
1203        if (dasd_nofcx)
1204                return 0;
1205        /* is transport mode supported? */
1206        fcx_in_css = css_general_characteristics.fcx;
1207        fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
1208        fcx_in_features = private->features.feature[40] & 0x80;
1209        tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1210
1211        if (!tpm)
1212                return 0;
1213
1214        mdc = ccw_device_get_mdc(device->cdev, 0);
1215        if (mdc == 0) {
1216                dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1217                return 0;
1218        } else {
1219                return (u32)mdc * FCX_MAX_DATA_FACTOR;
1220        }
1221}
1222
1223static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1224{
1225        struct dasd_eckd_private *private = device->private;
1226        unsigned int mdc;
1227        u32 fcx_max_data;
1228
1229        if (private->fcx_max_data) {
1230                mdc = ccw_device_get_mdc(device->cdev, lpm);
1231                if (mdc == 0) {
1232                        dev_warn(&device->cdev->dev,
1233                                 "Detecting the maximum data size for zHPF "
1234                                 "requests failed (rc=%d) for a new path %x\n",
1235                                 mdc, lpm);
1236                        return mdc;
1237                }
1238                fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
1239                if (fcx_max_data < private->fcx_max_data) {
1240                        dev_warn(&device->cdev->dev,
1241                                 "The maximum data size for zHPF requests %u "
1242                                 "on a new path %x is below the active maximum "
1243                                 "%u\n", fcx_max_data, lpm,
1244                                 private->fcx_max_data);
1245                        return -EACCES;
1246                }
1247        }
1248        return 0;
1249}
1250
1251static int rebuild_device_uid(struct dasd_device *device,
1252                              struct pe_handler_work_data *data)
1253{
1254        struct dasd_eckd_private *private = device->private;
1255        __u8 lpm, opm = dasd_path_get_opm(device);
1256        int rc = -ENODEV;
1257
1258        for (lpm = 0x80; lpm; lpm >>= 1) {
1259                if (!(lpm & opm))
1260                        continue;
1261                memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1262                memset(&data->cqr, 0, sizeof(data->cqr));
1263                data->cqr.cpaddr = &data->ccw;
1264                rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1265                                                     data->rcd_buffer,
1266                                                     lpm);
1267
1268                if (rc) {
1269                        if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
1270                                continue;
1271                        DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1272                                        "Read configuration data "
1273                                        "returned error %d", rc);
1274                        break;
1275                }
1276                memcpy(private->conf_data, data->rcd_buffer,
1277                       DASD_ECKD_RCD_DATA_SIZE);
1278                if (dasd_eckd_identify_conf_parts(private)) {
1279                        rc = -ENODEV;
1280                } else /* first valid path is enough */
1281                        break;
1282        }
1283
1284        if (!rc)
1285                rc = dasd_eckd_generate_uid(device);
1286
1287        return rc;
1288}
1289
1290static void dasd_eckd_path_available_action(struct dasd_device *device,
1291                                            struct pe_handler_work_data *data)
1292{
1293        struct dasd_eckd_private path_private;
1294        struct dasd_uid *uid;
1295        __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
1296        __u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
1297        struct dasd_conf_data *conf_data;
1298        unsigned long flags;
1299        char print_uid[60];
1300        int rc, pos;
1301
1302        opm = 0;
1303        npm = 0;
1304        ppm = 0;
1305        epm = 0;
1306        hpfpm = 0;
1307        cablepm = 0;
1308
1309        for (lpm = 0x80; lpm; lpm >>= 1) {
1310                if (!(lpm & data->tbvpm))
1311                        continue;
1312                memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1313                memset(&data->cqr, 0, sizeof(data->cqr));
1314                data->cqr.cpaddr = &data->ccw;
1315                rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1316                                                     data->rcd_buffer,
1317                                                     lpm);
1318                if (!rc) {
1319                        switch (dasd_eckd_path_access(data->rcd_buffer,
1320                                                      DASD_ECKD_RCD_DATA_SIZE)
1321                                ) {
1322                        case 0x02:
1323                                npm |= lpm;
1324                                break;
1325                        case 0x03:
1326                                ppm |= lpm;
1327                                break;
1328                        }
1329                        opm |= lpm;
1330                } else if (rc == -EOPNOTSUPP) {
1331                        DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1332                                        "path verification: No configuration "
1333                                        "data retrieved");
1334                        opm |= lpm;
1335                } else if (rc == -EAGAIN) {
1336                        DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1337                                        "path verification: device is stopped,"
1338                                        " try again later");
1339                        epm |= lpm;
1340                } else {
1341                        dev_warn(&device->cdev->dev,
1342                                 "Reading device feature codes failed "
1343                                 "(rc=%d) for new path %x\n", rc, lpm);
1344                        continue;
1345                }
1346                if (verify_fcx_max_data(device, lpm)) {
1347                        opm &= ~lpm;
1348                        npm &= ~lpm;
1349                        ppm &= ~lpm;
1350                        hpfpm |= lpm;
1351                        continue;
1352                }
1353
1354                /*
 1355                 * save conf_data for the comparison below;
 1356                 * rebuild_device_uid may have changed
 1357                 * the original data
1358                 */
1359                memcpy(&path_rcd_buf, data->rcd_buffer,
1360                       DASD_ECKD_RCD_DATA_SIZE);
1361                path_private.conf_data = (void *) &path_rcd_buf;
1362                path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
1363                if (dasd_eckd_identify_conf_parts(&path_private)) {
1364                        path_private.conf_data = NULL;
1365                        path_private.conf_len = 0;
1366                        continue;
1367                }
1368
1369                /*
 1370                 * compare the path UID with the device UID only if at least
 1371                 * one valid path is left;
 1372                 * otherwise the device UID may have changed and
 1373                 * the first working path UID will be used as the device UID
1374                 */
1375                if (dasd_path_get_opm(device) &&
1376                    dasd_eckd_compare_path_uid(device, &path_private)) {
1377                        /*
 1378                         * the comparison was not successful;
 1379                         * rebuild the device UID with at least one
 1380                         * known path in case a z/VM hyperswap command
 1381                         * has changed the device
 1382                         *
 1383                         * after this, compare again
 1384                         *
 1385                         * if either the rebuild or the recompare fails,
 1386                         * the path cannot be used
1387                         */
1388                        if (rebuild_device_uid(device, data) ||
1389                            dasd_eckd_compare_path_uid(
1390                                    device, &path_private)) {
1391                                uid = &path_private.uid;
1392                                if (strlen(uid->vduit) > 0)
1393                                        snprintf(print_uid, sizeof(print_uid),
1394                                                 "%s.%s.%04x.%02x.%s",
1395                                                 uid->vendor, uid->serial,
1396                                                 uid->ssid, uid->real_unit_addr,
1397                                                 uid->vduit);
1398                                else
1399                                        snprintf(print_uid, sizeof(print_uid),
1400                                                 "%s.%s.%04x.%02x",
1401                                                 uid->vendor, uid->serial,
1402                                                 uid->ssid,
1403                                                 uid->real_unit_addr);
1404                                dev_err(&device->cdev->dev,
1405                                        "The newly added channel path %02X "
1406                                        "will not be used because it leads "
1407                                        "to a different device %s\n",
1408                                        lpm, print_uid);
1409                                opm &= ~lpm;
1410                                npm &= ~lpm;
1411                                ppm &= ~lpm;
1412                                cablepm |= lpm;
1413                                continue;
1414                        }
1415                }
1416
1417                conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
1418                if (conf_data) {
1419                        memcpy(conf_data, data->rcd_buffer,
1420                               DASD_ECKD_RCD_DATA_SIZE);
1421                }
1422                pos = pathmask_to_pos(lpm);
1423                dasd_eckd_store_conf_data(device, conf_data, pos);
1424
1425                /*
1426                 * There is a small chance that a path is lost again between
1427                 * the above path verification and the following modification
1428                 * of the device opm mask. We could avoid that race here by
1429                 * using yet another path mask, but we would rather deal with
1430                 * this unlikely situation in dasd_start_IO.
1431                 */
1432                spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1433                if (!dasd_path_get_opm(device) && opm) {
1434                        dasd_path_set_opm(device, opm);
1435                        dasd_generic_path_operational(device);
1436                } else {
1437                        dasd_path_add_opm(device, opm);
1438                }
1439                dasd_path_add_nppm(device, npm);
1440                dasd_path_add_ppm(device, ppm);
1441                dasd_path_add_tbvpm(device, epm);
1442                dasd_path_add_cablepm(device, cablepm);
1443                dasd_path_add_nohpfpm(device, hpfpm);
1444                spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1445
1446                dasd_path_create_kobj(device, pos);
1447        }
1448}
1449
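    /*
     * Worker function for the path event handler: it reschedules itself while
     * the device is suspended or while another verification is running, then
     * performs the path available action and, if requested, re-reads the
     * FC security information.
     */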
1450static void do_pe_handler_work(struct work_struct *work)
1451{
1452        struct pe_handler_work_data *data;
1453        struct dasd_device *device;
1454
1455        data = container_of(work, struct pe_handler_work_data, worker);
1456        device = data->device;
1457
1458        /* delay path verification until the device has been resumed */
1459        if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
1460                schedule_work(work);
1461                return;
1462        }
1463        /* check if path verification is already running and delay if so */
1464        if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
1465                schedule_work(work);
1466                return;
1467        }
1468
1469        if (data->tbvpm)
1470                dasd_eckd_path_available_action(device, data);
1471        if (data->fcsecpm)
1472                dasd_eckd_read_fc_security(device);
1473
1474        clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
1475        dasd_put_device(device);
1476        if (data->isglobal)
1477                mutex_unlock(&dasd_pe_handler_mutex);
1478        else
1479                kfree(data);
1480}
1481
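    /*
     * Queue path event handling to a worker. If no memory is available for a
     * private work item, fall back to the static pe_handler_worker, which is
     * serialized by dasd_pe_handler_mutex; if that one is already in use,
     * give up with -ENOMEM.
     */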
1482static int dasd_eckd_pe_handler(struct dasd_device *device,
1483                                __u8 tbvpm, __u8 fcsecpm)
1484{
1485        struct pe_handler_work_data *data;
1486
1487        data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
1488        if (!data) {
1489                if (mutex_trylock(&dasd_pe_handler_mutex)) {
1490                        data = pe_handler_worker;
1491                        data->isglobal = 1;
1492                } else {
1493                        return -ENOMEM;
1494                }
1495        } else {
1496                memset(data, 0, sizeof(*data));
1497                data->isglobal = 0;
1498        }
1499        INIT_WORK(&data->worker, do_pe_handler_work);
1500        dasd_get_device(device);
1501        data->device = device;
1502        data->tbvpm = tbvpm;
1503        data->fcsecpm = fcsecpm;
1504        schedule_work(&data->worker);
1505        return 0;
1506}
1507
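    /*
     * Trigger a (re)verification of paths: mark either the given path mask or
     * all currently not operational paths as "to be verified" and schedule the
     * device tasklet. Also (re)determines fcx_max_data if it is not set yet.
     */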
1508static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
1509{
1510        struct dasd_eckd_private *private = device->private;
1511        unsigned long flags;
1512
1513        if (!private->fcx_max_data)
1514                private->fcx_max_data = get_fcx_max_data(device);
1515        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1516        dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
1517        dasd_schedule_device_bh(device);
1518        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1519}
1520
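    /*
     * Read the feature codes of the storage server via a PSF/RSSD channel
     * program (suborder 0x41) and cache them in private->features.
     */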
1521static int dasd_eckd_read_features(struct dasd_device *device)
1522{
1523        struct dasd_eckd_private *private = device->private;
1524        struct dasd_psf_prssd_data *prssdp;
1525        struct dasd_rssd_features *features;
1526        struct dasd_ccw_req *cqr;
1527        struct ccw1 *ccw;
1528        int rc;
1529
1530        memset(&private->features, 0, sizeof(struct dasd_rssd_features));
1531        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
1532                                   (sizeof(struct dasd_psf_prssd_data) +
1533                                    sizeof(struct dasd_rssd_features)),
1534                                   device, NULL);
1535        if (IS_ERR(cqr)) {
1536                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
1537                                "allocate initialization request");
1538                return PTR_ERR(cqr);
1539        }
1540        cqr->startdev = device;
1541        cqr->memdev = device;
1542        cqr->block = NULL;
1543        cqr->retries = 256;
1544        cqr->expires = 10 * HZ;
1545
1546        /* Prepare for Read Subsystem Data */
1547        prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1548        memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
1549        prssdp->order = PSF_ORDER_PRSSD;
1550        prssdp->suborder = 0x41;        /* Read Feature Codes */
1551        /* all other bytes of prssdp must be zero */
1552
1553        ccw = cqr->cpaddr;
1554        ccw->cmd_code = DASD_ECKD_CCW_PSF;
1555        ccw->count = sizeof(struct dasd_psf_prssd_data);
1556        ccw->flags |= CCW_FLAG_CC;
1557        ccw->cda = (__u32)(addr_t) prssdp;
1558
1559        /* Read Subsystem Data - feature codes */
1560        features = (struct dasd_rssd_features *) (prssdp + 1);
1561        memset(features, 0, sizeof(struct dasd_rssd_features));
1562
1563        ccw++;
1564        ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1565        ccw->count = sizeof(struct dasd_rssd_features);
1566        ccw->cda = (__u32)(addr_t) features;
1567
1568        cqr->buildclk = get_tod_clock();
1569        cqr->status = DASD_CQR_FILLED;
1570        rc = dasd_sleep_on(cqr);
1571        if (rc == 0) {
1572                prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1573                features = (struct dasd_rssd_features *) (prssdp + 1);
1574                memcpy(&private->features, features,
1575                       sizeof(struct dasd_rssd_features));
1576        } else
1577                dev_warn(&device->cdev->dev, "Reading device feature codes"
1578                         " failed with rc=%d\n", rc);
1579        dasd_sfree_request(cqr, cqr->memdev);
1580        return rc;
1581}
1582
1583/* Read Volume Information - Volume Storage Query */
1584static int dasd_eckd_read_vol_info(struct dasd_device *device)
1585{
1586        struct dasd_eckd_private *private = device->private;
1587        struct dasd_psf_prssd_data *prssdp;
1588        struct dasd_rssd_vsq *vsq;
1589        struct dasd_ccw_req *cqr;
1590        struct ccw1 *ccw;
1591        int useglobal;
1592        int rc;
1593
1594        /* This command cannot be executed on an alias device */
1595        if (private->uid.type == UA_BASE_PAV_ALIAS ||
1596            private->uid.type == UA_HYPER_PAV_ALIAS)
1597                return 0;
1598
1599        useglobal = 0;
1600        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1601                                   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
1602        if (IS_ERR(cqr)) {
1603                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1604                                "Could not allocate initialization request");
1605                mutex_lock(&dasd_vol_info_mutex);
1606                useglobal = 1;
1607                cqr = &dasd_vol_info_req->cqr;
1608                memset(cqr, 0, sizeof(*cqr));
1609                memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
1610                cqr->cpaddr = &dasd_vol_info_req->ccw;
1611                cqr->data = &dasd_vol_info_req->data;
1612                cqr->magic = DASD_ECKD_MAGIC;
1613        }
1614
1615        /* Prepare for Read Subsystem Data */
1616        prssdp = cqr->data;
1617        prssdp->order = PSF_ORDER_PRSSD;
1618        prssdp->suborder = PSF_SUBORDER_VSQ;    /* Volume Storage Query */
1619        prssdp->lss = private->ned->ID;
1620        prssdp->volume = private->ned->unit_addr;
1621
1622        ccw = cqr->cpaddr;
1623        ccw->cmd_code = DASD_ECKD_CCW_PSF;
1624        ccw->count = sizeof(*prssdp);
1625        ccw->flags |= CCW_FLAG_CC;
1626        ccw->cda = (__u32)(addr_t)prssdp;
1627
1628        /* Read Subsystem Data - Volume Storage Query */
1629        vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
1630        memset(vsq, 0, sizeof(*vsq));
1631
1632        ccw++;
1633        ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1634        ccw->count = sizeof(*vsq);
1635        ccw->flags |= CCW_FLAG_SLI;
1636        ccw->cda = (__u32)(addr_t)vsq;
1637
1638        cqr->buildclk = get_tod_clock();
1639        cqr->status = DASD_CQR_FILLED;
1640        cqr->startdev = device;
1641        cqr->memdev = device;
1642        cqr->block = NULL;
1643        cqr->retries = 256;
1644        cqr->expires = device->default_expires * HZ;
1645        /* The command might not be supported. Suppress the error output */
1646        __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1647
1648        rc = dasd_sleep_on_interruptible(cqr);
1649        if (rc == 0) {
1650                memcpy(&private->vsq, vsq, sizeof(*vsq));
1651        } else {
1652                DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1653                                "Reading the volume storage information failed with rc=%d", rc);
1654        }
1655
1656        if (useglobal)
1657                mutex_unlock(&dasd_vol_info_mutex);
1658        else
1659                dasd_sfree_request(cqr, cqr->memdev);
1660
1661        return rc;
1662}
1663
1664static int dasd_eckd_is_ese(struct dasd_device *device)
1665{
1666        struct dasd_eckd_private *private = device->private;
1667
1668        return private->vsq.vol_info.ese;
1669}
1670
1671static int dasd_eckd_ext_pool_id(struct dasd_device *device)
1672{
1673        struct dasd_eckd_private *private = device->private;
1674
1675        return private->vsq.extent_pool_id;
1676}
1677
1678/*
1679 * This value represents the total amount of available space. As more space is
1680 * allocated by ESE volumes, this value will decrease.
1681 * The data for this value is therefore updated on every call.
1682 */
1683static int dasd_eckd_space_configured(struct dasd_device *device)
1684{
1685        struct dasd_eckd_private *private = device->private;
1686        int rc;
1687
1688        rc = dasd_eckd_read_vol_info(device);
1689
1690        return rc ? : private->vsq.space_configured;
1691}
1692
1693/*
1694 * The value of space allocated by an ESE volume may have changed and is
1695 * therefore updated on every call.
1696 */
1697static int dasd_eckd_space_allocated(struct dasd_device *device)
1698{
1699        struct dasd_eckd_private *private = device->private;
1700        int rc;
1701
1702        rc = dasd_eckd_read_vol_info(device);
1703
1704        return rc ? : private->vsq.space_allocated;
1705}
1706
1707static int dasd_eckd_logical_capacity(struct dasd_device *device)
1708{
1709        struct dasd_eckd_private *private = device->private;
1710
1711        return private->vsq.logical_capacity;
1712}
1713
1714static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
1715{
1716        struct ext_pool_exhaust_work_data *data;
1717        struct dasd_device *device;
1718        struct dasd_device *base;
1719
1720        data = container_of(work, struct ext_pool_exhaust_work_data, worker);
1721        device = data->device;
1722        base = data->base;
1723
1724        if (!base)
1725                base = device;
1726        if (dasd_eckd_space_configured(base) != 0) {
1727                dasd_generic_space_avail(device);
1728        } else {
1729                dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
1730                DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
1731        }
1732
1733        dasd_put_device(device);
1734        kfree(data);
1735}
1736
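    /*
     * Schedule a worker that re-reads the volume storage information and
     * either signals available space or warns that the extent pool is
     * exhausted.
     */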
1737static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
1738                                      struct dasd_ccw_req *cqr)
1739{
1740        struct ext_pool_exhaust_work_data *data;
1741
1742        data = kzalloc(sizeof(*data), GFP_ATOMIC);
1743        if (!data)
1744                return -ENOMEM;
1745        INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
1746        dasd_get_device(device);
1747        data->device = device;
1748
1749        if (cqr->block)
1750                data->base = cqr->block->base;
1751        else if (cqr->basedev)
1752                data->base = cqr->basedev;
1753        else
1754                data->base = NULL;
1755
1756        schedule_work(&data->worker);
1757
1758        return 0;
1759}
1760
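    /*
     * Copy the extent pool summary that matches this device's extent pool id
     * from the logical configuration query data into the private area.
     */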
1761static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
1762                                        struct dasd_rssd_lcq *lcq)
1763{
1764        struct dasd_eckd_private *private = device->private;
1765        int pool_id = dasd_eckd_ext_pool_id(device);
1766        struct dasd_ext_pool_sum eps;
1767        int i;
1768
1769        for (i = 0; i < lcq->pool_count; i++) {
1770                eps = lcq->ext_pool_sum[i];
1771                if (eps.pool_id == pool_id) {
1772                        memcpy(&private->eps, &eps,
1773                               sizeof(struct dasd_ext_pool_sum));
1774                }
1775        }
1776}
1777
1778/* Read Extent Pool Information - Logical Configuration Query */
1779static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
1780{
1781        struct dasd_eckd_private *private = device->private;
1782        struct dasd_psf_prssd_data *prssdp;
1783        struct dasd_rssd_lcq *lcq;
1784        struct dasd_ccw_req *cqr;
1785        struct ccw1 *ccw;
1786        int rc;
1787
1788        /* This command cannot be executed on an alias device */
1789        if (private->uid.type == UA_BASE_PAV_ALIAS ||
1790            private->uid.type == UA_HYPER_PAV_ALIAS)
1791                return 0;
1792
1793        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1794                                   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
1795        if (IS_ERR(cqr)) {
1796                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1797                                "Could not allocate initialization request");
1798                return PTR_ERR(cqr);
1799        }
1800
1801        /* Prepare for Read Subsystem Data */
1802        prssdp = cqr->data;
1803        memset(prssdp, 0, sizeof(*prssdp));
1804        prssdp->order = PSF_ORDER_PRSSD;
1805        prssdp->suborder = PSF_SUBORDER_LCQ;    /* Logical Configuration Query */
1806
1807        ccw = cqr->cpaddr;
1808        ccw->cmd_code = DASD_ECKD_CCW_PSF;
1809        ccw->count = sizeof(*prssdp);
1810        ccw->flags |= CCW_FLAG_CC;
1811        ccw->cda = (__u32)(addr_t)prssdp;
1812
1813        lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
1814        memset(lcq, 0, sizeof(*lcq));
1815
1816        ccw++;
1817        ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1818        ccw->count = sizeof(*lcq);
1819        ccw->flags |= CCW_FLAG_SLI;
1820        ccw->cda = (__u32)(addr_t)lcq;
1821
1822        cqr->buildclk = get_tod_clock();
1823        cqr->status = DASD_CQR_FILLED;
1824        cqr->startdev = device;
1825        cqr->memdev = device;
1826        cqr->block = NULL;
1827        cqr->retries = 256;
1828        cqr->expires = device->default_expires * HZ;
1829        /* The command might not be supported. Suppress the error output */
1830        __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1831
1832        rc = dasd_sleep_on_interruptible(cqr);
1833        if (rc == 0) {
1834                dasd_eckd_cpy_ext_pool_data(device, lcq);
1835        } else {
1836                DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1837                                "Reading the logical configuration failed with rc=%d", rc);
1838        }
1839
1840        dasd_sfree_request(cqr, cqr->memdev);
1841
1842        return rc;
1843}
1844
1845/*
1846 * Depending on the device type, the extent size is specified either as
1847 * cylinders per extent (CKD) or size per extent (FBA).
1848 * A 1 GB extent corresponds to 1113 cylinders, a 16 MB extent to 21 cylinders.
1849 */
1850static int dasd_eckd_ext_size(struct dasd_device *device)
1851{
1852        struct dasd_eckd_private *private = device->private;
1853        struct dasd_ext_pool_sum eps = private->eps;
1854
1855        if (!eps.flags.extent_size_valid)
1856                return 0;
1857        if (eps.extent_size.size_1G)
1858                return 1113;
1859        if (eps.extent_size.size_16M)
1860                return 21;
1861
1862        return 0;
1863}
1864
1865static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
1866{
1867        struct dasd_eckd_private *private = device->private;
1868
1869        return private->eps.warn_thrshld;
1870}
1871
1872static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
1873{
1874        struct dasd_eckd_private *private = device->private;
1875
1876        return private->eps.flags.capacity_at_warnlevel;
1877}
1878
1879/*
1880 * Extent Pool out of space
1881 */
1882static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
1883{
1884        struct dasd_eckd_private *private = device->private;
1885
1886        return private->eps.flags.pool_oos;
1887}
1888
1889/*
1890 * Build CP for Perform Subsystem Function - SSC.
1891 */
1892static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1893                                                    int enable_pav)
1894{
1895        struct dasd_ccw_req *cqr;
1896        struct dasd_psf_ssc_data *psf_ssc_data;
1897        struct ccw1 *ccw;
1898
1899        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
1900                                   sizeof(struct dasd_psf_ssc_data),
1901                                   device, NULL);
1902
1903        if (IS_ERR(cqr)) {
1904                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1905                           "Could not allocate PSF-SSC request");
1906                return cqr;
1907        }
1908        psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1909        psf_ssc_data->order = PSF_ORDER_SSC;
1910        psf_ssc_data->suborder = 0xc0;
1911        if (enable_pav) {
1912                psf_ssc_data->suborder |= 0x08;
1913                psf_ssc_data->reserved[0] = 0x88;
1914        }
1915        ccw = cqr->cpaddr;
1916        ccw->cmd_code = DASD_ECKD_CCW_PSF;
1917        ccw->cda = (__u32)(addr_t)psf_ssc_data;
1918        ccw->count = 66;
1919
1920        cqr->startdev = device;
1921        cqr->memdev = device;
1922        cqr->block = NULL;
1923        cqr->retries = 256;
1924        cqr->expires = 10*HZ;
1925        cqr->buildclk = get_tod_clock();
1926        cqr->status = DASD_CQR_FILLED;
1927        return cqr;
1928}
1929
1930/*
1931 * Perform Subsystem Function.
1932 * It is necessary to trigger CIO for channel revalidation since this
1933 * call might change the behaviour of DASD devices.
1934 */
1935static int
1936dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
1937                  unsigned long flags)
1938{
1939        struct dasd_ccw_req *cqr;
1940        int rc;
1941
1942        cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1943        if (IS_ERR(cqr))
1944                return PTR_ERR(cqr);
1945
1946        /*
1947         * set flags e.g. turn on failfast, to prevent blocking
1948         * the calling function should handle failed requests
1949         */
1950        cqr->flags |= flags;
1951
1952        rc = dasd_sleep_on(cqr);
1953        if (!rc)
1954                /* trigger CIO to reprobe devices */
1955                css_schedule_reprobe();
1956        else if (cqr->intrc == -EAGAIN)
1957                rc = -EAGAIN;
1958
1959        dasd_sfree_request(cqr, cqr->memdev);
1960        return rc;
1961}
1962
1963/*
1964 * Validate the storage server of the current device.
1965 */
1966static int dasd_eckd_validate_server(struct dasd_device *device,
1967                                     unsigned long flags)
1968{
1969        struct dasd_eckd_private *private = device->private;
1970        int enable_pav, rc;
1971
1972        if (private->uid.type == UA_BASE_PAV_ALIAS ||
1973            private->uid.type == UA_HYPER_PAV_ALIAS)
1974                return 0;
1975        if (dasd_nopav || MACHINE_IS_VM)
1976                enable_pav = 0;
1977        else
1978                enable_pav = 1;
1979        rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
1980
1981        /* the requested feature may not be available on the server,
1982         * therefore just report the error and go ahead */
1983        DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1984                        "returned rc=%d", private->uid.ssid, rc);
1985        return rc;
1986}
1987
1988/*
1989 * worker to do a validate server in case of a lost pathgroup
1990 */
1991static void dasd_eckd_do_validate_server(struct work_struct *work)
1992{
1993        struct dasd_device *device = container_of(work, struct dasd_device,
1994                                                  kick_validate);
1995        unsigned long flags = 0;
1996
1997        set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
1998        if (dasd_eckd_validate_server(device, flags)
1999            == -EAGAIN) {
2000                /* schedule worker again if failed */
2001                schedule_work(&device->kick_validate);
2002                return;
2003        }
2004
2005        dasd_put_device(device);
2006}
2007
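    /*
     * Schedule the validate server worker unless the device is offline or not
     * yet online; drop the extra device reference if no work was queued.
     */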
2008static void dasd_eckd_kick_validate_server(struct dasd_device *device)
2009{
2010        dasd_get_device(device);
2011        /* exit if device not online or in offline processing */
2012        if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
2013           device->state < DASD_STATE_ONLINE) {
2014                dasd_put_device(device);
2015                return;
2016        }
2017        /* queue call to do_validate_server to the kernel event daemon. */
2018        if (!schedule_work(&device->kick_validate))
2019                dasd_put_device(device);
2020}
2021
2022/*
2023 * Check device characteristics.
2024 * If the device is accessible using ECKD discipline, the device is enabled.
2025 */
2026static int
2027dasd_eckd_check_characteristics(struct dasd_device *device)
2028{
2029        struct dasd_eckd_private *private = device->private;
2030        struct dasd_block *block;
2031        struct dasd_uid temp_uid;
2032        int rc, i;
2033        int readonly;
2034        unsigned long value;
2035
2036        /* setup work queue for validate server */
2037        INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
2038        /* setup work queue for summary unit check */
2039        INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
2040
2041        if (!ccw_device_is_pathgroup(device->cdev)) {
2042                dev_warn(&device->cdev->dev,
2043                         "A channel path group could not be established\n");
2044                return -EIO;
2045        }
2046        if (!ccw_device_is_multipath(device->cdev)) {
2047                dev_info(&device->cdev->dev,
2048                         "The DASD is not operating in multipath mode\n");
2049        }
2050        if (!private) {
2051                private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
2052                if (!private) {
2053                        dev_warn(&device->cdev->dev,
2054                                 "Allocating memory for private DASD data "
2055                                 "failed\n");
2056                        return -ENOMEM;
2057                }
2058                device->private = private;
2059        } else {
2060                memset(private, 0, sizeof(*private));
2061        }
2062        /* Invalidate status of initial analysis. */
2063        private->init_cqr_status = -1;
2064        /* Set default cache operations. */
2065        private->attrib.operation = DASD_NORMAL_CACHE;
2066        private->attrib.nr_cyl = 0;
2067
2068        /* Read Configuration Data */
2069        rc = dasd_eckd_read_conf(device);
2070        if (rc)
2071                goto out_err1;
2072
2073        /* set some default values */
2074        device->default_expires = DASD_EXPIRES;
2075        device->default_retries = DASD_RETRIES;
2076        device->path_thrhld = DASD_ECKD_PATH_THRHLD;
2077        device->path_interval = DASD_ECKD_PATH_INTERVAL;
2078
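            /*
             * The GNEQ encodes the default expiration time as
             * timeout.number * 10^timeout.value; for example, number = 3 and
             * value = 2 would yield 300.
             */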
2079        if (private->gneq) {
2080                value = 1;
2081                for (i = 0; i < private->gneq->timeout.value; i++)
2082                        value = 10 * value;
2083                value = value * private->gneq->timeout.number;
2084                /* do not accept useless values */
2085                if (value != 0 && value <= DASD_EXPIRES_MAX)
2086                        device->default_expires = value;
2087        }
2088
2089        dasd_eckd_get_uid(device, &temp_uid);
2090        if (temp_uid.type == UA_BASE_DEVICE) {
2091                block = dasd_alloc_block();
2092                if (IS_ERR(block)) {
2093                        DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
2094                                        "could not allocate dasd "
2095                                        "block structure");
2096                        rc = PTR_ERR(block);
2097                        goto out_err1;
2098                }
2099                device->block = block;
2100                block->base = device;
2101        }
2102
2103        /* register lcu with alias handling, enable PAV */
2104        rc = dasd_alias_make_device_known_to_lcu(device);
2105        if (rc)
2106                goto out_err2;
2107
2108        dasd_eckd_validate_server(device, 0);
2109
2110        /* device may report different configuration data after LCU setup */
2111        rc = dasd_eckd_read_conf(device);
2112        if (rc)
2113                goto out_err3;
2114
2115        dasd_path_create_kobjects(device);
2116
2117        /* Read Feature Codes */
2118        dasd_eckd_read_features(device);
2119
2120        /* Read Volume Information */
2121        dasd_eckd_read_vol_info(device);
2122
2123        /* Read Extent Pool Information */
2124        dasd_eckd_read_ext_pool_info(device);
2125
2126        /* Read Device Characteristics */
2127        rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
2128                                         &private->rdc_data, 64);
2129        if (rc) {
2130                DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
2131                                "Read device characteristic failed, rc=%d", rc);
2132                goto out_err3;
2133        }
2134
2135        if ((device->features & DASD_FEATURE_USERAW) &&
2136            !(private->rdc_data.facilities.RT_in_LR)) {
2137                dev_err(&device->cdev->dev, "The storage server does not "
2138                        "support raw-track access\n");
2139                rc = -EINVAL;
2140                goto out_err3;
2141        }
2142
2143        /* find the valid cylinder size */
2144        if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
2145            private->rdc_data.long_no_cyl)
2146                private->real_cyl = private->rdc_data.long_no_cyl;
2147        else
2148                private->real_cyl = private->rdc_data.no_cyl;
2149
2150        private->fcx_max_data = get_fcx_max_data(device);
2151
2152        readonly = dasd_device_is_ro(device);
2153        if (readonly)
2154                set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
2155
2156        dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
2157                 "with %d cylinders, %d heads, %d sectors%s\n",
2158                 private->rdc_data.dev_type,
2159                 private->rdc_data.dev_model,
2160                 private->rdc_data.cu_type,
2161                 private->rdc_data.cu_model.model,
2162                 private->real_cyl,
2163                 private->rdc_data.trk_per_cyl,
2164                 private->rdc_data.sec_per_trk,
2165                 readonly ? ", read-only device" : "");
2166        return 0;
2167
2168out_err3:
2169        dasd_alias_disconnect_device_from_lcu(device);
2170out_err2:
2171        dasd_free_block(device->block);
2172        device->block = NULL;
2173out_err1:
2174        dasd_eckd_clear_conf_data(device);
2175        dasd_path_remove_kobjects(device);
2176        kfree(device->private);
2177        device->private = NULL;
2178        return rc;
2179}
2180
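    /*
     * Undo what dasd_eckd_check_characteristics has set up: disconnect the
     * device from its LCU, drop the cached configuration data references and
     * remove the path kobjects.
     */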
2181static void dasd_eckd_uncheck_device(struct dasd_device *device)
2182{
2183        struct dasd_eckd_private *private = device->private;
2184
2185        if (!private)
2186                return;
2187
2188        dasd_alias_disconnect_device_from_lcu(device);
2189        private->ned = NULL;
2190        private->sneq = NULL;
2191        private->vdsneq = NULL;
2192        private->gneq = NULL;
2193        dasd_eckd_clear_conf_data(device);
2194        dasd_path_remove_kobjects(device);
2195}
2196
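    /*
     * Build a channel program that reads the count fields of the first four
     * records on track 0 and of the first record on track 1 into
     * private->count_area. The result is later evaluated to detect the disk
     * layout and the block size.
     */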
2197static struct dasd_ccw_req *
2198dasd_eckd_analysis_ccw(struct dasd_device *device)
2199{
2200        struct dasd_eckd_private *private = device->private;
2201        struct eckd_count *count_data;
2202        struct LO_eckd_data *LO_data;
2203        struct dasd_ccw_req *cqr;
2204        struct ccw1 *ccw;
2205        int cplength, datasize;
2206        int i;
2207
2208        cplength = 8;
2209        datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
2210        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
2211                                   NULL);
2212        if (IS_ERR(cqr))
2213                return cqr;
2214        ccw = cqr->cpaddr;
2215        /* Define extent for the first 2 tracks. */
2216        define_extent(ccw++, cqr->data, 0, 1,
2217                      DASD_ECKD_CCW_READ_COUNT, device, 0);
2218        LO_data = cqr->data + sizeof(struct DE_eckd_data);
2219        /* Locate record for the first 4 records on track 0. */
2220        ccw[-1].flags |= CCW_FLAG_CC;
2221        locate_record(ccw++, LO_data++, 0, 0, 4,
2222                      DASD_ECKD_CCW_READ_COUNT, device, 0);
2223
2224        count_data = private->count_area;
2225        for (i = 0; i < 4; i++) {
2226                ccw[-1].flags |= CCW_FLAG_CC;
2227                ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2228                ccw->flags = 0;
2229                ccw->count = 8;
2230                ccw->cda = (__u32)(addr_t) count_data;
2231                ccw++;
2232                count_data++;
2233        }
2234
2235        /* Locate record for the first record on track 1. */
2236        ccw[-1].flags |= CCW_FLAG_CC;
2237        locate_record(ccw++, LO_data++, 1, 0, 1,
2238                      DASD_ECKD_CCW_READ_COUNT, device, 0);
2239        /* Read count ccw. */
2240        ccw[-1].flags |= CCW_FLAG_CC;
2241        ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2242        ccw->flags = 0;
2243        ccw->count = 8;
2244        ccw->cda = (__u32)(addr_t) count_data;
2245
2246        cqr->block = NULL;
2247        cqr->startdev = device;
2248        cqr->memdev = device;
2249        cqr->retries = 255;
2250        cqr->buildclk = get_tod_clock();
2251        cqr->status = DASD_CQR_FILLED;
2252        /* Set flags to suppress output for expected errors */
2253        set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2254
2255        return cqr;
2256}
2257
2258/* differentiate between 'no record found' and any other error */
2259static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
2260{
2261        char *sense;
2262        if (init_cqr->status == DASD_CQR_DONE)
2263                return INIT_CQR_OK;
2264        else if (init_cqr->status == DASD_CQR_NEED_ERP ||
2265                 init_cqr->status == DASD_CQR_FAILED) {
2266                sense = dasd_get_sense(&init_cqr->irb);
2267                if (sense && (sense[1] & SNS1_NO_REC_FOUND))
2268                        return INIT_CQR_UNFORMATTED;
2269                else
2270                        return INIT_CQR_ERROR;
2271        } else
2272                return INIT_CQR_ERROR;
2273}
2274
2275/*
2276 * This is the callback function for the init_analysis cqr. It saves
2277 * the status of the initial analysis ccw before it frees it and kicks
2278 * the device to continue the startup sequence. This will call
2279 * dasd_eckd_do_analysis again (if the device has not been marked
2280 * for deletion in the meantime).
2281 */
2282static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
2283                                        void *data)
2284{
2285        struct dasd_device *device = init_cqr->startdev;
2286        struct dasd_eckd_private *private = device->private;
2287
2288        private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
2289        dasd_sfree_request(init_cqr, device);
2290        dasd_kick_device(device);
2291}
2292
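    /*
     * Submit the initial analysis channel program asynchronously. The request
     * completes via dasd_eckd_analysis_callback; returning -EAGAIN tells the
     * caller that the analysis result is not available yet.
     */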
2293static int dasd_eckd_start_analysis(struct dasd_block *block)
2294{
2295        struct dasd_ccw_req *init_cqr;
2296
2297        init_cqr = dasd_eckd_analysis_ccw(block->base);
2298        if (IS_ERR(init_cqr))
2299                return PTR_ERR(init_cqr);
2300        init_cqr->callback = dasd_eckd_analysis_callback;
2301        init_cqr->callback_data = NULL;
2302        init_cqr->expires = 5*HZ;
2303        /* first try without ERP, so we can later handle unformatted
2304         * devices as a special case
2305         */
2306        clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
2307        init_cqr->retries = 0;
2308        dasd_add_request_head(init_cqr);
2309        return -EAGAIN;
2310}
2311
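    /*
     * Evaluate the count data read by the initial analysis: decide whether the
     * volume uses the compatible disk layout (CDL) or the Linux disk layout,
     * derive the block size and the shift from 512-byte sectors to blocks,
     * and compute the total number of blocks.
     */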
2312static int dasd_eckd_end_analysis(struct dasd_block *block)
2313{
2314        struct dasd_device *device = block->base;
2315        struct dasd_eckd_private *private = device->private;
2316        struct eckd_count *count_area;
2317        unsigned int sb, blk_per_trk;
2318        int status, i;
2319        struct dasd_ccw_req *init_cqr;
2320
2321        status = private->init_cqr_status;
2322        private->init_cqr_status = -1;
2323        if (status == INIT_CQR_ERROR) {
2324                /* try again, this time with full ERP */
2325                init_cqr = dasd_eckd_analysis_ccw(device);
2326                dasd_sleep_on(init_cqr);
2327                status = dasd_eckd_analysis_evaluation(init_cqr);
2328                dasd_sfree_request(init_cqr, device);
2329        }
2330
2331        if (device->features & DASD_FEATURE_USERAW) {
2332                block->bp_block = DASD_RAW_BLOCKSIZE;
2333                blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
2334                block->s2b_shift = 3;
2335                goto raw;
2336        }
2337
2338        if (status == INIT_CQR_UNFORMATTED) {
2339                dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
2340                return -EMEDIUMTYPE;
2341        } else if (status == INIT_CQR_ERROR) {
2342                dev_err(&device->cdev->dev,
2343                        "Detecting the DASD disk layout failed because "
2344                        "of an I/O error\n");
2345                return -EIO;
2346        }
2347
2348        private->uses_cdl = 1;
2349        /* Check Track 0 for Compatible Disk Layout */
2350        count_area = NULL;
2351        for (i = 0; i < 3; i++) {
2352                if (private->count_area[i].kl != 4 ||
2353                    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
2354                    private->count_area[i].cyl != 0 ||
2355                    private->count_area[i].head != count_area_head[i] ||
2356                    private->count_area[i].record != count_area_rec[i]) {
2357                        private->uses_cdl = 0;
2358                        break;
2359                }
2360        }
2361        if (i == 3)
2362                count_area = &private->count_area[3];
2363
2364        if (private->uses_cdl == 0) {
2365                for (i = 0; i < 5; i++) {
2366                        if ((private->count_area[i].kl != 0) ||
2367                            (private->count_area[i].dl !=
2368                             private->count_area[0].dl) ||
2369                            private->count_area[i].cyl !=  0 ||
2370                            private->count_area[i].head != count_area_head[i] ||
2371                            private->count_area[i].record != count_area_rec[i])
2372                                break;
2373                }
2374                if (i == 5)
2375                        count_area = &private->count_area[0];
2376        } else {
2377                if (private->count_area[3].record == 1)
2378                        dev_warn(&device->cdev->dev,
2379                                 "Track 0 has no records following the VTOC\n");
2380        }
2381
2382        if (count_area != NULL && count_area->kl == 0) {
2383                /* we found nothing violating our disk layout */
2384                if (dasd_check_blocksize(count_area->dl) == 0)
2385                        block->bp_block = count_area->dl;
2386        }
2387        if (block->bp_block == 0) {
2388                dev_warn(&device->cdev->dev,
2389                         "The disk layout of the DASD is not supported\n");
2390                return -EMEDIUMTYPE;
2391        }
2392        block->s2b_shift = 0;   /* bits to shift 512 to get a block */
2393        for (sb = 512; sb < block->bp_block; sb = sb << 1)
2394                block->s2b_shift++;
2395
2396        blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
2397
2398raw:
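            /*
             * Total capacity in blocks is cylinders * tracks/cylinder *
             * blocks/track. For example, a 3390-3 with 3339 cylinders and
             * 15 tracks per cylinder, formatted with 4096-byte blocks
             * (12 blocks per track), would yield 3339 * 15 * 12 = 601020
             * blocks, i.e. roughly 2.3 GiB.
             */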
2399        block->blocks = (private->real_cyl *
2400                          private->rdc_data.trk_per_cyl *
2401                          blk_per_trk);
2402
2403        dev_info(&device->cdev->dev,
2404                 "DASD with %d KB/block, %d KB total size, %d KB/track, "
2405                 "%s\n", (block->bp_block >> 10),
2406                 ((private->real_cyl *
2407                   private->rdc_data.trk_per_cyl *
2408                   blk_per_trk * (block->bp_block >> 9)) >> 1),
2409                 ((blk_per_trk * block->bp_block) >> 10),
2410                 private->uses_cdl ?
2411                 "compatible disk layout" : "linux disk layout");
2412
2413        return 0;
2414}
2415
2416static int dasd_eckd_do_analysis(struct dasd_block *block)
2417{
2418        struct dasd_eckd_private *private = block->base->private;
2419
2420        if (private->init_cqr_status < 0)
2421                return dasd_eckd_start_analysis(block);
2422        else
2423                return dasd_eckd_end_analysis(block);
2424}
2425
2426static int dasd_eckd_basic_to_ready(struct dasd_device *device)
2427{
2428        return dasd_alias_add_device(device);
2429};
2430
2431static int dasd_eckd_online_to_ready(struct dasd_device *device)
2432{
2433        cancel_work_sync(&device->reload_device);
2434        cancel_work_sync(&device->kick_validate);
2435        return 0;
2436};
2437
2438static int dasd_eckd_basic_to_known(struct dasd_device *device)
2439{
2440        return dasd_alias_remove_device(device);
2441};
2442
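    /*
     * Fill in the disk geometry from the device characteristics: sectors per
     * track are derived from the block size, cylinders and heads come from
     * the RDC data (heads = tracks per cylinder).
     */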
2443static int
2444dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2445{
2446        struct dasd_eckd_private *private = block->base->private;
2447
2448        if (dasd_check_blocksize(block->bp_block) == 0) {
2449                geo->sectors = recs_per_track(&private->rdc_data,
2450                                              0, block->bp_block);
2451        }
2452        geo->cylinders = private->rdc_data.no_cyl;
2453        geo->heads = private->rdc_data.trk_per_cyl;
2454        return 0;
2455}
2456
2457/*
2458 * Build the TCW request for the format check
2459 */
2460static struct dasd_ccw_req *
2461dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2462                          int enable_pav, struct eckd_count *fmt_buffer,
2463                          int rpt)
2464{
2465        struct dasd_eckd_private *start_priv;
2466        struct dasd_device *startdev = NULL;
2467        struct tidaw *last_tidaw = NULL;
2468        struct dasd_ccw_req *cqr;
2469        struct itcw *itcw;
2470        int itcw_size;
2471        int count;
2472        int rc;
2473        int i;
2474
2475        if (enable_pav)
2476                startdev = dasd_alias_get_start_dev(base);
2477
2478        if (!startdev)
2479                startdev = base;
2480
2481        start_priv = startdev->private;
2482
2483        count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2484
2485        /*
2486         * we're adding 'count' tidaws to the itcw;
2487         * calculate the corresponding itcw_size
2488         */
2489        itcw_size = itcw_calc_size(0, count, 0);
2490
2491        cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2492        if (IS_ERR(cqr))
2493                return cqr;
2494
2495        start_priv->count++;
2496
2497        itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2498        if (IS_ERR(itcw)) {
2499                rc = -EINVAL;
2500                goto out_err;
2501        }
2502
2503        cqr->cpaddr = itcw_get_tcw(itcw);
2504        rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
2505                          DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
2506                          sizeof(struct eckd_count),
2507                          count * sizeof(struct eckd_count), 0, rpt);
2508        if (rc)
2509                goto out_err;
2510
2511        for (i = 0; i < count; i++) {
2512                last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
2513                                            sizeof(struct eckd_count));
2514                if (IS_ERR(last_tidaw)) {
2515                        rc = -EINVAL;
2516                        goto out_err;
2517                }
2518        }
2519
2520        last_tidaw->flags |= TIDAW_FLAGS_LAST;
2521        itcw_finalize(itcw);
2522
2523        cqr->cpmode = 1;
2524        cqr->startdev = startdev;
2525        cqr->memdev = startdev;
2526        cqr->basedev = base;
2527        cqr->retries = startdev->default_retries;
2528        cqr->expires = startdev->default_expires * HZ;
2529        cqr->buildclk = get_tod_clock();
2530        cqr->status = DASD_CQR_FILLED;
2531        /* Set flags to suppress output for expected errors */
2532        set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
2533        set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2534
2535        return cqr;
2536
2537out_err:
2538        dasd_sfree_request(cqr, startdev);
2539
2540        return ERR_PTR(rc);
2541}
2542
2543/*
2544 * Build the CCW request for the format check
2545 */
2546static struct dasd_ccw_req *
2547dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2548                      int enable_pav, struct eckd_count *fmt_buffer, int rpt)
2549{
2550        struct dasd_eckd_private *start_priv;
2551        struct dasd_eckd_private *base_priv;
2552        struct dasd_device *startdev = NULL;
2553        struct dasd_ccw_req *cqr;
2554        struct ccw1 *ccw;
2555        void *data;
2556        int cplength, datasize;
2557        int use_prefix;
2558        int count;
2559        int i;
2560
2561        if (enable_pav)
2562                startdev = dasd_alias_get_start_dev(base);
2563
2564        if (!startdev)
2565                startdev = base;
2566
2567        start_priv = startdev->private;
2568        base_priv = base->private;
2569
2570        count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2571
2572        use_prefix = base_priv->features.feature[8] & 0x01;
2573
2574        if (use_prefix) {
2575                cplength = 1;
2576                datasize = sizeof(struct PFX_eckd_data);
2577        } else {
2578                cplength = 2;
2579                datasize = sizeof(struct DE_eckd_data) +
2580                        sizeof(struct LO_eckd_data);
2581        }
2582        cplength += count;
2583
2584        cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2585        if (IS_ERR(cqr))
2586                return cqr;
2587
2588        start_priv->count++;
2589        data = cqr->data;
2590        ccw = cqr->cpaddr;
2591
2592        if (use_prefix) {
2593                prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
2594                           DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
2595                           count, 0, 0);
2596        } else {
2597                define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
2598                              DASD_ECKD_CCW_READ_COUNT, startdev, 0);
2599
2600                data += sizeof(struct DE_eckd_data);
2601                ccw[-1].flags |= CCW_FLAG_CC;
2602
2603                locate_record(ccw++, data, fdata->start_unit, 0, count,
2604                              DASD_ECKD_CCW_READ_COUNT, base, 0);
2605        }
2606
2607        for (i = 0; i < count; i++) {
2608                ccw[-1].flags |= CCW_FLAG_CC;
2609                ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2610                ccw->flags = CCW_FLAG_SLI;
2611                ccw->count = 8;
2612                ccw->cda = (__u32)(addr_t) fmt_buffer;
2613                ccw++;
2614                fmt_buffer++;
2615        }
2616
2617        cqr->startdev = startdev;
2618        cqr->memdev = startdev;
2619        cqr->basedev = base;
2620        cqr->retries = DASD_RETRIES;
2621        cqr->expires = startdev->default_expires * HZ;
2622        cqr->buildclk = get_tod_clock();
2623        cqr->status = DASD_CQR_FILLED;
2624        /* Set flags to suppress output for expected errors */
2625        set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2626
2627        return cqr;
2628}
2629
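    /*
     * Build the channel program that formats a range of tracks. Depending on
     * fdata->intensity this writes record zero, invalidates tracks or writes
     * the regular records, using either the Prefix or the Define Extent /
     * Locate Record setup depending on the feature codes.
     */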
2630static struct dasd_ccw_req *
2631dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
2632                       struct format_data_t *fdata, int enable_pav)
2633{
2634        struct dasd_eckd_private *base_priv;
2635        struct dasd_eckd_private *start_priv;
2636        struct dasd_ccw_req *fcp;
2637        struct eckd_count *ect;
2638        struct ch_t address;
2639        struct ccw1 *ccw;
2640        void *data;
2641        int rpt;
2642        int cplength, datasize;
2643        int i, j;
2644        int intensity = 0;
2645        int r0_perm;
2646        int nr_tracks;
2647        int use_prefix;
2648
2649        if (enable_pav)
2650                startdev = dasd_alias_get_start_dev(base);
2651
2652        if (!startdev)
2653                startdev = base;
2654
2655        start_priv = startdev->private;
2656        base_priv = base->private;
2657
2658        rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
2659
2660        nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
2661
2662        /*
2663         * fdata->intensity is a bit string that tells us what to do:
2664         *   Bit 0: write record zero
2665         *   Bit 1: write home address, currently not supported
2666         *   Bit 2: invalidate tracks
2667         *   Bit 3: use OS/390 compatible disk layout (cdl)
2668         *   Bit 4: do not allow storage subsystem to modify record zero
2669         * Only some bit combinations do make sense.
2670         * Only some bit combinations make sense.
2671        if (fdata->intensity & 0x10) {
2672                r0_perm = 0;
2673                intensity = fdata->intensity & ~0x10;
2674        } else {
2675                r0_perm = 1;
2676                intensity = fdata->intensity;
2677        }
2678
2679        use_prefix = base_priv->features.feature[8] & 0x01;
2680
2681        switch (intensity) {
2682        case 0x00:      /* Normal format */
2683        case 0x08:      /* Normal format, use cdl. */
2684                cplength = 2 + (rpt*nr_tracks);
2685                if (use_prefix)
2686                        datasize = sizeof(struct PFX_eckd_data) +
2687                                sizeof(struct LO_eckd_data) +
2688                                rpt * nr_tracks * sizeof(struct eckd_count);
2689                else
2690                        datasize = sizeof(struct DE_eckd_data) +
2691                                sizeof(struct LO_eckd_data) +
2692                                rpt * nr_tracks * sizeof(struct eckd_count);
2693                break;
2694        case 0x01:      /* Write record zero and format track. */
2695        case 0x09:      /* Write record zero and format track, use cdl. */
2696                cplength = 2 + rpt * nr_tracks;
2697                if (use_prefix)
2698                        datasize = sizeof(struct PFX_eckd_data) +
2699                                sizeof(struct LO_eckd_data) +
2700                                sizeof(struct eckd_count) +
2701                                rpt * nr_tracks * sizeof(struct eckd_count);
2702                else
2703                        datasize = sizeof(struct DE_eckd_data) +
2704                                sizeof(struct LO_eckd_data) +
2705                                sizeof(struct eckd_count) +
2706                                rpt * nr_tracks * sizeof(struct eckd_count);
2707                break;
2708        case 0x04:      /* Invalidate track. */
2709        case 0x0c:      /* Invalidate track, use cdl. */
2710                cplength = 3;
2711                if (use_prefix)
2712                        datasize = sizeof(struct PFX_eckd_data) +
2713                                sizeof(struct LO_eckd_data) +
2714                                sizeof(struct eckd_count);
2715                else
2716                        datasize = sizeof(struct DE_eckd_data) +
2717                                sizeof(struct LO_eckd_data) +
2718                                sizeof(struct eckd_count);
2719                break;
2720        default:
2721                dev_warn(&startdev->cdev->dev,
2722                         "An I/O control call used incorrect flags 0x%x\n",
2723                         fdata->intensity);
2724                return ERR_PTR(-EINVAL);
2725        }
2726
2727        fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2728        if (IS_ERR(fcp))
2729                return fcp;
2730
2731        start_priv->count++;
2732        data = fcp->data;
2733        ccw = fcp->cpaddr;
2734
2735        switch (intensity & ~0x08) {
2736        case 0x00: /* Normal format. */
2737                if (use_prefix) {
2738                        prefix(ccw++, (struct PFX_eckd_data *) data,
2739                               fdata->start_unit, fdata->stop_unit,
2740                               DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2741                        /* grant subsystem permission to format R0 */
2742                        if (r0_perm)
2743                                ((struct PFX_eckd_data *)data)
2744                                        ->define_extent.ga_extended |= 0x04;
2745                        data += sizeof(struct PFX_eckd_data);
2746                } else {
2747                        define_extent(ccw++, (struct DE_eckd_data *) data,
2748                                      fdata->start_unit, fdata->stop_unit,
2749                                      DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2750                        /* grant subsystem permission to format R0 */
2751                        if (r0_perm)
2752                                ((struct DE_eckd_data *) data)
2753                                        ->ga_extended |= 0x04;
2754                        data += sizeof(struct DE_eckd_data);
2755                }
2756                ccw[-1].flags |= CCW_FLAG_CC;
2757                locate_record(ccw++, (struct LO_eckd_data *) data,
2758                              fdata->start_unit, 0, rpt*nr_tracks,
2759                              DASD_ECKD_CCW_WRITE_CKD, base,
2760                              fdata->blksize);
2761                data += sizeof(struct LO_eckd_data);
2762                break;
2763        case 0x01: /* Write record zero + format track. */
2764                if (use_prefix) {
2765                        prefix(ccw++, (struct PFX_eckd_data *) data,
2766                               fdata->start_unit, fdata->stop_unit,
2767                               DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2768                               base, startdev);
2769                        data += sizeof(struct PFX_eckd_data);
2770                } else {
2771                        define_extent(ccw++, (struct DE_eckd_data *) data,
2772                               fdata->start_unit, fdata->stop_unit,
2773                               DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
2774                        data += sizeof(struct DE_eckd_data);
2775                }
2776                ccw[-1].flags |= CCW_FLAG_CC;
2777                locate_record(ccw++, (struct LO_eckd_data *) data,
2778                              fdata->start_unit, 0, rpt * nr_tracks + 1,
2779                              DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
2780                              base->block->bp_block);
2781                data += sizeof(struct LO_eckd_data);
2782                break;
2783        case 0x04: /* Invalidate track. */
2784                if (use_prefix) {
2785                        prefix(ccw++, (struct PFX_eckd_data *) data,
2786                               fdata->start_unit, fdata->stop_unit,
2787                               DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2788                        data += sizeof(struct PFX_eckd_data);
2789                } else {
2790                        define_extent(ccw++, (struct DE_eckd_data *) data,
2791                               fdata->start_unit, fdata->stop_unit,
2792                               DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2793                        data += sizeof(struct DE_eckd_data);
2794                }
2795                ccw[-1].flags |= CCW_FLAG_CC;
2796                locate_record(ccw++, (struct LO_eckd_data *) data,
2797                              fdata->start_unit, 0, 1,
2798                              DASD_ECKD_CCW_WRITE_CKD, base, 8);
2799                data += sizeof(struct LO_eckd_data);
2800                break;
2801        }
2802
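            /*
             * For each track in the range, build one count field (eckd_count)
             * per record to be written, and chain a Write Record Zero and/or
             * Write CKD ccw for it, depending on the requested intensity bits.
             */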
2803        for (j = 0; j < nr_tracks; j++) {
2804                /* calculate cylinder and head for the current track */
2805                set_ch_t(&address,
2806                         (fdata->start_unit + j) /
2807                         base_priv->rdc_data.trk_per_cyl,
2808                         (fdata->start_unit + j) %
2809                         base_priv->rdc_data.trk_per_cyl);
2810                if (intensity & 0x01) { /* write record zero */
2811                        ect = (struct eckd_count *) data;
2812                        data += sizeof(struct eckd_count);
2813                        ect->cyl = address.cyl;
2814                        ect->head = address.head;
2815                        ect->record = 0;
2816                        ect->kl = 0;
2817                        ect->dl = 8;
2818                        ccw[-1].flags |= CCW_FLAG_CC;
2819                        ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
2820                        ccw->flags = CCW_FLAG_SLI;
2821                        ccw->count = 8;
2822                        ccw->cda = (__u32)(addr_t) ect;
2823                        ccw++;
2824                }
2825                if ((intensity & ~0x08) & 0x04) {       /* erase track */
2826                        ect = (struct eckd_count *) data;
2827                        data += sizeof(struct eckd_count);
2828                        ect->cyl = address.cyl;
2829                        ect->head = address.head;
2830                        ect->record = 1;
2831                        ect->kl = 0;
2832                        ect->dl = 0;
2833                        ccw[-1].flags |= CCW_FLAG_CC;
2834                        ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
2835                        ccw->flags = CCW_FLAG_SLI;
2836                        ccw->count = 8;
2837                        ccw->cda = (__u32)(addr_t) ect;
2838                } else {                /* write remaining records */
2839                        for (i = 0; i < rpt; i++) {
2840                                ect = (struct eckd_count *) data;
2841                                data += sizeof(struct eckd_count);
2842                                ect->cyl = address.cyl;
2843                                ect->head = address.head;
2844                                ect->record = i + 1;
2845                                ect->kl = 0;
2846                                ect->dl = fdata->blksize;
2847                                /*
2848                                 * Check for special tracks 0-1
2849                                 * when formatting CDL
2850                                 */
2851                                if ((intensity & 0x08) &&
2852                                    address.cyl == 0 && address.head == 0) {
2853                                        if (i < 3) {
2854                                                ect->kl = 4;
2855                                                ect->dl = sizes_trk0[i] - 4;
2856                                        }
2857                                }
2858                                if ((intensity & 0x08) &&
2859                                    address.cyl == 0 && address.head == 1) {
2860                                        ect->kl = 44;
2861                                        ect->dl = LABEL_SIZE - 44;
2862                                }
2863                                ccw[-1].flags |= CCW_FLAG_CC;
2864                                if (i != 0 || j == 0)
2865                                        ccw->cmd_code =
2866                                                DASD_ECKD_CCW_WRITE_CKD;
2867                                else
2868                                        ccw->cmd_code =
2869                                                DASD_ECKD_CCW_WRITE_CKD_MT;
2870                                ccw->flags = CCW_FLAG_SLI;
2871                                ccw->count = 8;
2872                                ccw->cda = (__u32)(addr_t) ect;
2873                                ccw++;
2874                        }
2875                }
2876        }
2877
2878        fcp->startdev = startdev;
2879        fcp->memdev = startdev;
2880        fcp->basedev = base;
2881        fcp->retries = 256;
2882        fcp->expires = startdev->default_expires * HZ;
2883        fcp->buildclk = get_tod_clock();
2884        fcp->status = DASD_CQR_FILLED;
2885
2886        return fcp;
2887}
2888
2889/*
2890 * Wrapper function to build a format or format-check CCW request depending on the input data
2891 */
2892static struct dasd_ccw_req *
2893dasd_eckd_format_build_ccw_req(struct dasd_device *base,
2894                               struct format_data_t *fdata, int enable_pav,
2895                               int tpm, struct eckd_count *fmt_buffer, int rpt)
2896{
2897        struct dasd_ccw_req *ccw_req;
2898
2899        if (!fmt_buffer) {
2900                ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
2901        } else {
2902                if (tpm)
2903                        ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2904                                                            enable_pav,
2905                                                            fmt_buffer, rpt);
2906                else
2907                        ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2908                                                        fmt_buffer, rpt);
2909        }
2910
2911        return ccw_req;
2912}
2913
2914/*
2915 * Sanity checks on format_data
2916 */
2917static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2918                                          struct format_data_t *fdata)
2919{
2920        struct dasd_eckd_private *private = base->private;
2921
2922        if (fdata->start_unit >=
2923            (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2924                dev_warn(&base->cdev->dev,
2925                         "Start track number %u used in formatting is too big\n",
2926                         fdata->start_unit);
2927                return -EINVAL;
2928        }
2929        if (fdata->stop_unit >=
2930            (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2931                dev_warn(&base->cdev->dev,
2932                         "Stop track number %u used in formatting is too big\n",
2933                         fdata->stop_unit);
2934                return -EINVAL;
2935        }
2936        if (fdata->start_unit > fdata->stop_unit) {
2937                dev_warn(&base->cdev->dev,
2938                         "Start track %u used in formatting exceeds end track\n",
2939                         fdata->start_unit);
2940                return -EINVAL;
2941        }
2942        if (dasd_check_blocksize(fdata->blksize) != 0) {
2943                dev_warn(&base->cdev->dev,
2944                         "The DASD cannot be formatted with block size %u\n",
2945                         fdata->blksize);
2946                return -EINVAL;
2947        }
2948        return 0;
2949}
2950
2951/*
2952 * This function will process format_data originally coming from an IOCTL
2953 */
2954static int dasd_eckd_format_process_data(struct dasd_device *base,
2955                                         struct format_data_t *fdata,
2956                                         int enable_pav, int tpm,
2957                                         struct eckd_count *fmt_buffer, int rpt,
2958                                         struct irb *irb)
2959{
2960        struct dasd_eckd_private *private = base->private;
2961        struct dasd_ccw_req *cqr, *n;
2962        struct list_head format_queue;
2963        struct dasd_device *device;
2964        char *sense = NULL;
2965        int old_start, old_stop, format_step;
2966        int step, retry;
2967        int rc;
2968
2969        rc = dasd_eckd_format_sanity_checks(base, fdata);
2970        if (rc)
2971                return rc;
2972
2973        INIT_LIST_HEAD(&format_queue);
2974
2975        old_start = fdata->start_unit;
2976        old_stop = fdata->stop_unit;
2977
2978        if (!tpm && fmt_buffer != NULL) {
2979                /* Command Mode / Format Check */
2980                format_step = 1;
2981        } else if (tpm && fmt_buffer != NULL) {
2982                /* Transport Mode / Format Check */
2983                format_step = DASD_CQR_MAX_CCW / rpt;
2984        } else {
2985                /* Normal Formatting */
2986                format_step = DASD_CQR_MAX_CCW /
2987                        recs_per_track(&private->rdc_data, 0, fdata->blksize);
2988        }
2989
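            /*
             * format_step is the number of tracks covered by a single request;
             * e.g. (illustrative numbers only) if DASD_CQR_MAX_CCW were 255 and
             * a 4k-formatted track held 12 records, a normal format request
             * would cover roughly 21 tracks.
             */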
2990        do {
2991                retry = 0;
2992                while (fdata->start_unit <= old_stop) {
2993                        step = fdata->stop_unit - fdata->start_unit + 1;
2994                        if (step > format_step) {
2995                                fdata->stop_unit =
2996                                        fdata->start_unit + format_step - 1;
2997                        }
2998
2999                        cqr = dasd_eckd_format_build_ccw_req(base, fdata,
3000                                                             enable_pav, tpm,
3001                                                             fmt_buffer, rpt);
3002                        if (IS_ERR(cqr)) {
3003                                rc = PTR_ERR(cqr);
3004                                if (rc == -ENOMEM) {
3005                                        if (list_empty(&format_queue))
3006                                                goto out;
3007                                        /*
3008                                         * not enough memory available; submit
3009                                         * the requests already queued and retry
3010                                         * the rest once those have finished
3011                                         */
3012                                        retry = 1;
3013                                        break;
3014                                }
3015                                goto out_err;
3016                        }
3017                        list_add_tail(&cqr->blocklist, &format_queue);
3018
3019                        if (fmt_buffer) {
3020                                step = fdata->stop_unit - fdata->start_unit + 1;
3021                                fmt_buffer += rpt * step;
3022                        }
3023                        fdata->start_unit = fdata->stop_unit + 1;
3024                        fdata->stop_unit = old_stop;
3025                }
3026
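                    /* submit all queued format requests and wait for them */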
3027                rc = dasd_sleep_on_queue(&format_queue);
3028
3029out_err:
3030                list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
3031                        device = cqr->startdev;
3032                        private = device->private;
3033
3034                        if (cqr->status == DASD_CQR_FAILED) {
3035                                /*
3036                                 * Only get sense data if called by format
3037                                 * check
3038                                 */
3039                                if (fmt_buffer && irb) {
3040                                        sense = dasd_get_sense(&cqr->irb);
3041                                        memcpy(irb, &cqr->irb, sizeof(*irb));
3042                                }
3043                                rc = -EIO;
3044                        }
3045                        list_del_init(&cqr->blocklist);
3046                        dasd_ffree_request(cqr, device);
3047                        private->count--;
3048                }
3049
3050                if (rc && rc != -EIO)
3051                        goto out;
3052                if (rc == -EIO) {
3053                        /*
3054                         * In case fewer than the expected records are on the
3055                         * track, we will most likely get a 'No Record Found'
3056                         * error (in command mode) or a 'File Protected' error
3057                         * (in transport mode). Those particular cases shouldn't
3058                         * pass the -EIO to the IOCTL; therefore, reset the rc
3059                         * and continue.
3060                         */
3061                        if (sense &&
3062                            (sense[1] & SNS1_NO_REC_FOUND ||
3063                             sense[1] & SNS1_FILE_PROTECTED))
3064                                retry = 1;
3065                        else
3066                                goto out;
3067                }
3068
3069        } while (retry);
3070
3071out:
3072        fdata->start_unit = old_start;
3073        fdata->stop_unit = old_stop;
3074
3075        return rc;
3076}
3077
3078static int dasd_eckd_format_device(struct dasd_device *base,
3079                                   struct format_data_t *fdata, int enable_pav)
3080{
3081        return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
3082                                             0, NULL);
3083}
3084
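    /*
     * Test whether the given track is already queued for formatting by another
     * thread; if not, add it to the block's format list. Returns true if the
     * track is already being formatted elsewhere.
     */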
3085static bool test_and_set_format_track(struct dasd_format_entry *to_format,
3086                                      struct dasd_block *block)
3087{
3088        struct dasd_format_entry *format;
3089        unsigned long flags;
3090        bool rc = false;
3091
3092        spin_lock_irqsave(&block->format_lock, flags);
3093        list_for_each_entry(format, &block->format_list, list) {
3094                if (format->track == to_format->track) {
3095                        rc = true;
3096                        goto out;
3097                }
3098        }
3099        list_add_tail(&to_format->list, &block->format_list);
3100
3101out:
3102        spin_unlock_irqrestore(&block->format_lock, flags);
3103        return rc;
3104}
3105
3106static void clear_format_track(struct dasd_format_entry *format,
3107                              struct dasd_block *block)
3108{
3109        unsigned long flags;
3110
3111        spin_lock_irqsave(&block->format_lock, flags);
3112        list_del_init(&format->list);
3113        spin_unlock_irqrestore(&block->format_lock, flags);
3114}
3115
3116/*
3117 * Callback function to free ESE format requests.
3118 */
3119static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
3120{
3121        struct dasd_device *device = cqr->startdev;
3122        struct dasd_eckd_private *private = device->private;
3123        struct dasd_format_entry *format = data;
3124
3125        clear_format_track(format, cqr->basedev->block);
3126        private->count--;
3127        dasd_ffree_request(cqr, device);
3128}
3129
3130static struct dasd_ccw_req *
3131dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3132                     struct irb *irb)
3133{
3134        struct dasd_eckd_private *private;
3135        struct dasd_format_entry *format;
3136        struct format_data_t fdata;
3137        unsigned int recs_per_trk;
3138        struct dasd_ccw_req *fcqr;
3139        struct dasd_device *base;
3140        struct dasd_block *block;
3141        unsigned int blksize;
3142        struct request *req;
3143        sector_t first_trk;
3144        sector_t last_trk;
3145        sector_t curr_trk;
3146        int rc;
3147
3148        req = cqr->callback_data;
3149        block = cqr->block;
3150        base = block->base;
3151        private = base->private;
3152        blksize = block->bp_block;
3153        recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3154        format = &startdev->format_entry;
3155
3156        first_trk = blk_rq_pos(req) >> block->s2b_shift;
3157        sector_div(first_trk, recs_per_trk);
3158        last_trk =
3159                (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3160        sector_div(last_trk, recs_per_trk);
3161        rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3162        if (rc)
3163                return ERR_PTR(rc);
3164
3165        if (curr_trk < first_trk || curr_trk > last_trk) {
3166                DBF_DEV_EVENT(DBF_WARNING, startdev,
3167                              "ESE error track %llu not within range %llu - %llu\n",
3168                              curr_trk, first_trk, last_trk);
3169                return ERR_PTR(-EINVAL);
3170        }
3171        format->track = curr_trk;
3172        /* test if the track is already being formatted by another thread */
3173        if (test_and_set_format_track(format, block))
3174                return ERR_PTR(-EEXIST);
3175
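            /*
             * Format exactly the track that triggered the error, using the
             * volume's block size and the CDL layout if the base device uses
             * CDL.
             */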
3176        fdata.start_unit = curr_trk;
3177        fdata.stop_unit = curr_trk;
3178        fdata.blksize = blksize;
3179        fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
3180
3181        rc = dasd_eckd_format_sanity_checks(base, &fdata);
3182        if (rc)
3183                return ERR_PTR(-EINVAL);
3184
3185        /*
3186         * We're building the request with PAV disabled as we're reusing
3187         * the former startdev.
3188         */
3189        fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
3190        if (IS_ERR(fcqr))
3191                return fcqr;
3192
3193        fcqr->callback = dasd_eckd_ese_format_cb;
3194        fcqr->callback_data = (void *) format;
3195
3196        return fcqr;
3197}
3198
3199/*
3200 * When data is read from an unformatted area of an ESE volume, this function
3201 * returns zeroed data, thereby mimicking a read of zeroed blocks.
3202 *
3203 * The first unformatted track is the one that got the NRF error; its address
3204 * is encoded in the sense data.
3205 *
3206 * All tracks before it have returned valid data and should not be touched.
3207 * All tracks after the unformatted track might be formatted or not. Since this
3208 * is currently not known, remember the amount of processed data and return the
3209 * remainder of the request to the block layer in __dasd_cleanup_cqr().
3210 */
3211static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
3212{
3213        struct dasd_eckd_private *private;
3214        sector_t first_trk, last_trk;
3215        sector_t first_blk, last_blk;
3216        unsigned int blksize, off;
3217        unsigned int recs_per_trk;
3218        struct dasd_device *base;
3219        struct req_iterator iter;
3220        struct dasd_block *block;
3221        unsigned int skip_block;
3222        unsigned int blk_count;
3223        struct request *req;
3224        struct bio_vec bv;
3225        sector_t curr_trk;
3226        sector_t end_blk;
3227        char *dst;
3228        int rc;
3229
3230        req = (struct request *) cqr->callback_data;
3231        base = cqr->block->base;
3232        blksize = base->block->bp_block;
3233        block =  cqr->block;
3234        private = base->private;
3235        skip_block = 0;
3236        blk_count = 0;
3237
3238        recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3239        first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
3240        sector_div(first_trk, recs_per_trk);
3241        last_trk = last_blk =
3242                (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3243        sector_div(last_trk, recs_per_trk);
3244        rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3245        if (rc)
3246                return rc;
3247
3248        /* sanity check if the current track from sense data is valid */
3249        if (curr_trk < first_trk || curr_trk > last_trk) {
3250                DBF_DEV_EVENT(DBF_WARNING, base,
3251                              "ESE error track %llu not within range %llu - %llu\n",
3252                              curr_trk, first_trk, last_trk);
3253                return -EINVAL;
3254        }
3255
3256        /*
3257         * If a track other than the first one got the NRF error, we have to
3258         * skip over the valid blocks that precede it
3259         */
3260        if (curr_trk != first_trk)
3261                skip_block = curr_trk * recs_per_trk - first_blk;
3262
3263        /* we have no information beyond the current track */
3264        end_blk = (curr_trk + 1) * recs_per_trk;
3265
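            /*
             * Walk the request segments: skip blocks that were already read
             * successfully, zero-fill blocks on the unformatted track, and
             * stop once the end of that track (end_blk) is reached.
             */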
3266        rq_for_each_segment(bv, req, iter) {
3267                dst = page_address(bv.bv_page) + bv.bv_offset;
3268                for (off = 0; off < bv.bv_len; off += blksize) {
3269                        if (first_blk + blk_count >= end_blk) {
3270                                cqr->proc_bytes = blk_count * blksize;
3271                                return 0;
3272                        }
3273                        if (dst && !skip_block) {
3274                                /* zero the current block within this segment */
3275                                memset(dst + off, 0, blksize);
3276                        } else {
3277                                skip_block--;
3278                        }
3279                        blk_count++;
3280                }
3281        }
3282        return 0;
3283}
3284
3285/*
3286 * Helper function to count consecutive records of a single track.
3287 */
3288static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3289                                   int max)
3290{
3291        int head;
3292        int i;
3293
3294        head = fmt_buffer[start].head;
3295
3296        /*
3297         * There are 3 conditions where we stop counting:
3298         * - if data reoccurs (record 1 of the same head shows up again), which
3299         *   may happen due to the way DASD_ECKD_CCW_READ_COUNT works
3300         * - when the head changes, because we're then iterating over several
3301         *   tracks (DASD_ECKD_CCW_READ_COUNT_MT)
3302         * - when we've reached the end of sensible data in the buffer (the
3303         *   record will be 0 then)
3304         */
3305        for (i = start; i < max; i++) {
3306                if (i > start) {
3307                        if ((fmt_buffer[i].head == head &&
3308                            fmt_buffer[i].record == 1) ||
3309                            fmt_buffer[i].head != head ||
3310                            fmt_buffer[i].record == 0)
3311                                break;
3312                }
3313        }
3314
3315        return i - start;
3316}
3317
3318/*
3319 * Evaluate a given range of tracks. Data like number of records, blocksize,
3320 * record ids, and key length are compared with expected data.
3321 *
3322 * If a mismatch occurs, the corresponding error bit is set, as well as
3323 * additional information, depending on the error.
3324 */
3325static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
3326                                             struct format_check_t *cdata,
3327                                             int rpt_max, int rpt_exp,
3328                                             int trk_per_cyl, int tpm)
3329{
3330        struct ch_t geo;
3331        int max_entries;
3332        int count = 0;
3333        int trkcount;
3334        int blksize;
3335        int pos = 0;
3336        int i, j;
3337        int kl;
3338
3339        trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3340        max_entries = trkcount * rpt_max;
3341
3342        for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
3343                /* Calculate the correct next starting position in the buffer */
3344                if (tpm) {
3345                        while (fmt_buffer[pos].record == 0 &&
3346                               fmt_buffer[pos].dl == 0) {
3347                                if (pos++ > max_entries)
3348                                        break;
3349                        }
3350                } else {
3351                        if (i != cdata->expect.start_unit)
3352                                pos += rpt_max - count;
3353                }
3354
3355                /* Calculate the expected geo values for the current track */
3356                set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
3357
3358                /* Count and check number of records */
3359                count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
3360
3361                if (count < rpt_exp) {
3362                        cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
3363                        break;
3364                }
3365                if (count > rpt_exp) {
3366                        cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
3367                        break;
3368                }
3369
3370                for (j = 0; j < count; j++, pos++) {
3371                        blksize = cdata->expect.blksize;
3372                        kl = 0;
3373
3374                        /*
3375                         * Set special values when checking CDL formatted
3376                         * devices.
3377                         */
3378                        if ((cdata->expect.intensity & 0x08) &&
3379                            geo.cyl == 0 && geo.head == 0) {
3380                                if (j < 3) {
3381                                        blksize = sizes_trk0[j] - 4;
3382                                        kl = 4;
3383                                }
3384                        }
3385                        if ((cdata->expect.intensity & 0x08) &&
3386                            geo.cyl == 0 && geo.head == 1) {
3387                                blksize = LABEL_SIZE - 44;
3388                                kl = 44;
3389                        }
3390
3391                        /* Check blocksize */
3392                        if (fmt_buffer[pos].dl != blksize) {
3393                                cdata->result = DASD_FMT_ERR_BLKSIZE;
3394                                goto out;
3395                        }
3396                        /* Check if the key length matches the expected value */
3397                        if (fmt_buffer[pos].kl != kl) {
3398                                cdata->result = DASD_FMT_ERR_KEY_LENGTH;
3399                                goto out;
3400                        }
3401                        /* Check if record_id is correct */
3402                        if (fmt_buffer[pos].cyl != geo.cyl ||
3403                            fmt_buffer[pos].head != geo.head ||
3404                            fmt_buffer[pos].record != (j + 1)) {
3405                                cdata->result = DASD_FMT_ERR_RECORD_ID;
3406                                goto out;
3407                        }
3408                }
3409        }
3410
3411out:
3412        /*
3413         * In case of no errors, i and pos have been advanced one entry past the
3414         * last one checked; decrease them by one to get the correct positions.
3415         */
3416        if (!cdata->result) {
3417                i--;
3418                pos--;
3419        }
3420
3421        cdata->unit = i;
3422        cdata->num_records = count;
3423        cdata->rec = fmt_buffer[pos].record;
3424        cdata->blksize = fmt_buffer[pos].dl;
3425        cdata->key_length = fmt_buffer[pos].kl;
3426}
3427
3428/*
3429 * Check the format of a range of tracks of a DASD.
3430 */
3431static int dasd_eckd_check_device_format(struct dasd_device *base,
3432                                         struct format_check_t *cdata,
3433                                         int enable_pav)
3434{
3435        struct dasd_eckd_private *private = base->private;
3436        struct eckd_count *fmt_buffer;
3437        struct irb irb;
3438        int rpt_max, rpt_exp;
3439        int fmt_buffer_size;
3440        int trk_per_cyl;
3441        int trkcount;
3442        int tpm = 0;
3443        int rc;
3444
3445        trk_per_cyl = private->rdc_data.trk_per_cyl;
3446
3447        /* Get the maximum and expected number of records per track */
3448        rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
3449        rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
3450
3451        trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3452        fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
3453
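            /*
             * The check buffer holds one eckd_count entry per possible record
             * (rpt_max) on each track to be checked.
             */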
3454        fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
3455        if (!fmt_buffer)
3456                return -ENOMEM;
3457
3458        /*
3459         * A certain FICON feature subset is needed to operate in transport
3460         * mode. Additionally, the support for transport mode is implicitly
3461         * checked by comparing the buffer size with fcx_max_data. As long as
3462         * the buffer size is smaller, we can operate in transport mode and
3463         * process multiple tracks. Otherwise, only one track at a time is
3464         * processed using command mode.
3465         */
3466        if ((private->features.feature[40] & 0x04) &&
3467            fmt_buffer_size <= private->fcx_max_data)
3468                tpm = 1;
3469
3470        rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
3471                                           tpm, fmt_buffer, rpt_max, &irb);
3472        if (rc && rc != -EIO)
3473                goto out;
3474        if (rc == -EIO) {
3475                /*
3476                 * If our first attempt with transport mode enabled comes back
3477                 * with an incorrect length error, we're going to retry the
3478                 * check with command mode.
3479                 */
3480                if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
3481                        tpm = 0;
3482                        rc = dasd_eckd_format_process_data(base, &cdata->expect,
3483                                                           enable_pav, tpm,
3484                                                           fmt_buffer, rpt_max,
3485                                                           &irb);
3486                        if (rc)
3487                                goto out;
3488                } else {
3489                        goto out;
3490                }
3491        }
3492
3493        dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
3494                                         trk_per_cyl, tpm);
3495
3496out:
3497        kfree(fmt_buffer);
3498
3499        return rc;
3500}
3501
3502static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3503{
3504        if (cqr->retries < 0) {
3505                cqr->status = DASD_CQR_FAILED;
3506                return;
3507        }
3508        cqr->status = DASD_CQR_FILLED;
3509        if (cqr->block && (cqr->startdev != cqr->block->base)) {
3510                dasd_eckd_reset_ccw_to_base_io(cqr);
3511                cqr->startdev = cqr->block->base;
3512                cqr->lpm = dasd_path_get_opm(cqr->block->base);
3513        }
3514}
3515
3516static dasd_erp_fn_t
3517dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3518{
3519        struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3520        struct ccw_device *cdev = device->cdev;
3521
3522        switch (cdev->id.cu_type) {
3523        case 0x3990:
3524        case 0x2105:
3525        case 0x2107:
3526        case 0x1750:
3527                return dasd_3990_erp_action;
3528        case 0x9343:
3529        case 0x3880:
3530        default:
3531                return dasd_default_erp_action;
3532        }
3533}
3534
3535static dasd_erp_fn_t
3536dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3537{
3538        return dasd_default_erp_postaction;
3539}
3540
3541static void dasd_eckd_check_for_device_change(struct dasd_device *device,
3542                                              struct dasd_ccw_req *cqr,
3543                                              struct irb *irb)
3544{
3545        char mask;
3546        char *sense = NULL;
3547        struct dasd_eckd_private *private = device->private;
3548
3549        /* first of all check for state change pending interrupt */
3550        mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
3551        if ((scsw_dstat(&irb->scsw) & mask) == mask) {
3552                /*
3553                 * for alias only, not in offline processing
3554                 * and only if not suspended
3555                 */
3556                if (!device->block && private->lcu &&
3557                    device->state == DASD_STATE_ONLINE &&
3558                    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3559                    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
3560                        /* schedule worker to reload device */
3561                        dasd_reload_device(device);
3562                }
3563                dasd_generic_handle_state_change(device);
3564                return;
3565        }
3566
3567        sense = dasd_get_sense(irb);
3568        if (!sense)
3569                return;
3570
3571        /* summary unit check */
3572        if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
3573            (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
3574                if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
3575                        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3576                                      "eckd suc: device already notified");
3577                        return;
3578                }
3579                sense = dasd_get_sense(irb);
3580                if (!sense) {
3581                        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3582                                      "eckd suc: no reason code available");
3583                        clear_bit(DASD_FLAG_SUC, &device->flags);
3584                        return;
3585
3586                }
3587                private->suc_reason = sense[8];
3588                DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
3589                              "eckd handle summary unit check: reason",
3590                              private->suc_reason);
3591                dasd_get_device(device);
3592                if (!schedule_work(&device->suc_work))
3593                        dasd_put_device(device);
3594
3595                return;
3596        }
3597
3598        /* service information message SIM */
3599        if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
3600            ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
3601                dasd_3990_erp_handle_sim(device, sense);
3602                return;
3603        }
3604
3605        /* loss of device reservation is handled via base devices only
3606         * as alias devices may be used with several bases
3607         */
3608        if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3609            (sense[7] == 0x3F) &&
3610            (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
3611            test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
3612                if (device->features & DASD_FEATURE_FAILONSLCK)
3613                        set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
3614                clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3615                dev_err(&device->cdev->dev,
3616                        "The device reservation was lost\n");
3617        }
3618}
3619
3620static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
3621                                       unsigned int first_trk,
3622                                       unsigned int last_trk)
3623{
3624        struct dasd_eckd_private *private = device->private;
3625        unsigned int trks_per_vol;
3626        int rc = 0;
3627
3628        trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
3629
3630        if (first_trk >= trks_per_vol) {
3631                dev_warn(&device->cdev->dev,
3632                         "Start track number %u used in the space release command is too big\n",
3633                         first_trk);
3634                rc = -EINVAL;
3635        } else if (last_trk >= trks_per_vol) {
3636                dev_warn(&device->cdev->dev,
3637                         "Stop track number %u used in the space release command is too big\n",
3638                         last_trk);
3639                rc = -EINVAL;
3640        } else if (first_trk > last_trk) {
3641                dev_warn(&device->cdev->dev,
3642                         "Start track %u used in the space release command exceeds the end track\n",
3643                         first_trk);
3644                rc = -EINVAL;
3645        }
3646        return rc;
3647}
3648
3649/*
3650 * Helper function to count the number of extents involved in a given track
3651 * range, taking extent alignment into account.
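     *
     * For example, with trks_per_ext = 30 a range from track 25 to track 95
     * touches a leading partial extent (tracks 25-29), two full extents
     * (30-59 and 60-89) and a trailing partial extent (90-95), so 4 is
     * returned.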
3652 */
3653static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
3654{
3655        int cur_pos = 0;
3656        int count = 0;
3657        int tmp;
3658
3659        if (from == to)
3660                return 1;
3661
3662        /* Count first partial extent */
3663        if (from % trks_per_ext != 0) {
3664                tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
3665                if (tmp > to)
3666                        tmp = to;
3667                cur_pos = tmp - from + 1;
3668                count++;
3669        }
3670        /* Count full extents */
3671        if (to - (from + cur_pos) + 1 >= trks_per_ext) {
3672                tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
3673                count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
3674                cur_pos = tmp;
3675        }
3676        /* Count last partial extent */
3677        if (cur_pos < to)
3678                count++;
3679
3680        return count;
3681}
3682
3683/*
3684 * Release allocated space for a given range or an entire volume.
3685 */
3686static struct dasd_ccw_req *
3687dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
3688                  struct request *req, unsigned int first_trk,
3689                  unsigned int last_trk, int by_extent)
3690{
3691        struct dasd_eckd_private *private = device->private;
3692        struct dasd_dso_ras_ext_range *ras_range;
3693        struct dasd_rssd_features *features;
3694        struct dasd_dso_ras_data *ras_data;
3695        u16 heads, beg_head, end_head;
3696        int cur_to_trk, cur_from_trk;
3697        struct dasd_ccw_req *cqr;
3698        u32 beg_cyl, end_cyl;
3699        struct ccw1 *ccw;
3700        int trks_per_ext;
3701        size_t ras_size;
3702        size_t size;
3703        int nr_exts;
3704        void *rq;
3705        int i;
3706
3707        if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
3708                return ERR_PTR(-EINVAL);
3709
3710        rq = req ? blk_mq_rq_to_pdu(req) : NULL;
3711
3712        features = &private->features;
3713
3714        trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3715        nr_exts = 0;
3716        if (by_extent)
3717                nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
3718        ras_size = sizeof(*ras_data);
3719        size = ras_size + (nr_exts * sizeof(*ras_range));
3720
3721        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
3722        if (IS_ERR(cqr)) {
3723                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
3724                                "Could not allocate RAS request");
3725                return cqr;
3726        }
3727
3728        ras_data = cqr->data;
3729        memset(ras_data, 0, size);
3730
3731        ras_data->order = DSO_ORDER_RAS;
3732        ras_data->flags.vol_type = 0; /* CKD volume */
3733        /* Release specified extents or entire volume */
3734        ras_data->op_flags.by_extent = by_extent;
3735        /*
3736         * This bit guarantees initialisation of tracks within an extent that is
3737         * not fully specified, but is only supported with a certain feature
3738         * subset.
3739         */
3740        ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
3741        ras_data->lss = private->ned->ID;
3742        ras_data->dev_addr = private->ned->unit_addr;
3743        ras_data->nr_exts = nr_exts;
3744
3745        if (by_extent) {
3746                heads = private->rdc_data.trk_per_cyl;
3747                cur_from_trk = first_trk;
3748                cur_to_trk = first_trk + trks_per_ext -
3749                        (first_trk % trks_per_ext) - 1;
3750                if (cur_to_trk > last_trk)
3751                        cur_to_trk = last_trk;
3752                ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
3753
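                    /*
                     * Translate the track range into one begin/end
                     * cylinder/head pair per extent, clamping the last extent
                     * to last_trk.
                     */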
3754                for (i = 0; i < nr_exts; i++) {
3755                        beg_cyl = cur_from_trk / heads;
3756                        beg_head = cur_from_trk % heads;
3757                        end_cyl = cur_to_trk / heads;
3758                        end_head = cur_to_trk % heads;
3759
3760                        set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
3761                        set_ch_t(&ras_range->end_ext, end_cyl, end_head);
3762
3763                        cur_from_trk = cur_to_trk + 1;
3764                        cur_to_trk = cur_from_trk + trks_per_ext - 1;
3765                        if (cur_to_trk > last_trk)
3766                                cur_to_trk = last_trk;
3767                        ras_range++;
3768                }
3769        }
3770
3771        ccw = cqr->cpaddr;
3772        ccw->cda = (__u32)(addr_t)cqr->data;
3773        ccw->cmd_code = DASD_ECKD_CCW_DSO;
3774        ccw->count = size;
3775
3776        cqr->startdev = device;
3777        cqr->memdev = device;
3778        cqr->block = block;
3779        cqr->retries = 256;
3780        cqr->expires = device->default_expires * HZ;
3781        cqr->buildclk = get_tod_clock();
3782        cqr->status = DASD_CQR_FILLED;
3783
3784        return cqr;
3785}
3786
3787static int dasd_eckd_release_space_full(struct dasd_device *device)
3788{
3789        struct dasd_ccw_req *cqr;
3790        int rc;
3791
3792        cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3793        if (IS_ERR(cqr))
3794                return PTR_ERR(cqr);
3795
3796        rc = dasd_sleep_on_interruptible(cqr);
3797
3798        dasd_sfree_request(cqr, cqr->memdev);
3799
3800        return rc;
3801}
3802
3803static int dasd_eckd_release_space_trks(struct dasd_device *device,
3804                                        unsigned int from, unsigned int to)
3805{
3806        struct dasd_eckd_private *private = device->private;
3807        struct dasd_block *block = device->block;
3808        struct dasd_ccw_req *cqr, *n;
3809        struct list_head ras_queue;
3810        unsigned int device_exts;
3811        int trks_per_ext;
3812        int stop, step;
3813        int cur_pos;
3814        int rc = 0;
3815        int retry;
3816
3817        INIT_LIST_HEAD(&ras_queue);
3818
3819        device_exts = private->real_cyl / dasd_eckd_ext_size(device);
3820        trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3821
3822        /* Make sure device limits are not exceeded */
3823        step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
3824        cur_pos = from;
3825
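            /*
             * Issue one RAS request per chunk of at most 'step' tracks,
             * aligned to extent boundaries; if memory runs short, process
             * what is already queued and retry the remainder.
             */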
3826        do {
3827                retry = 0;
3828                while (cur_pos < to) {
3829                        stop = cur_pos + step -
3830                                ((cur_pos + step) % trks_per_ext) - 1;
3831                        if (stop > to)
3832                                stop = to;
3833
3834                        cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
3835                        if (IS_ERR(cqr)) {
3836                                rc = PTR_ERR(cqr);
3837                                if (rc == -ENOMEM) {
3838                                        if (list_empty(&ras_queue))
3839                                                goto out;
3840                                        retry = 1;
3841                                        break;
3842                                }
3843                                goto err_out;
3844                        }
3845
3846                        spin_lock_irq(&block->queue_lock);
3847                        list_add_tail(&cqr->blocklist, &ras_queue);
3848                        spin_unlock_irq(&block->queue_lock);
3849                        cur_pos = stop + 1;
3850                }
3851
3852                rc = dasd_sleep_on_queue_interruptible(&ras_queue);
3853
3854err_out:
3855                list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
3856                        device = cqr->startdev;
3857                        private = device->private;
3858
3859                        spin_lock_irq(&block->queue_lock);
3860                        list_del_init(&cqr->blocklist);
3861                        spin_unlock_irq(&block->queue_lock);
3862                        dasd_sfree_request(cqr, device);
3863                        private->count--;
3864                }
3865        } while (retry);
3866
3867out:
3868        return rc;
3869}
3870
3871static int dasd_eckd_release_space(struct dasd_device *device,
3872                                   struct format_data_t *rdata)
3873{
3874        if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
3875                return dasd_eckd_release_space_full(device);
3876        else if (rdata->intensity == 0)
3877                return dasd_eckd_release_space_trks(device, rdata->start_unit,
3878                                                    rdata->stop_unit);
3879        else
3880                return -EINVAL;
3881}
3882
3883static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3884                                               struct dasd_device *startdev,
3885                                               struct dasd_block *block,
3886                                               struct request *req,
3887                                               sector_t first_rec,
3888                                               sector_t last_rec,
3889                                               sector_t first_trk,
3890                                               sector_t last_trk,
3891                                               unsigned int first_offs,
3892                                               unsigned int last_offs,
3893                                               unsigned int blk_per_trk,
3894                                               unsigned int blksize)
3895{
3896        struct dasd_eckd_private *private;
3897        unsigned long *idaws;
3898        struct LO_eckd_data *LO_data;
3899        struct dasd_ccw_req *cqr;
3900        struct ccw1 *ccw;
3901        struct req_iterator iter;
3902        struct bio_vec bv;
3903        char *dst;
3904        unsigned int off;
3905        int count, cidaw, cplength, datasize;
3906        sector_t recid;
3907        unsigned char cmd, rcmd;
3908        int use_prefix;
3909        struct dasd_device *basedev;
3910
3911        basedev = block->base;
3912        private = basedev->private;
3913        if (rq_data_dir(req) == READ)
3914                cmd = DASD_ECKD_CCW_READ_MT;
3915        else if (rq_data_dir(req) == WRITE)
3916                cmd = DASD_ECKD_CCW_WRITE_MT;
3917        else
3918                return ERR_PTR(-EINVAL);
3919
3920        /* Check struct bio and count the number of blocks for the request. */
3921        count = 0;
3922        cidaw = 0;
3923        rq_for_each_segment(bv, req, iter) {
3924                if (bv.bv_len & (blksize - 1))
3925                        /* Eckd can only do full blocks. */
3926                        return ERR_PTR(-EINVAL);
3927                count += bv.bv_len >> (block->s2b_shift + 9);
3928                if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
3929                        cidaw += bv.bv_len >> (block->s2b_shift + 9);
3930        }
3931        /* Paranoia. */
3932        if (count != last_rec - first_rec + 1)
3933                return ERR_PTR(-EINVAL);
3934
3935        /* use the prefix command if available */
3936        use_prefix = private->features.feature[8] & 0x01;
3937        if (use_prefix) {
3938                /* 1x prefix + number of blocks */
3939                cplength = 2 + count;
3940                /* 1x prefix + 1x locate record + cidaws*sizeof(long) */
3941                datasize = sizeof(struct PFX_eckd_data) +
3942                        sizeof(struct LO_eckd_data) +
3943                        cidaw * sizeof(unsigned long);
3944        } else {
3945                /* 1x define extent + 1x locate record + number of blocks */
3946                cplength = 2 + count;
3947                /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
3948                datasize = sizeof(struct DE_eckd_data) +
3949                        sizeof(struct LO_eckd_data) +
3950                        cidaw * sizeof(unsigned long);
3951        }
3952        /* Find out the number of additional locate record ccws for cdl. */
3953        if (private->uses_cdl && first_rec < 2*blk_per_trk) {
3954                if (last_rec >= 2*blk_per_trk)
3955                        count = 2*blk_per_trk - first_rec;
3956                cplength += count;
3957                datasize += count*sizeof(struct LO_eckd_data);
3958        }
3959        /* Allocate the ccw request. */
3960        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3961                                   startdev, blk_mq_rq_to_pdu(req));
3962        if (IS_ERR(cqr))
3963                return cqr;
3964        ccw = cqr->cpaddr;
3965        /* First ccw is define extent or prefix. */
3966        if (use_prefix) {
3967                if (prefix(ccw++, cqr->data, first_trk,
3968                           last_trk, cmd, basedev, startdev) == -EAGAIN) {
3969                        /* Clock not in sync and XRC is enabled.
3970                         * Try again later.
3971                         */
3972                        dasd_sfree_request(cqr, startdev);
3973                        return ERR_PTR(-EAGAIN);
3974                }
3975                idaws = (unsigned long *) (cqr->data +
3976                                           sizeof(struct PFX_eckd_data));
3977        } else {
3978                if (define_extent(ccw++, cqr->data, first_trk,
3979                                  last_trk, cmd, basedev, 0) == -EAGAIN) {
3980                        /* Clock not in sync and XRC is enabled.
3981                         * Try again later.
3982                         */
3983                        dasd_sfree_request(cqr, startdev);
3984                        return ERR_PTR(-EAGAIN);
3985                }
3986                idaws = (unsigned long *) (cqr->data +
3987                                           sizeof(struct DE_eckd_data));
3988        }
3989        /* Build locate_record + read/write ccws. */
3990        LO_data = (struct LO_eckd_data *) (idaws + cidaw);
3991        recid = first_rec;
3992        if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
3993                /* Only standard blocks so there is just one locate record. */
3994                ccw[-1].flags |= CCW_FLAG_CC;
3995                locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
3996                              last_rec - recid + 1, cmd, basedev, blksize);
3997        }
3998        rq_for_each_segment(bv, req, iter) {
3999                dst = page_address(bv.bv_page) + bv.bv_offset;
4000                if (dasd_page_cache) {
4001                        char *copy = kmem_cache_alloc(dasd_page_cache,
4002                                                      GFP_DMA | __GFP_NOWARN);
4003                        if (copy && rq_data_dir(req) == WRITE)
4004                                memcpy(copy + bv.bv_offset, dst, bv.bv_len);
4005                        if (copy)
4006                                dst = copy + bv.bv_offset;
4007                }
4008                for (off = 0; off < bv.bv_len; off += blksize) {
4009                        sector_t trkid = recid;
4010                        unsigned int recoffs = sector_div(trkid, blk_per_trk);
4011                        rcmd = cmd;
4012                        count = blksize;
4013                        /* Locate record for cdl special block ? */
4014                        if (private->uses_cdl && recid < 2*blk_per_trk) {
4015                                if (dasd_eckd_cdl_special(blk_per_trk, recid)){
4016                                        rcmd |= 0x8;
4017                                        count = dasd_eckd_cdl_reclen(recid);
4018                                        if (count < blksize &&
4019                                            rq_data_dir(req) == READ)
4020                                                memset(dst + count, 0xe5,
4021                                                       blksize - count);
4022                                }
4023                                ccw[-1].flags |= CCW_FLAG_CC;
4024                                locate_record(ccw++, LO_data++,
4025                                              trkid, recoffs + 1,
4026                                              1, rcmd, basedev, count);
4027                        }
4028                        /* Locate record for standard blocks ? */
4029                        if (private->uses_cdl && recid == 2*blk_per_trk) {
4030                                ccw[-1].flags |= CCW_FLAG_CC;
4031                                locate_record(ccw++, LO_data++,
4032                                              trkid, recoffs + 1,
4033                                              last_rec - recid + 1,
4034                                              cmd, basedev, count);
4035                        }
4036                        /* Read/write ccw. */
4037                        ccw[-1].flags |= CCW_FLAG_CC;
4038                        ccw->cmd_code = rcmd;
4039                        ccw->count = count;
4040                        if (idal_is_needed(dst, blksize)) {
4041                                ccw->cda = (__u32)(addr_t) idaws;
4042                                ccw->flags = CCW_FLAG_IDA;
4043                                idaws = idal_create_words(idaws, dst, blksize);
4044                        } else {
4045                                ccw->cda = (__u32)(addr_t) dst;
4046                                ccw->flags = 0;
4047                        }
4048                        ccw++;
4049                        dst += blksize;
4050                        recid++;
4051                }
4052        }
4053        if (blk_noretry_request(req) ||
4054            block->base->features & DASD_FEATURE_FAILFAST)
4055                set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4056        cqr->startdev = startdev;
4057        cqr->memdev = startdev;
4058        cqr->block = block;
4059        cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
4060        cqr->lpm = dasd_path_get_ppm(startdev);
4061        cqr->retries = startdev->default_retries;
4062        cqr->buildclk = get_tod_clock();
4063        cqr->status = DASD_CQR_FILLED;
4064
4065        /* Set flags to suppress output for expected errors */
4066        if (dasd_eckd_is_ese(basedev)) {
4067                set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4068                set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4069                set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4070        }
4071
4072        return cqr;
4073}
4074
4075static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
4076                                               struct dasd_device *startdev,
4077                                               struct dasd_block *block,
4078                                               struct request *req,
4079                                               sector_t first_rec,
4080                                               sector_t last_rec,
4081                                               sector_t first_trk,
4082                                               sector_t last_trk,
4083                                               unsigned int first_offs,
4084                                               unsigned int last_offs,
4085                                               unsigned int blk_per_trk,
4086                                               unsigned int blksize)
4087{
4088        unsigned long *idaws;
4089        struct dasd_ccw_req *cqr;
4090        struct ccw1 *ccw;
4091        struct req_iterator iter;
4092        struct bio_vec bv;
4093        char *dst, *idaw_dst;
4094        unsigned int cidaw, cplength, datasize;
4095        unsigned int tlf;
4096        sector_t recid;
4097        unsigned char cmd;
4098        struct dasd_device *basedev;
4099        unsigned int trkcount, count, count_to_trk_end;
4100        unsigned int idaw_len, seg_len, part_len, len_to_track_end;
4101        unsigned char new_track, end_idaw;
4102        sector_t trkid;
4103        unsigned int recoffs;
4104
4105        basedev = block->base;
4106        if (rq_data_dir(req) == READ)
4107                cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4108        else if (rq_data_dir(req) == WRITE)
4109                cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4110        else
4111                return ERR_PTR(-EINVAL);
4112
4113        /* Track based I/O needs IDAWs for each page, and not just for
4114         * 64 bit addresses. We need additional idals for pages
4115         * that get filled from two tracks, so we use the number
4116         * of records as an upper limit.
4117         */
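        /*
         * For example, a request covering records 10..29 reserves
         * cidaw = 20 idal words below, which is also enough when some of
         * those pages are filled from two tracks.
         */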
4118        cidaw = last_rec - first_rec + 1;
4119        trkcount = last_trk - first_trk + 1;
4120
4121        /* 1x prefix + one read/write ccw per track */
4122        cplength = 1 + trkcount;
4123
4124        datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
4125
4126        /* Allocate the ccw request. */
4127        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
4128                                   startdev, blk_mq_rq_to_pdu(req));
4129        if (IS_ERR(cqr))
4130                return cqr;
4131        ccw = cqr->cpaddr;
4132        /* transfer length factor: how many bytes to read from the last track */
4133        if (first_trk == last_trk)
4134                tlf = last_offs - first_offs + 1;
4135        else
4136                tlf = last_offs + 1;
4137        tlf *= blksize;
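        /*
         * E.g. a multi-track request whose last block is the third one on
         * its final track (last_offs == 2) with a 4k block size yields
         * tlf = 3 * 4096 = 12288 bytes for that last track.
         */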
4138
4139        if (prefix_LRE(ccw++, cqr->data, first_trk,
4140                       last_trk, cmd, basedev, startdev,
4141                       1 /* format */, first_offs + 1,
4142                       trkcount, blksize,
4143                       tlf) == -EAGAIN) {
4144                /* Clock not in sync and XRC is enabled.
4145                 * Try again later.
4146                 */
4147                dasd_sfree_request(cqr, startdev);
4148                return ERR_PTR(-EAGAIN);
4149        }
4150
4151        /*
4152         * The translation of request into ccw programs must meet the
4153         * following conditions:
4154         * - all idaws but the first and the last must address full pages
4155         *   (or 2K blocks on 31-bit)
4156         * - the scope of a ccw and its idal ends at the track boundary
4157         */
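        /*
         * The loop below enforces this: apart from the first idaw of a ccw,
         * an idaw may only start on an IDA_BLOCK_SIZE boundary (otherwise
         * the build fails with -ERANGE), and an idaw is closed whenever the
         * collected area ends on an IDA_BLOCK border or the track ends.
         */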
4158        idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
4159        recid = first_rec;
4160        new_track = 1;
4161        end_idaw = 0;
4162        len_to_track_end = 0;
4163        idaw_dst = NULL;
4164        idaw_len = 0;
4165        rq_for_each_segment(bv, req, iter) {
4166                dst = page_address(bv.bv_page) + bv.bv_offset;
4167                seg_len = bv.bv_len;
4168                while (seg_len) {
4169                        if (new_track) {
4170                                trkid = recid;
4171                                recoffs = sector_div(trkid, blk_per_trk);
4172                                count_to_trk_end = blk_per_trk - recoffs;
4173                                count = min((last_rec - recid + 1),
4174                                            (sector_t)count_to_trk_end);
4175                                len_to_track_end = count * blksize;
4176                                ccw[-1].flags |= CCW_FLAG_CC;
4177                                ccw->cmd_code = cmd;
4178                                ccw->count = len_to_track_end;
4179                                ccw->cda = (__u32)(addr_t)idaws;
4180                                ccw->flags = CCW_FLAG_IDA;
4181                                ccw++;
4182                                recid += count;
4183                                new_track = 0;
4184                                /* first idaw for a ccw may start anywhere */
4185                                if (!idaw_dst)
4186                                        idaw_dst = dst;
4187                        }
4188                        /* If we start a new idaw, we must make sure that it
4189                         * starts on an IDA_BLOCK_SIZE boundary.
4190                         * If we continue an idaw, we must make sure that the
4191                         * current segment begins where the idaw accumulated
4192                         * so far ends.
4193                         */
4194                        if (!idaw_dst) {
4195                                if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
4196                                        dasd_sfree_request(cqr, startdev);
4197                                        return ERR_PTR(-ERANGE);
4198                                } else
4199                                        idaw_dst = dst;
4200                        }
4201                        if ((idaw_dst + idaw_len) != dst) {
4202                                dasd_sfree_request(cqr, startdev);
4203                                return ERR_PTR(-ERANGE);
4204                        }
4205                        part_len = min(seg_len, len_to_track_end);
4206                        seg_len -= part_len;
4207                        dst += part_len;
4208                        idaw_len += part_len;
4209                        len_to_track_end -= part_len;
4210                        /* collected memory area ends on an IDA_BLOCK border,
4211                         * -> create an idaw
4212                         * idal_create_words will handle cases where idaw_len
4213                         * is larger than IDA_BLOCK_SIZE
4214                         */
4215                        if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
4216                                end_idaw = 1;
4217                        /* We also need to end the idaw at track end */
4218                        if (!len_to_track_end) {
4219                                new_track = 1;
4220                                end_idaw = 1;
4221                        }
4222                        if (end_idaw) {
4223                                idaws = idal_create_words(idaws, idaw_dst,
4224                                                          idaw_len);
4225                                idaw_dst = NULL;
4226                                idaw_len = 0;
4227                                end_idaw = 0;
4228                        }
4229                }
4230        }
4231
4232        if (blk_noretry_request(req) ||
4233            block->base->features & DASD_FEATURE_FAILFAST)
4234                set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4235        cqr->startdev = startdev;
4236        cqr->memdev = startdev;
4237        cqr->block = block;
4238        cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
4239        cqr->lpm = dasd_path_get_ppm(startdev);
4240        cqr->retries = startdev->default_retries;
4241        cqr->buildclk = get_tod_clock();
4242        cqr->status = DASD_CQR_FILLED;
4243
4244        /* Set flags to suppress output for expected errors */
4245        if (dasd_eckd_is_ese(basedev))
4246                set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4247
4248        return cqr;
4249}
4250
4251static int prepare_itcw(struct itcw *itcw,
4252                        unsigned int trk, unsigned int totrk, int cmd,
4253                        struct dasd_device *basedev,
4254                        struct dasd_device *startdev,
4255                        unsigned int rec_on_trk, int count,
4256                        unsigned int blksize,
4257                        unsigned int total_data_size,
4258                        unsigned int tlf,
4259                        unsigned int blk_per_trk)
4260{
4261        struct PFX_eckd_data pfxdata;
4262        struct dasd_eckd_private *basepriv, *startpriv;
4263        struct DE_eckd_data *dedata;
4264        struct LRE_eckd_data *lredata;
4265        struct dcw *dcw;
4266
4267        u32 begcyl, endcyl;
4268        u16 heads, beghead, endhead;
4269        u8 pfx_cmd;
4270
4271        int rc = 0;
4272        int sector = 0;
4273        int dn, d;
4274
4275
4276        /* setup prefix data */
4277        basepriv = basedev->private;
4278        startpriv = startdev->private;
4279        dedata = &pfxdata.define_extent;
4280        lredata = &pfxdata.locate_record;
4281
4282        memset(&pfxdata, 0, sizeof(pfxdata));
4283        pfxdata.format = 1; /* PFX with LRE */
4284        pfxdata.base_address = basepriv->ned->unit_addr;
4285        pfxdata.base_lss = basepriv->ned->ID;
4286        pfxdata.validity.define_extent = 1;
4287
4288        /* private uid is kept up to date, conf_data may be outdated */
4289        if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
4290                pfxdata.validity.verify_base = 1;
4291
4292        if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
4293                pfxdata.validity.verify_base = 1;
4294                pfxdata.validity.hyper_pav = 1;
4295        }
4296
4297        switch (cmd) {
4298        case DASD_ECKD_CCW_READ_TRACK_DATA:
4299                dedata->mask.perm = 0x1;
4300                dedata->attributes.operation = basepriv->attrib.operation;
4301                dedata->blk_size = blksize;
4302                dedata->ga_extended |= 0x42;
4303                lredata->operation.orientation = 0x0;
4304                lredata->operation.operation = 0x0C;
4305                lredata->auxiliary.check_bytes = 0x01;
4306                pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4307                break;
4308        case DASD_ECKD_CCW_WRITE_TRACK_DATA:
4309                dedata->mask.perm = 0x02;
4310                dedata->attributes.operation = basepriv->attrib.operation;
4311                dedata->blk_size = blksize;
4312                rc = set_timestamp(NULL, dedata, basedev);
4313                dedata->ga_extended |= 0x42;
4314                lredata->operation.orientation = 0x0;
4315                lredata->operation.operation = 0x3F;
4316                lredata->extended_operation = 0x23;
4317                lredata->auxiliary.check_bytes = 0x2;
4318                /*
4319                 * If XRC is supported the System Time Stamp is set. The
4320                 * validity of the time stamp must be reflected in the prefix
4321                 * data as well.
4322                 */
4323                if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
4324                        pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
4325                pfx_cmd = DASD_ECKD_CCW_PFX;
4326                break;
4327        case DASD_ECKD_CCW_READ_COUNT_MT:
4328                dedata->mask.perm = 0x1;
4329                dedata->attributes.operation = DASD_BYPASS_CACHE;
4330                dedata->ga_extended |= 0x42;
4331                dedata->blk_size = blksize;
4332                lredata->operation.orientation = 0x2;
4333                lredata->operation.operation = 0x16;
4334                lredata->auxiliary.check_bytes = 0x01;
4335                pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4336                break;
4337        default:
4338                DBF_DEV_EVENT(DBF_ERR, basedev,
4339                              "prepare itcw, unknown opcode 0x%x", cmd);
4340                BUG();
4341                break;
4342        }
4343        if (rc)
4344                return rc;
4345
4346        dedata->attributes.mode = 0x3;  /* ECKD */
4347
4348        heads = basepriv->rdc_data.trk_per_cyl;
4349        begcyl = trk / heads;
4350        beghead = trk % heads;
4351        endcyl = totrk / heads;
4352        endhead = totrk % heads;
4353
4354        /* check for sequential prestage - enhance cylinder range */
4355        if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
4356            dedata->attributes.operation == DASD_SEQ_ACCESS) {
4357
4358                if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
4359                        endcyl += basepriv->attrib.nr_cyl;
4360                else
4361                        endcyl = (basepriv->real_cyl - 1);
4362        }
4363
4364        set_ch_t(&dedata->beg_ext, begcyl, beghead);
4365        set_ch_t(&dedata->end_ext, endcyl, endhead);
4366
4367        dedata->ep_format = 0x20; /* records per track is valid */
4368        dedata->ep_rec_per_track = blk_per_trk;
4369
4370        if (rec_on_trk) {
4371                switch (basepriv->rdc_data.dev_type) {
4372                case 0x3390:
4373                        dn = ceil_quot(blksize + 6, 232);
4374                        d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
4375                        sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
4376                        break;
4377                case 0x3380:
4378                        d = 7 + ceil_quot(blksize + 12, 32);
4379                        sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
4380                        break;
4381                }
4382        }
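        /*
         * Rough illustration of the geometry above: on a 3390 with a 4k
         * block size, dn = ceil(4102 / 232) = 18 and
         * d = 9 + ceil((4096 + 6 * 19) / 34) = 133, so record 2 starts at
         * sector (49 + 1 * (10 + 133)) / 8 = 24.
         */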
4383
4384        if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
4385                lredata->auxiliary.length_valid = 0;
4386                lredata->auxiliary.length_scope = 0;
4387                lredata->sector = 0xff;
4388        } else {
4389                lredata->auxiliary.length_valid = 1;
4390                lredata->auxiliary.length_scope = 1;
4391                lredata->sector = sector;
4392        }
4393        lredata->auxiliary.imbedded_ccw_valid = 1;
4394        lredata->length = tlf;
4395        lredata->imbedded_ccw = cmd;
4396        lredata->count = count;
4397        set_ch_t(&lredata->seek_addr, begcyl, beghead);
4398        lredata->search_arg.cyl = lredata->seek_addr.cyl;
4399        lredata->search_arg.head = lredata->seek_addr.head;
4400        lredata->search_arg.record = rec_on_trk;
4401
4402        dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
4403                     &pfxdata, sizeof(pfxdata), total_data_size);
4404        return PTR_RET(dcw);
4405}
4406
4407static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
4408                                               struct dasd_device *startdev,
4409                                               struct dasd_block *block,
4410                                               struct request *req,
4411                                               sector_t first_rec,
4412                                               sector_t last_rec,
4413                                               sector_t first_trk,
4414                                               sector_t last_trk,
4415                                               unsigned int first_offs,
4416                                               unsigned int last_offs,
4417                                               unsigned int blk_per_trk,
4418                                               unsigned int blksize)
4419{
4420        struct dasd_ccw_req *cqr;
4421        struct req_iterator iter;
4422        struct bio_vec bv;
4423        char *dst;
4424        unsigned int trkcount, ctidaw;
4425        unsigned char cmd;
4426        struct dasd_device *basedev;
4427        unsigned int tlf;
4428        struct itcw *itcw;
4429        struct tidaw *last_tidaw = NULL;
4430        int itcw_op;
4431        size_t itcw_size;
4432        u8 tidaw_flags;
4433        unsigned int seg_len, part_len, len_to_track_end;
4434        unsigned char new_track;
4435        sector_t recid, trkid;
4436        unsigned int offs;
4437        unsigned int count, count_to_trk_end;
4438        int ret;
4439
4440        basedev = block->base;
4441        if (rq_data_dir(req) == READ) {
4442                cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4443                itcw_op = ITCW_OP_READ;
4444        } else if (rq_data_dir(req) == WRITE) {
4445                cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4446                itcw_op = ITCW_OP_WRITE;
4447        } else
4448                return ERR_PTR(-EINVAL);
4449
4450        /* Track based I/O needs to address all memory via TIDAWs,
4451         * not just 64 bit addresses. This allows us to map
4452         * each segment directly to one tidaw.
4453         * In the case of write requests, additional tidaws may
4454         * be needed when a segment crosses a track boundary.
4455         */
4456        trkcount = last_trk - first_trk + 1;
4457        ctidaw = 0;
4458        rq_for_each_segment(bv, req, iter) {
4459                ++ctidaw;
4460        }
4461        if (rq_data_dir(req) == WRITE)
4462                ctidaw += (last_trk - first_trk);
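        /*
         * E.g. a write with 10 bio segments spanning 3 tracks reserves
         * 10 + 2 = 12 tidaws, leaving room for the extra tidaws needed
         * when a segment is split at a track boundary.
         */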
4463
4464        /* Allocate the ccw request. */
4465        itcw_size = itcw_calc_size(0, ctidaw, 0);
4466        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
4467                                   blk_mq_rq_to_pdu(req));
4468        if (IS_ERR(cqr))
4469                return cqr;
4470
4471        /* transfer length factor: how many bytes to read from the last track */
4472        if (first_trk == last_trk)
4473                tlf = last_offs - first_offs + 1;
4474        else
4475                tlf = last_offs + 1;
4476        tlf *= blksize;
4477
4478        itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
4479        if (IS_ERR(itcw)) {
4480                ret = -EINVAL;
4481                goto out_error;
4482        }
4483        cqr->cpaddr = itcw_get_tcw(itcw);
4484        if (prepare_itcw(itcw, first_trk, last_trk,
4485                         cmd, basedev, startdev,
4486                         first_offs + 1,
4487                         trkcount, blksize,
4488                         (last_rec - first_rec + 1) * blksize,
4489                         tlf, blk_per_trk) == -EAGAIN) {
4490                /* Clock not in sync and XRC is enabled.
4491                 * Try again later.
4492                 */
4493                ret = -EAGAIN;
4494                goto out_error;
4495        }
4496        len_to_track_end = 0;
4497        /*
4498         * A tidaw can address 4k of memory, but must not cross page boundaries.
4499         * We can let the block layer handle this by setting
4500         * blk_queue_segment_boundary to page boundaries and
4501         * blk_max_segment_size to page size when setting up the request queue.
4502         * For write requests, a TIDAW must not cross track boundaries, because
4503         * we have to set the CBC flag on the last tidaw for each track.
4504         */
4505        if (rq_data_dir(req) == WRITE) {
4506                new_track = 1;
4507                recid = first_rec;
4508                rq_for_each_segment(bv, req, iter) {
4509                        dst = page_address(bv.bv_page) + bv.bv_offset;
4510                        seg_len = bv.bv_len;
4511                        while (seg_len) {
4512                                if (new_track) {
4513                                        trkid = recid;
4514                                        offs = sector_div(trkid, blk_per_trk);
4515                                        count_to_trk_end = blk_per_trk - offs;
4516                                        count = min((last_rec - recid + 1),
4517                                                    (sector_t)count_to_trk_end);
4518                                        len_to_track_end = count * blksize;
4519                                        recid += count;
4520                                        new_track = 0;
4521                                }
4522                                part_len = min(seg_len, len_to_track_end);
4523                                seg_len -= part_len;
4524                                len_to_track_end -= part_len;
4525                                /* We need to end the tidaw at track end */
4526                                if (!len_to_track_end) {
4527                                        new_track = 1;
4528                                        tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
4529                                } else
4530                                        tidaw_flags = 0;
4531                                last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
4532                                                            dst, part_len);
4533                                if (IS_ERR(last_tidaw)) {
4534                                        ret = -EINVAL;
4535                                        goto out_error;
4536                                }
4537                                dst += part_len;
4538                        }
4539                }
4540        } else {
4541                rq_for_each_segment(bv, req, iter) {
4542                        dst = page_address(bv.bv_page) + bv.bv_offset;
4543                        last_tidaw = itcw_add_tidaw(itcw, 0x00,
4544                                                    dst, bv.bv_len);
4545                        if (IS_ERR(last_tidaw)) {
4546                                ret = -EINVAL;
4547                                goto out_error;
4548                        }
4549                }
4550        }
4551        last_tidaw->flags |= TIDAW_FLAGS_LAST;
4552        last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
4553        itcw_finalize(itcw);
4554
4555        if (blk_noretry_request(req) ||
4556            block->base->features & DASD_FEATURE_FAILFAST)
4557                set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4558        cqr->cpmode = 1;
4559        cqr->startdev = startdev;
4560        cqr->memdev = startdev;
4561        cqr->block = block;
4562        cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
4563        cqr->lpm = dasd_path_get_ppm(startdev);
4564        cqr->retries = startdev->default_retries;
4565        cqr->buildclk = get_tod_clock();
4566        cqr->status = DASD_CQR_FILLED;
4567
4568        /* Set flags to suppress output for expected errors */
4569        if (dasd_eckd_is_ese(basedev)) {
4570                set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4571                set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4572                set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4573        }
4574
4575        return cqr;
4576out_error:
4577        dasd_sfree_request(cqr, startdev);
4578        return ERR_PTR(ret);
4579}
4580
4581static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
4582                                               struct dasd_block *block,
4583                                               struct request *req)
4584{
4585        int cmdrtd, cmdwtd;
4586        int use_prefix;
4587        int fcx_multitrack;
4588        struct dasd_eckd_private *private;
4589        struct dasd_device *basedev;
4590        sector_t first_rec, last_rec;
4591        sector_t first_trk, last_trk;
4592        unsigned int first_offs, last_offs;
4593        unsigned int blk_per_trk, blksize;
4594        int cdlspecial;
4595        unsigned int data_size;
4596        struct dasd_ccw_req *cqr;
4597
4598        basedev = block->base;
4599        private = basedev->private;
4600
4601        /* Calculate number of blocks/records per track. */
4602        blksize = block->bp_block;
4603        blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4604        if (blk_per_trk == 0)
4605                return ERR_PTR(-EINVAL);
4606        /* Calculate record id of first and last block. */
4607        first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
4608        first_offs = sector_div(first_trk, blk_per_trk);
4609        last_rec = last_trk =
4610                (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
4611        last_offs = sector_div(last_trk, blk_per_trk);
4612        cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
4613
4614        fcx_multitrack = private->features.feature[40] & 0x20;
4615        data_size = blk_rq_bytes(req);
4616        if (data_size % blksize)
4617                return ERR_PTR(-EINVAL);
4618        /* tpm write requests add CBC data on each track boundary */
4619        if (rq_data_dir(req) == WRITE)
4620                data_size += (last_trk - first_trk) * 4;
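        /* e.g. a write spanning 4 tracks accounts for 3 * 4 = 12 CBC bytes */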
4621
4622        /* are read track data and write track data supported in command mode? */
4623        cmdrtd = private->features.feature[9] & 0x20;
4624        cmdwtd = private->features.feature[12] & 0x40;
4625        use_prefix = private->features.feature[8] & 0x01;
4626
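        /*
         * Channel program selection below: transport mode track I/O is
         * preferred when the request fits into fcx_max_data and either fcx
         * multitrack is supported or the request stays on one track;
         * command mode track I/O is used when prefix and the matching
         * RTD/WTD command are supported; everything else (including CDL
         * special areas and the dasd_page_cache case) goes through the
         * single-block command mode path, which also serves as fallback
         * when a track based build fails (except for -EAGAIN and -ENOMEM).
         */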
4627        cqr = NULL;
4628        if (cdlspecial || dasd_page_cache) {
4629                /* do nothing, just fall through to the cmd mode single case */
4630        } else if ((data_size <= private->fcx_max_data)
4631                   && (fcx_multitrack || (first_trk == last_trk))) {
4632                cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4633                                                    first_rec, last_rec,
4634                                                    first_trk, last_trk,
4635                                                    first_offs, last_offs,
4636                                                    blk_per_trk, blksize);
4637                if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4638                    (PTR_ERR(cqr) != -ENOMEM))
4639                        cqr = NULL;
4640        } else if (use_prefix &&
4641                   (((rq_data_dir(req) == READ) && cmdrtd) ||
4642                    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
4643                cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4644                                                   first_rec, last_rec,
4645                                                   first_trk, last_trk,
4646                                                   first_offs, last_offs,
4647                                                   blk_per_trk, blksize);
4648                if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4649                    (PTR_ERR(cqr) != -ENOMEM))
4650                        cqr = NULL;
4651        }
4652        if (!cqr)
4653                cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4654                                                    first_rec, last_rec,
4655                                                    first_trk, last_trk,
4656                                                    first_offs, last_offs,
4657                                                    blk_per_trk, blksize);
4658        return cqr;
4659}
4660
4661static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
4662                                                   struct dasd_block *block,
4663                                                   struct request *req)
4664{
4665        sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
4666        unsigned int seg_len, len_to_track_end;
4667        unsigned int cidaw, cplength, datasize;
4668        sector_t first_trk, last_trk, sectors;
4669        struct dasd_eckd_private *base_priv;
4670        struct dasd_device *basedev;
4671        struct req_iterator iter;
4672        struct dasd_ccw_req *cqr;
4673        unsigned int first_offs;
4674        unsigned int trkcount;
4675        unsigned long *idaws;
4676        unsigned int size;
4677        unsigned char cmd;
4678        struct bio_vec bv;
4679        struct ccw1 *ccw;
4680        int use_prefix;
4681        void *data;
4682        char *dst;
4683
4684        /*
4685         * raw track access needs to be a multiple of 64k and on a 64k boundary.
4686         * For read requests we can fix an incorrect alignment by padding
4687         * the request with dummy pages.
4688         */
4689        start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
4690        end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
4691                DASD_RAW_SECTORS_PER_TRACK;
4692        end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
4693                DASD_RAW_SECTORS_PER_TRACK;
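        /*
         * E.g. a read starting at sector 200 sits 200 % 128 = 72 sectors
         * into its track, so nine 4k pages of rawpadpage are prepended
         * below before the real payload; a write with such an offset is
         * rejected instead.
         */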
4694        basedev = block->base;
4695        if ((start_padding_sectors || end_padding_sectors) &&
4696            (rq_data_dir(req) == WRITE)) {
4697                DBF_DEV_EVENT(DBF_ERR, basedev,
4698                              "raw write not track aligned (%llu,%llu) req %p",
4699                              start_padding_sectors, end_padding_sectors, req);
4700                return ERR_PTR(-EINVAL);
4701        }
4702
4703        first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
4704        last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
4705                DASD_RAW_SECTORS_PER_TRACK;
4706        trkcount = last_trk - first_trk + 1;
4707        first_offs = 0;
4708
4709        if (rq_data_dir(req) == READ)
4710                cmd = DASD_ECKD_CCW_READ_TRACK;
4711        else if (rq_data_dir(req) == WRITE)
4712                cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
4713        else
4714                return ERR_PTR(-EINVAL);
4715
4716        /*
4717         * Raw track based I/O needs IDAWs for each page,
4718         * and not just for 64 bit addresses.
4719         */
4720        cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
4721
4722        /*
4723         * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
4724         * of extended parameter. This is needed for write full track.
4725         */
4726        base_priv = basedev->private;
4727        use_prefix = base_priv->features.feature[8] & 0x01;
4728        if (use_prefix) {
4729                cplength = 1 + trkcount;
4730                size = sizeof(struct PFX_eckd_data) + 2;
4731        } else {
4732                cplength = 2 + trkcount;
4733                size = sizeof(struct DE_eckd_data) +
4734                        sizeof(struct LRE_eckd_data) + 2;
4735        }
4736        size = ALIGN(size, 8);
4737
4738        datasize = size + cidaw * sizeof(unsigned long);
4739
4740        /* Allocate the ccw request. */
4741        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
4742                                   datasize, startdev, blk_mq_rq_to_pdu(req));
4743        if (IS_ERR(cqr))
4744                return cqr;
4745
4746        ccw = cqr->cpaddr;
4747        data = cqr->data;
4748
4749        if (use_prefix) {
4750                prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
4751                           startdev, 1, first_offs + 1, trkcount, 0, 0);
4752        } else {
4753                define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
4754                ccw[-1].flags |= CCW_FLAG_CC;
4755
4756                data += sizeof(struct DE_eckd_data);
4757                locate_record_ext(ccw++, data, first_trk, first_offs + 1,
4758                                  trkcount, cmd, basedev, 0, 0);
4759        }
4760
4761        idaws = (unsigned long *)(cqr->data + size);
4762        len_to_track_end = 0;
4763        if (start_padding_sectors) {
4764                ccw[-1].flags |= CCW_FLAG_CC;
4765                ccw->cmd_code = cmd;
4766                /* maximum 3390 track size */
4767                ccw->count = 57326;
4768                /* 64k map to one track */
4769                len_to_track_end = 65536 - start_padding_sectors * 512;
4770                ccw->cda = (__u32)(addr_t)idaws;
4771                ccw->flags |= CCW_FLAG_IDA;
4772                ccw->flags |= CCW_FLAG_SLI;
4773                ccw++;
4774                for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
4775                        idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4776        }
4777        rq_for_each_segment(bv, req, iter) {
4778                dst = page_address(bv.bv_page) + bv.bv_offset;
4779                seg_len = bv.bv_len;
4780                if (cmd == DASD_ECKD_CCW_READ_TRACK)
4781                        memset(dst, 0, seg_len);
4782                if (!len_to_track_end) {
4783                        ccw[-1].flags |= CCW_FLAG_CC;
4784                        ccw->cmd_code = cmd;
4785                        /* maximum 3390 track size */
4786                        ccw->count = 57326;
4787                        /* 64k map to one track */
4788                        len_to_track_end = 65536;
4789                        ccw->cda = (__u32)(addr_t)idaws;
4790                        ccw->flags |= CCW_FLAG_IDA;
4791                        ccw->flags |= CCW_FLAG_SLI;
4792                        ccw++;
4793                }
4794                len_to_track_end -= seg_len;
4795                idaws = idal_create_words(idaws, dst, seg_len);
4796        }
4797        for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
4798                idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4799        if (blk_noretry_request(req) ||
4800            block->base->features & DASD_FEATURE_FAILFAST)
4801                set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4802        cqr->startdev = startdev;
4803        cqr->memdev = startdev;
4804        cqr->block = block;
4805        cqr->expires = startdev->default_expires * HZ;
4806        cqr->lpm = dasd_path_get_ppm(startdev);
4807        cqr->retries = startdev->default_retries;
4808        cqr->buildclk = get_tod_clock();
4809        cqr->status = DASD_CQR_FILLED;
4810
4811        return cqr;
4812}
4813
4814
4815static int
4816dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
4817{
4818        struct dasd_eckd_private *private;
4819        struct ccw1 *ccw;
4820        struct req_iterator iter;
4821        struct bio_vec bv;
4822        char *dst, *cda;
4823        unsigned int blksize, blk_per_trk, off;
4824        sector_t recid;
4825        int status;
4826
4827        if (!dasd_page_cache)
4828                goto out;
4829        private = cqr->block->base->private;
4830        blksize = cqr->block->bp_block;
4831        blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4832        recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
4833        ccw = cqr->cpaddr;
4834        /* Skip over define extent & locate record. */
4835        ccw++;
4836        if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
4837                ccw++;
4838        rq_for_each_segment(bv, req, iter) {
4839                dst = page_address(bv.bv_page) + bv.bv_offset;
4840                for (off = 0; off < bv.bv_len; off += blksize) {
4841                        /* Skip locate record. */
4842                        if (private->uses_cdl && recid <= 2*blk_per_trk)
4843                                ccw++;
4844                        if (dst) {
4845                                if (ccw->flags & CCW_FLAG_IDA)
4846                                        cda = *((char **)((addr_t) ccw->cda));
4847                                else
4848                                        cda = (char *)((addr_t) ccw->cda);
4849                                if (dst != cda) {
4850                                        if (rq_data_dir(req) == READ)
4851                                                memcpy(dst, cda, bv.bv_len);
4852                                        kmem_cache_free(dasd_page_cache,
4853                                            (void *)((addr_t)cda & PAGE_MASK));
4854                                }
4855                                dst = NULL;
4856                        }
4857                        ccw++;
4858                        recid++;
4859                }
4860        }
4861out:
4862        status = cqr->status == DASD_CQR_DONE;
4863        dasd_sfree_request(cqr, cqr->memdev);
4864        return status;
4865}
4866
4867/*
4868 * Modify ccw/tcw in cqr so it can be started on a base device.
4869 *
4870 * Note that this is not enough to restart the cqr!
4871 * Either reset cqr->startdev as well (summary unit check handling)
4872 * or restart via separate cqr (as in ERP handling).
4873 */
4874void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4875{
4876        struct ccw1 *ccw;
4877        struct PFX_eckd_data *pfxdata;
4878        struct tcw *tcw;
4879        struct tccb *tccb;
4880        struct dcw *dcw;
4881
4882        if (cqr->cpmode == 1) {
4883                tcw = cqr->cpaddr;
4884                tccb = tcw_get_tccb(tcw);
4885                dcw = (struct dcw *)&tccb->tca[0];
4886                pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
4887                pfxdata->validity.verify_base = 0;
4888                pfxdata->validity.hyper_pav = 0;
4889        } else {
4890                ccw = cqr->cpaddr;
4891                pfxdata = cqr->data;
4892                if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4893                        pfxdata->validity.verify_base = 0;
4894                        pfxdata->validity.hyper_pav = 0;
4895                }
4896        }
4897}
4898
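/*
 * Per start device limit of requests built via dasd_eckd_build_alias_cp()
 * and not yet freed again; once it is reached, further requests are
 * refused with -EBUSY.
 */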
4899#define DASD_ECKD_CHANQ_MAX_SIZE 4
4900
4901static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
4902                                                     struct dasd_block *block,
4903                                                     struct request *req)
4904{
4905        struct dasd_eckd_private *private;
4906        struct dasd_device *startdev;
4907        unsigned long flags;
4908        struct dasd_ccw_req *cqr;
4909
4910        startdev = dasd_alias_get_start_dev(base);
4911        if (!startdev)
4912                startdev = base;
4913        private = startdev->private;
4914        if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
4915                return ERR_PTR(-EBUSY);
4916
4917        spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
4918        private->count++;
4919        if ((base->features & DASD_FEATURE_USERAW))
4920                cqr = dasd_eckd_build_cp_raw(startdev, block, req);
4921        else
4922                cqr = dasd_eckd_build_cp(startdev, block, req);
4923        if (IS_ERR(cqr))
4924                private->count--;
4925        spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
4926        return cqr;
4927}
4928
4929static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4930                                   struct request *req)
4931{
4932        struct dasd_eckd_private *private;
4933        unsigned long flags;
4934
4935        spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
4936        private = cqr->memdev->private;
4937        private->count--;
4938        spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
4939        return dasd_eckd_free_cp(cqr, req);
4940}
4941
4942static int
4943dasd_eckd_fill_info(struct dasd_device * device,
4944                    struct dasd_information2_t * info)
4945{
4946        struct dasd_eckd_private *private = device->private;
4947
4948        info->label_block = 2;
4949        info->FBA_layout = private->uses_cdl ? 0 : 1;
4950        info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
4951        info->characteristics_size = sizeof(private->rdc_data);
4952        memcpy(info->characteristics, &private->rdc_data,
4953               sizeof(private->rdc_data));
4954        info->confdata_size = min((unsigned long)private->conf_len,
4955                                  sizeof(info->configuration_data));
4956        memcpy(info->configuration_data, private->conf_data,
4957               info->confdata_size);
4958        return 0;
4959}
4960
4961/*
4962 * SECTION: ioctl functions for eckd devices.
4963 */
4964
4965/*
4966 * Release device ioctl.
4967 * Builds a channel program to release a previously reserved
4968 * (see dasd_eckd_reserve) device.
4969 */
4970static int
4971dasd_eckd_release(struct dasd_device *device)
4972{
4973        struct dasd_ccw_req *cqr;
4974        int rc;
4975        struct ccw1 *ccw;
4976        int useglobal;
4977
4978        if (!capable(CAP_SYS_ADMIN))
4979                return -EACCES;
4980
4981        useglobal = 0;
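        /*
         * dasd_smalloc_request() below may fail while I/O is stalled by an
         * outstanding reserve; in that case fall back to the preallocated
         * dasd_reserve_req, serialized by dasd_reserve_mutex.
         */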
4982        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4983        if (IS_ERR(cqr)) {
4984                mutex_lock(&dasd_reserve_mutex);
4985                useglobal = 1;
4986                cqr = &dasd_reserve_req->cqr;
4987                memset(cqr, 0, sizeof(*cqr));
4988                memset(&dasd_reserve_req->ccw, 0,
4989                       sizeof(dasd_reserve_req->ccw));
4990                cqr->cpaddr = &dasd_reserve_req->ccw;
4991                cqr->data = &dasd_reserve_req->data;
4992                cqr->magic = DASD_ECKD_MAGIC;
4993        }
4994        ccw = cqr->cpaddr;
4995        ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
4996        ccw->flags |= CCW_FLAG_SLI;
4997        ccw->count = 32;
4998        ccw->cda = (__u32)(addr_t) cqr->data;
4999        cqr->startdev = device;
5000        cqr->memdev = device;
5001        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5002        set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5003        cqr->retries = 2;       /* set retry counter to enable basic ERP */
5004        cqr->expires = 2 * HZ;
5005        cqr->buildclk = get_tod_clock();
5006        cqr->status = DASD_CQR_FILLED;
5007
5008        rc = dasd_sleep_on_immediatly(cqr);
5009        if (!rc)
5010                clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5011
5012        if (useglobal)
5013                mutex_unlock(&dasd_reserve_mutex);
5014        else
5015                dasd_sfree_request(cqr, cqr->memdev);
5016        return rc;
5017}
5018
5019/*
5020 * Reserve device ioctl.
5021 * Options are set to 'synchronous wait for interrupt' and
5022 * 'timeout the request'. This leads to terminating the I/O if
5023 * the interrupt is outstanding for a certain time.
5024 */
5025static int
5026dasd_eckd_reserve(struct dasd_device *device)
5027{
5028        struct dasd_ccw_req *cqr;
5029        int rc;
5030        struct ccw1 *ccw;
5031        int useglobal;
5032
5033        if (!capable(CAP_SYS_ADMIN))
5034                return -EACCES;
5035
5036        useglobal = 0;
5037        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5038        if (IS_ERR(cqr)) {
5039                mutex_lock(&dasd_reserve_mutex);
5040                useglobal = 1;
5041                cqr = &dasd_reserve_req->cqr;
5042                memset(cqr, 0, sizeof(*cqr));
5043                memset(&dasd_reserve_req->ccw, 0,
5044                       sizeof(dasd_reserve_req->ccw));
5045                cqr->cpaddr = &dasd_reserve_req->ccw;
5046                cqr->data = &dasd_reserve_req->data;
5047                cqr->magic = DASD_ECKD_MAGIC;
5048        }
5049        ccw = cqr->cpaddr;
5050        ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
5051        ccw->flags |= CCW_FLAG_SLI;
5052        ccw->count = 32;
5053        ccw->cda = (__u32)(addr_t) cqr->data;
5054        cqr->startdev = device;
5055        cqr->memdev = device;
5056        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5057        set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5058        cqr->retries = 2;       /* set retry counter to enable basic ERP */
5059        cqr->expires = 2 * HZ;
5060        cqr->buildclk = get_tod_clock();
5061        cqr->status = DASD_CQR_FILLED;
5062
5063        rc = dasd_sleep_on_immediatly(cqr);
5064        if (!rc)
5065                set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5066
5067        if (useglobal)
5068                mutex_unlock(&dasd_reserve_mutex);
5069        else
5070                dasd_sfree_request(cqr, cqr->memdev);
5071        return rc;
5072}
5073
5074/*
5075 * Steal lock ioctl - unconditional reserve device.
5076 * Builds a channel program to break a device's reservation
5077 * (unconditional reserve).
5078 */
5079static int
5080dasd_eckd_steal_lock(struct dasd_device *device)
5081{
5082        struct dasd_ccw_req *cqr;
5083        int rc;
5084        struct ccw1 *ccw;
5085        int useglobal;
5086
5087        if (!capable(CAP_SYS_ADMIN))
5088                return -EACCES;
5089
5090        useglobal = 0;
5091        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5092        if (IS_ERR(cqr)) {
5093                mutex_lock(&dasd_reserve_mutex);
5094                useglobal = 1;
5095                cqr = &dasd_reserve_req->cqr;
5096                memset(cqr, 0, sizeof(*cqr));
5097                memset(&dasd_reserve_req->ccw, 0,
5098                       sizeof(dasd_reserve_req->ccw));
5099                cqr->cpaddr = &dasd_reserve_req->ccw;
5100                cqr->data = &dasd_reserve_req->data;
5101                cqr->magic = DASD_ECKD_MAGIC;
5102        }
5103        ccw = cqr->cpaddr;
5104        ccw->cmd_code = DASD_ECKD_CCW_SLCK;
5105        ccw->flags |= CCW_FLAG_SLI;
5106        ccw->count = 32;
5107        ccw->cda = (__u32)(addr_t) cqr->data;
5108        cqr->startdev = device;
5109        cqr->memdev = device;
5110        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5111        set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5112        cqr->retries = 2;       /* set retry counter to enable basic ERP */
5113        cqr->expires = 2 * HZ;
5114        cqr->buildclk = get_tod_clock();
5115        cqr->status = DASD_CQR_FILLED;
5116
5117        rc = dasd_sleep_on_immediatly(cqr);
5118        if (!rc)
5119                set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5120
5121        if (useglobal)
5122                mutex_unlock(&dasd_reserve_mutex);
5123        else
5124                dasd_sfree_request(cqr, cqr->memdev);
5125        return rc;
5126}
5127
5128/*
5129 * SNID - Sense Path Group ID
5130 * This ioctl may be used in situations where I/O is stalled due to
5131 * a reserve, so if the normal dasd_smalloc_request fails, we use the
5132 * preallocated dasd_reserve_req.
5133 */
5134static int dasd_eckd_snid(struct dasd_device *device,
5135                          void __user *argp)
5136{
5137        struct dasd_ccw_req *cqr;
5138        int rc;
5139        struct ccw1 *ccw;
5140        int useglobal;
5141        struct dasd_snid_ioctl_data usrparm;
5142
5143        if (!capable(CAP_SYS_ADMIN))
5144                return -EACCES;
5145
5146        if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5147                return -EFAULT;
5148
5149        useglobal = 0;
5150        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
5151                                   sizeof(struct dasd_snid_data), device,
5152                                   NULL);
5153        if (IS_ERR(cqr)) {
5154                mutex_lock(&dasd_reserve_mutex);
5155                useglobal = 1;
5156                cqr = &dasd_reserve_req->cqr;
5157                memset(cqr, 0, sizeof(*cqr));
5158                memset(&dasd_reserve_req->ccw, 0,
5159                       sizeof(dasd_reserve_req->ccw));
5160                cqr->cpaddr = &dasd_reserve_req->ccw;
5161                cqr->data = &dasd_reserve_req->data;
5162                cqr->magic = DASD_ECKD_MAGIC;
5163        }
5164        ccw = cqr->cpaddr;
5165        ccw->cmd_code = DASD_ECKD_CCW_SNID;
5166        ccw->flags |= CCW_FLAG_SLI;
5167        ccw->count = 12;
5168        ccw->cda = (__u32)(addr_t) cqr->data;
5169        cqr->startdev = device;
5170        cqr->memdev = device;
5171        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5172        set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5173        set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
5174        cqr->retries = 5;
5175        cqr->expires = 10 * HZ;
5176        cqr->buildclk = get_tod_clock();
5177        cqr->status = DASD_CQR_FILLED;
5178        cqr->lpm = usrparm.path_mask;
5179
5180        rc = dasd_sleep_on_immediatly(cqr);
5181        /* verify that I/O processing didn't modify the path mask */
5182        if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
5183                rc = -EIO;
5184        if (!rc) {
5185                usrparm.data = *((struct dasd_snid_data *)cqr->data);
5186                if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
5187                        rc = -EFAULT;
5188        }
5189
5190        if (useglobal)
5191                mutex_unlock(&dasd_reserve_mutex);
5192        else
5193                dasd_sfree_request(cqr, cqr->memdev);
5194        return rc;
5195}
5196
5197/*
5198 * Read performance statistics
5199 */
5200static int
5201dasd_eckd_performance(struct dasd_device *device, void __user *argp)
5202{
5203        struct dasd_psf_prssd_data *prssdp;
5204        struct dasd_rssd_perf_stats_t *stats;
5205        struct dasd_ccw_req *cqr;
5206        struct ccw1 *ccw;
5207        int rc;
5208
5209        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
5210                                   (sizeof(struct dasd_psf_prssd_data) +
5211                                    sizeof(struct dasd_rssd_perf_stats_t)),
5212                                   device, NULL);
5213        if (IS_ERR(cqr)) {
5214                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5215                            "Could not allocate initialization request");
5216                return PTR_ERR(cqr);
5217        }
5218        cqr->startdev = device;
5219        cqr->memdev = device;
5220        cqr->retries = 0;
5221        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5222        cqr->expires = 10 * HZ;
5223
5224        /* Prepare for Read Subsystem Data */
5225        prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5226        memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5227        prssdp->order = PSF_ORDER_PRSSD;
5228        prssdp->suborder = 0x01;        /* Performance Statistics */
5229        prssdp->varies[1] = 0x01;       /* Perf Statistics for the Subsystem */
5230
5231        ccw = cqr->cpaddr;
5232        ccw->cmd_code = DASD_ECKD_CCW_PSF;
5233        ccw->count = sizeof(struct dasd_psf_prssd_data);
5234        ccw->flags |= CCW_FLAG_CC;
5235        ccw->cda = (__u32)(addr_t) prssdp;
5236
5237        /* Read Subsystem Data - Performance Statistics */
5238        stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5239        memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
5240
5241        ccw++;
5242        ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5243        ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
5244        ccw->cda = (__u32)(addr_t) stats;
5245
5246        cqr->buildclk = get_tod_clock();
5247        cqr->status = DASD_CQR_FILLED;
5248        rc = dasd_sleep_on(cqr);
5249        if (rc == 0) {
5250                prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5251                stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5252                if (copy_to_user(argp, stats,
5253                                 sizeof(struct dasd_rssd_perf_stats_t)))
5254                        rc = -EFAULT;
5255        }
5256        dasd_sfree_request(cqr, cqr->memdev);
5257        return rc;
5258}
5259
5260/*
5261 * Get attributes (cache operations)
5262 * Returns the cache attributes used in Define Extent (DE).
5263 */
5264static int
5265dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
5266{
5267        struct dasd_eckd_private *private = device->private;
5268        struct attrib_data_t attrib = private->attrib;
5269        int rc;
5270
5271        if (!capable(CAP_SYS_ADMIN))
5272                return -EACCES;
5273        if (!argp)
5274                return -EINVAL;
5275
5276        rc = 0;
5277        if (copy_to_user(argp, (long *) &attrib,
5278                         sizeof(struct attrib_data_t)))
5279                rc = -EFAULT;
5280
5281        return rc;
5282}
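
/*
 * Minimal userspace sketch (illustrative only): dasd_eckd_get_attrib()
 * above is reached via the BIODASDGATTR ioctl on the block device node,
 * assuming the uapi definitions from <asm/dasd.h> and a device node such
 * as /dev/dasda; CAP_SYS_ADMIN is required.
 *
 *        #include <stdio.h>
 *        #include <fcntl.h>
 *        #include <sys/ioctl.h>
 *        #include <asm/dasd.h>
 *
 *        attrib_data_t attrib;
 *        int fd = open("/dev/dasda", O_RDONLY);
 *
 *        if (fd >= 0 && ioctl(fd, BIODASDGATTR, &attrib) == 0)
 *                printf("cache mode %x, %i cylinder prestage\n",
 *                       attrib.operation, attrib.nr_cyl);
 */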
5283
5284/*
5285 * Set attributes (cache operations)
5286 * Stores the attributes for cache operations to be used in Define Extent (DE).
5287 */
5288static int
5289dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
5290{
5291        struct dasd_eckd_private *private = device->private;
5292        struct attrib_data_t attrib;
5293
5294        if (!capable(CAP_SYS_ADMIN))
5295                return -EACCES;
5296        if (!argp)
5297                return -EINVAL;
5298
5299        if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
5300                return -EFAULT;
5301        private->attrib = attrib;
5302
5303        dev_info(&device->cdev->dev,
5304                 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
5305                 private->attrib.operation, private->attrib.nr_cyl);
5306        return 0;
5307}
5308
5309/*
5310 * Issue syscall I/O to EMC Symmetrix array.
5311 * CCWs are PSF and RSSD
5312 */
5313static int dasd_symm_io(struct dasd_device *device, void __user *argp)
5314{
5315        struct dasd_symmio_parms usrparm;
5316        char *psf_data, *rssd_result;
5317        struct dasd_ccw_req *cqr;
5318        struct ccw1 *ccw;
5319        char psf0, psf1;
5320        int rc;
5321
5322        if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
5323                return -EACCES;
5324        psf0 = psf1 = 0;
5325
5326        /* Copy parms from caller */
5327        rc = -EFAULT;
5328        if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5329                goto out;
5330        if (is_compat_task()) {
5331                /* Make sure pointers are sane even on 31 bit. */
5332                rc = -EINVAL;
5333                if ((usrparm.psf_data >> 32) != 0)
5334                        goto out;
5335                if ((usrparm.rssd_result >> 32) != 0)
5336                        goto out;
5337                usrparm.psf_data &= 0x7fffffffULL;
5338                usrparm.rssd_result &= 0x7fffffffULL;
5339        }
5340        /* alloc I/O data area */
5341        psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
5342        rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
5343        if (!psf_data || !rssd_result) {
5344                rc = -ENOMEM;
5345                goto out_free;
5346        }
5347
5348        /* get syscall header from user space */
5349        rc = -EFAULT;
5350        if (copy_from_user(psf_data,
5351                           (void __user *)(unsigned long) usrparm.psf_data,
5352                           usrparm.psf_data_len))
5353                goto out_free;
5354        psf0 = psf_data[0];
5355        psf1 = psf_data[1];
5356
5357        /* setup CCWs for PSF + RSSD */
5358        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
5359        if (IS_ERR(cqr)) {
5360                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5361                        "Could not allocate initialization request");
5362                rc = PTR_ERR(cqr);
5363                goto out_free;
5364        }
5365
5366        cqr->startdev = device;
5367        cqr->memdev = device;
5368        cqr->retries = 3;
5369        cqr->expires = 10 * HZ;
5370        cqr->buildclk = get_tod_clock();
5371        cqr->status = DASD_CQR_FILLED;
5372
5373        /* Build the ccws */
5374        ccw = cqr->cpaddr;
5375
5376        /* PSF ccw */
5377        ccw->cmd_code = DASD_ECKD_CCW_PSF;
5378        ccw->count = usrparm.psf_data_len;
5379        ccw->flags |= CCW_FLAG_CC;
5380        ccw->cda = (__u32)(addr_t) psf_data;
5381
5382        ccw++;
5383
5384        /* RSSD ccw  */
5385        ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5386        ccw->count = usrparm.rssd_result_len;
5387        ccw->flags = CCW_FLAG_SLI ;
5388        ccw->cda = (__u32)(addr_t) rssd_result;
5389
5390        rc = dasd_sleep_on(cqr);
5391        if (rc)
5392                goto out_sfree;
5393
5394        rc = -EFAULT;
5395        if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
5396                           rssd_result, usrparm.rssd_result_len))
5397                goto out_sfree;
5398        rc = 0;
5399
5400out_sfree:
5401        dasd_sfree_request(cqr, cqr->memdev);
5402out_free:
5403        kfree(rssd_result);
5404        kfree(psf_data);
5405out:
5406        DBF_DEV_EVENT(DBF_WARNING, device,
5407                      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
5408                      (int) psf0, (int) psf1, rc);
5409        return rc;
5410}
5411
5412static int
5413dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
5414{
5415        struct dasd_device *device = block->base;
5416
5417        switch (cmd) {
5418        case BIODASDGATTR:
5419                return dasd_eckd_get_attrib(device, argp);
5420        case BIODASDSATTR:
5421                return dasd_eckd_set_attrib(device, argp);
5422        case BIODASDPSRD:
5423                return dasd_eckd_performance(device, argp);
5424        case BIODASDRLSE:
5425                return dasd_eckd_release(device);
5426        case BIODASDRSRV:
5427                return dasd_eckd_reserve(device);
5428        case BIODASDSLCK:
5429                return dasd_eckd_steal_lock(device);
5430        case BIODASDSNID:
5431                return dasd_eckd_snid(device, argp);
5432        case BIODASDSYMMIO:
5433                return dasd_symm_io(device, argp);
5434        default:
5435                return -ENOTTY;
5436        }
5437}
5438
5439/*
5440 * Dump the range of CCWs into the 'page' buffer
5441 * and return the number of printed chars.
5442 */
5443static int
5444dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
5445{
5446        int len, count;
5447        char *datap;
5448
5449        len = 0;
5450        while (from <= to) {
5451                len += sprintf(page + len, PRINTK_HEADER
5452                               " CCW %p: %08X %08X DAT:",
5453                               from, ((int *) from)[0], ((int *) from)[1]);
5454
5455                /* get pointer to data (consider IDALs) */
5456                if (from->flags & CCW_FLAG_IDA)
5457                        datap = (char *) *((addr_t *) (addr_t) from->cda);
5458                else
5459                        datap = (char *) ((addr_t) from->cda);
5460
5461                /* dump data (max 32 bytes) */
5462                for (count = 0; count < from->count && count < 32; count++) {
5463                        if (count % 8 == 0) len += sprintf(page + len, " ");
5464                        if (count % 4 == 0) len += sprintf(page + len, " ");
5465                        len += sprintf(page + len, "%02x", datap[count]);
5466                }
5467                len += sprintf(page + len, "\n");
5468                from++;
5469        }
5470        return len;
5471}
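
/*
 * Each call above renders one CCW per line: the CCW itself as two hex words,
 * then up to 32 data bytes in groups of four with a wider gap every eight
 * bytes.  An illustrative line (all values made up) would look like:
 *
 *	dasd(eckd): CCW 000000001a2b3c40: 63400010 1a2b4000 DAT:  0a0b0c0d 00000000  1a1b1c1d 00000000
 */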
5472
5473static void
5474dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
5475                         char *reason)
5476{
5477        u64 *sense;
5478        u64 *stat;
5479
5480        sense = (u64 *) dasd_get_sense(irb);
5481        stat = (u64 *) &irb->scsw;
5482        if (sense) {
5483                DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
5484                              "%016llx %016llx %016llx %016llx",
5485                              reason, *stat, *((u32 *) (stat + 1)),
5486                              sense[0], sense[1], sense[2], sense[3]);
5487        } else {
5488                DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
5489                              reason, *stat, *((u32 *) (stat + 1)),
5490                              "NO VALID SENSE");
5491        }
5492}
5493
5494/*
5495 * Print sense data and related channel program.
5496 * Print in parts because the printk buffer is only 1024 bytes.
5497 */
5498static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
5499                                 struct dasd_ccw_req *req, struct irb *irb)
5500{
5501        char *page;
5502        struct ccw1 *first, *last, *fail, *from, *to;
5503        int len, sl, sct;
5504
5505        page = (char *) get_zeroed_page(GFP_ATOMIC);
5506        if (page == NULL) {
5507                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5508                              "No memory to dump sense data\n");
5509                return;
5510        }
5511        /* dump the sense data */
5512        len = sprintf(page, PRINTK_HEADER
5513                      " I/O status report for device %s:\n",
5514                      dev_name(&device->cdev->dev));
5515        len += sprintf(page + len, PRINTK_HEADER
5516                       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5517                       "CS:%02X RC:%d\n",
5518                       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5519                       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5520                       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5521                       req ? req->intrc : 0);
5522        len += sprintf(page + len, PRINTK_HEADER
5523                       " device %s: Failing CCW: %p\n",
5524                       dev_name(&device->cdev->dev),
5525                       (void *) (addr_t) irb->scsw.cmd.cpa);
5526        if (irb->esw.esw0.erw.cons) {
5527                for (sl = 0; sl < 4; sl++) {
5528                        len += sprintf(page + len, PRINTK_HEADER
5529                                       " Sense(hex) %2d-%2d:",
5530                                       (8 * sl), ((8 * sl) + 7));
5531
5532                        for (sct = 0; sct < 8; sct++) {
5533                                len += sprintf(page + len, " %02x",
5534                                               irb->ecw[8 * sl + sct]);
5535                        }
5536                        len += sprintf(page + len, "\n");
5537                }
5538
5539                if (irb->ecw[27] & DASD_SENSE_BIT_0) {
5540                        /* 24 Byte Sense Data */
5541                        sprintf(page + len, PRINTK_HEADER
5542                                " 24 Byte: %x MSG %x, "
5543                                "%s MSGb to SYSOP\n",
5544                                irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
5545                                irb->ecw[1] & 0x10 ? "" : "no");
5546                } else {
5547                        /* 32 Byte Sense Data */
5548                        sprintf(page + len, PRINTK_HEADER
5549                                " 32 Byte: Format: %x "
5550                                "Exception class %x\n",
5551                                irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
5552                }
5553        } else {
5554                sprintf(page + len, PRINTK_HEADER
5555                        " SORRY - NO VALID SENSE AVAILABLE\n");
5556        }
5557        printk(KERN_ERR "%s", page);
5558
5559        if (req) {
5560                /* req == NULL for unsolicited interrupts */
5561                /* dump the Channel Program (max 140 Bytes per line) */
5562                /* Count CCWs and print the first CCWs (maximum 1024 / 140 = 7) */
5563                first = req->cpaddr;
5564                for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
5565                to = min(first + 6, last);
5566                len = sprintf(page, PRINTK_HEADER
5567                              " Related CP in req: %p\n", req);
5568                dasd_eckd_dump_ccw_range(first, to, page + len);
5569                printk(KERN_ERR "%s", page);
5570
5571                /* print failing CCW area (maximum 4) */
5572                /* scsw->cda is either valid or zero  */
5573                len = 0;
5574                from = ++to;
5575                fail = (struct ccw1 *)(addr_t)
5576                                irb->scsw.cmd.cpa; /* failing CCW */
5577                if (from < fail - 2) {
5578                        from = fail - 2;     /* there is a gap - print header */
5579                        len += sprintf(page, PRINTK_HEADER "......\n");
5580                }
5581                to = min(fail + 1, last);
5582                len += dasd_eckd_dump_ccw_range(from, to, page + len);
5583
5584                /* print last CCWs (maximum 2) */
5585                from = max(from, ++to);
5586                if (from < last - 1) {
5587                        from = last - 1;     /* there is a gap - print header */
5588                        len += sprintf(page + len, PRINTK_HEADER "......\n");
5589                }
5590                len += dasd_eckd_dump_ccw_range(from, last, page + len);
5591                if (len > 0)
5592                        printk(KERN_ERR "%s", page);
5593        }
5594        free_page((unsigned long) page);
5595}
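
/*
 * Worked example of the slicing above (a sketch derived from the code): for
 * a channel program of 20 CCWs that fails at CCW 12, the output contains
 * CCWs 0-6, a "......" gap marker, CCWs 10-13 around the failing CCW,
 * another gap marker, and finally the last two CCWs 18-19.
 */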
5596
5597
5598/*
5599 * Print sense data from a tcw.
5600 */
5601static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
5602                                 struct dasd_ccw_req *req, struct irb *irb)
5603{
5604        char *page;
5605        int len, sl, sct, residual;
5606        struct tsb *tsb;
5607        u8 *sense, *rcq;
5608
5609        page = (char *) get_zeroed_page(GFP_ATOMIC);
5610        if (page == NULL) {
5611                DBF_DEV_EVENT(DBF_WARNING, device, " %s",
5612                            "No memory to dump sense data");
5613                return;
5614        }
5615        /* dump the sense data */
5616        len = sprintf(page, PRINTK_HEADER
5617                      " I/O status report for device %s:\n",
5618                      dev_name(&device->cdev->dev));
5619        len += sprintf(page + len, PRINTK_HEADER
5620                       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5621                       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
5622                       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5623                       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5624                       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5625                       irb->scsw.tm.fcxs,
5626                       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
5627                       req ? req->intrc : 0);
5628        len += sprintf(page + len, PRINTK_HEADER
5629                       " device %s: Failing TCW: %p\n",
5630                       dev_name(&device->cdev->dev),
5631                       (void *) (addr_t) irb->scsw.tm.tcw);
5632
5633        tsb = NULL;
5634        sense = NULL;
5635        if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
5636                tsb = tcw_get_tsb(
5637                        (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
5638
5639        if (tsb) {
5640                len += sprintf(page + len, PRINTK_HEADER
5641                               " tsb->length %d\n", tsb->length);
5642                len += sprintf(page + len, PRINTK_HEADER
5643                               " tsb->flags %x\n", tsb->flags);
5644                len += sprintf(page + len, PRINTK_HEADER
5645                               " tsb->dcw_offset %d\n", tsb->dcw_offset);
5646                len += sprintf(page + len, PRINTK_HEADER
5647                               " tsb->count %d\n", tsb->count);
5648                residual = tsb->count - 28;
5649                len += sprintf(page + len, PRINTK_HEADER
5650                               " residual %d\n", residual);
5651
5652                switch (tsb->flags & 0x07) {
5653                case 1: /* tsa_iostat */
5654                        len += sprintf(page + len, PRINTK_HEADER
5655                               " tsb->tsa.iostat.dev_time %d\n",
5656                                       tsb->tsa.iostat.dev_time);
5657                        len += sprintf(page + len, PRINTK_HEADER
5658                               " tsb->tsa.iostat.def_time %d\n",
5659                                       tsb->tsa.iostat.def_time);
5660                        len += sprintf(page + len, PRINTK_HEADER
5661                               " tsb->tsa.iostat.queue_time %d\n",
5662                                       tsb->tsa.iostat.queue_time);
5663                        len += sprintf(page + len, PRINTK_HEADER
5664                               " tsb->tsa.iostat.dev_busy_time %d\n",
5665                                       tsb->tsa.iostat.dev_busy_time);
5666                        len += sprintf(page + len, PRINTK_HEADER
5667                               " tsb->tsa.iostat.dev_act_time %d\n",
5668                                       tsb->tsa.iostat.dev_act_time);
5669                        sense = tsb->tsa.iostat.sense;
5670                        break;
5671                case 2: /* ts_ddpc */
5672                        len += sprintf(page + len, PRINTK_HEADER
5673                               " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
5674                        for (sl = 0; sl < 2; sl++) {
5675                                len += sprintf(page + len, PRINTK_HEADER
5676                                               " tsb->tsa.ddpc.rcq %2d-%2d: ",
5677                                               (8 * sl), ((8 * sl) + 7));
5678                                rcq = tsb->tsa.ddpc.rcq;
5679                                for (sct = 0; sct < 8; sct++) {
5680                                        len += sprintf(page + len, " %02x",
5681                                                       rcq[8 * sl + sct]);
5682                                }
5683                                len += sprintf(page + len, "\n");
5684                        }
5685                        sense = tsb->tsa.ddpc.sense;
5686                        break;
5687                case 3: /* tsa_intrg */
5688                        len += sprintf(page + len, PRINTK_HEADER
5689                                      " tsb->tsa.intrg.: not supported yet\n");
5690                        break;
5691                }
5692
5693                if (sense) {
5694                        for (sl = 0; sl < 4; sl++) {
5695                                len += sprintf(page + len, PRINTK_HEADER
5696                                               " Sense(hex) %2d-%2d:",
5697                                               (8 * sl), ((8 * sl) + 7));
5698                                for (sct = 0; sct < 8; sct++) {
5699                                        len += sprintf(page + len, " %02x",
5700                                                       sense[8 * sl + sct]);
5701                                }
5702                                len += sprintf(page + len, "\n");
5703                        }
5704
5705                        if (sense[27] & DASD_SENSE_BIT_0) {
5706                                /* 24 Byte Sense Data */
5707                                sprintf(page + len, PRINTK_HEADER
5708                                        " 24 Byte: %x MSG %x, "
5709                                        "%s MSGb to SYSOP\n",
5710                                        sense[7] >> 4, sense[7] & 0x0f,
5711                                        sense[1] & 0x10 ? "" : "no");
5712                        } else {
5713                                /* 32 Byte Sense Data */
5714                                sprintf(page + len, PRINTK_HEADER
5715                                        " 32 Byte: Format: %x "
5716                                        "Exception class %x\n",
5717                                        sense[6] & 0x0f, sense[22] >> 4);
5718                        }
5719                } else {
5720                        sprintf(page + len, PRINTK_HEADER
5721                                " SORRY - NO VALID SENSE AVAILABLE\n");
5722                }
5723        } else {
5724                sprintf(page + len, PRINTK_HEADER
5725                        " SORRY - NO TSB DATA AVAILABLE\n");
5726        }
5727        printk(KERN_ERR "%s", page);
5728        free_page((unsigned long) page);
5729}
5730
5731static void dasd_eckd_dump_sense(struct dasd_device *device,
5732                                 struct dasd_ccw_req *req, struct irb *irb)
5733{
5734        u8 *sense = dasd_get_sense(irb);
5735
5736        if (scsw_is_tm(&irb->scsw)) {
5737                /*
5738                 * In some cases the 'File Protected' or 'Incorrect Length'
5739                 * error might be expected and log messages should not be written
5740                 * in that case. Check if the corresponding suppress bit is set.
5741                 */
5742                if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
5743                    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
5744                        return;
5745                if (scsw_cstat(&irb->scsw) == 0x40 &&
5746                    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
5747                        return;
5748
5749                dasd_eckd_dump_sense_tcw(device, req, irb);
5750        } else {
5751                /*
5752                 * In some cases the 'Command Reject' or 'No Record Found'
5753                 * error might be expected and log messages should not be
5754                 * written in that case. Check if the corresponding suppress bit is set.
5755                 */
5756                if (sense && sense[0] & SNS0_CMD_REJECT &&
5757                    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
5758                        return;
5759
5760                if (sense && sense[1] & SNS1_NO_REC_FOUND &&
5761                    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
5762                        return;
5763
5764                dasd_eckd_dump_sense_ccw(device, req, irb);
5765        }
5766}
5767
5768static int dasd_eckd_pm_freeze(struct dasd_device *device)
5769{
5770        /*
5771         * the device should be disconnected from our LCU structure;
5772         * on restore we will reconnect it and reread LCU-specific
5773         * information like PAV support that might have changed
5774         */
5775        dasd_alias_remove_device(device);
5776        dasd_alias_disconnect_device_from_lcu(device);
5777
5778        return 0;
5779}
5780
5781static int dasd_eckd_restore_device(struct dasd_device *device)
5782{
5783        struct dasd_eckd_private *private = device->private;
5784        struct dasd_eckd_characteristics temp_rdc_data;
5785        int rc;
5786        struct dasd_uid temp_uid;
5787        unsigned long flags;
5788        unsigned long cqr_flags = 0;
5789
5790        /* Read Configuration Data */
5791        rc = dasd_eckd_read_conf(device);
5792        if (rc) {
5793        if (!dasd_vol_info_req) {
5794                kfree(dasd_reserve_req);
                return -ENOMEM;
        }
5795                goto out_err;
5796        }
5797
5798        dasd_eckd_get_uid(device, &temp_uid);
5799        /* Generate device unique id */
5800        rc = dasd_eckd_generate_uid(device);
5801        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5802        if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
5803                dev_err(&device->cdev->dev, "The UID of the DASD has "
5804                        "changed\n");
5805        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5806        if (rc)
5807                goto out_err;
5808
5809        /* register lcu with alias handling, enable PAV if this is a new lcu */
5810        rc = dasd_alias_make_device_known_to_lcu(device);
5811        if (rc)
5812                goto out_err;
5813
5814        set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags);
5815        dasd_eckd_validate_server(device, cqr_flags);
5816
5817        /* RE-Read Configuration Data */
5818        rc = dasd_eckd_read_conf(device);
5819        if (rc) {
5820                DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5821                        "Read configuration data failed, rc=%d", rc);
5822                goto out_err2;
5823        }
5824
5825        /* Read Feature Codes */
5826        dasd_eckd_read_features(device);
5827
5828        /* Read Volume Information */
5829        dasd_eckd_read_vol_info(device);
5830
5831        /* Read Extent Pool Information */
5832        dasd_eckd_read_ext_pool_info(device);
5833
5834        /* Read Device Characteristics */
5835        rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
5836                                         &temp_rdc_data, 64);
5837        if (rc) {
5838                DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5839                                "Read device characteristic failed, rc=%d", rc);
5840                goto out_err2;
5841        }
5842        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5843        memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
5844        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5845
5846        /* add device to alias management */
5847        dasd_alias_add_device(device);
5848
5849        return 0;
5850
5851out_err2:
5852        dasd_alias_disconnect_device_from_lcu(device);
5853out_err:
5854        return -1;
5855}
5856
5857static int dasd_eckd_reload_device(struct dasd_device *device)
5858{
5859        struct dasd_eckd_private *private = device->private;
5860        int rc, old_base;
5861        char print_uid[60];
5862        struct dasd_uid uid;
5863        unsigned long flags;
5864
5865        /*
5866         * remove device from alias handling to prevent new requests
5867         * from being scheduled on the wrong alias device
5868         */
5869        dasd_alias_remove_device(device);
5870
5871        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5872        old_base = private->uid.base_unit_addr;
5873        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5874
5875        /* Read Configuration Data */
5876        rc = dasd_eckd_read_conf(device);
5877        if (rc)
5878                goto out_err;
5879
5880        rc = dasd_eckd_generate_uid(device);
5881        if (rc)
5882                goto out_err;
5883        /*
5884         * update unit address configuration and
5885         * add device to alias management
5886         */
5887        dasd_alias_update_add_device(device);
5888
5889        dasd_eckd_get_uid(device, &uid);
5890
5891        if (old_base != uid.base_unit_addr) {
5892                if (strlen(uid.vduit) > 0)
5893                        snprintf(print_uid, sizeof(print_uid),
5894                                 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
5895                                 uid.ssid, uid.base_unit_addr, uid.vduit);
5896                else
5897                        snprintf(print_uid, sizeof(print_uid),
5898                                 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
5899                                 uid.ssid, uid.base_unit_addr);
5900
5901                dev_info(&device->cdev->dev,
5902                         "An Alias device was reassigned to a new base device "
5903                         "with UID: %s\n", print_uid);
5904        }
5905        return 0;
5906
5907out_err:
5908        return -1;
5909}
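
/*
 * The UID printed above has the form vendor.serial.ssid.unit-address, with
 * the vduit string appended when present.  A made-up example for
 * illustration: "IBM.750000000ABC01.5e00.2a".
 */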
5910
5911static int dasd_eckd_read_message_buffer(struct dasd_device *device,
5912                                         struct dasd_rssd_messages *messages,
5913                                         __u8 lpum)
5914{
5915        struct dasd_rssd_messages *message_buf;
5916        struct dasd_psf_prssd_data *prssdp;
5917        struct dasd_ccw_req *cqr;
5918        struct ccw1 *ccw;
5919        int rc;
5920
5921        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5922                                   (sizeof(struct dasd_psf_prssd_data) +
5923                                    sizeof(struct dasd_rssd_messages)),
5924                                   device, NULL);
5925        if (IS_ERR(cqr)) {
5926                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5927                                "Could not allocate read message buffer request");
5928                return PTR_ERR(cqr);
5929        }
5930
5931        cqr->lpm = lpum;
5932retry:
5933        cqr->startdev = device;
5934        cqr->memdev = device;
5935        cqr->block = NULL;
5936        cqr->expires = 10 * HZ;
5937        set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
5938        /* dasd_sleep_on_immediatly does not do complex error
5939         * recovery, so clear the ERP flag and set the retry counter
5940         * to do basic ERP */
5941        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5942        cqr->retries = 256;
5943
5944        /* Prepare for Read Subsystem Data */
5945        prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5946        memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5947        prssdp->order = PSF_ORDER_PRSSD;
5948        prssdp->suborder = 0x03;        /* Message Buffer */
5949        /* all other bytes of prssdp must be zero */
5950
5951        ccw = cqr->cpaddr;
5952        ccw->cmd_code = DASD_ECKD_CCW_PSF;
5953        ccw->count = sizeof(struct dasd_psf_prssd_data);
5954        ccw->flags |= CCW_FLAG_CC;
5955        ccw->flags |= CCW_FLAG_SLI;
5956        ccw->cda = (__u32)(addr_t) prssdp;
5957
5958        /* Read Subsystem Data - message buffer */
5959        message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
5960        memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
5961
5962        ccw++;
5963        ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5964        ccw->count = sizeof(struct dasd_rssd_messages);
5965        ccw->flags |= CCW_FLAG_SLI;
5966        ccw->cda = (__u32)(addr_t) message_buf;
5967
5968        cqr->buildclk = get_tod_clock();
5969        cqr->status = DASD_CQR_FILLED;
5970        rc = dasd_sleep_on_immediatly(cqr);
5971        if (rc == 0) {
5972                prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5973                message_buf = (struct dasd_rssd_messages *)
5974                        (prssdp + 1);
5975                memcpy(messages, message_buf,
5976                       sizeof(struct dasd_rssd_messages));
5977        } else if (cqr->lpm) {
5978                /*
5979                 * on z/VM we might not be able to do I/O on the requested path,
5980                 * but we can get the required information on any path,
5981                 * so retry with an open path mask
5982                 */
5983                cqr->lpm = 0;
5984                goto retry;
5985        } else
5986                DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5987                                "Reading messages failed with rc=%d\n"
5988                                , rc);
5989        dasd_sfree_request(cqr, cqr->memdev);
5990        return rc;
5991}
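
/*
 * Layout of the request data area used above (derived from the code): the
 * PSF/PRSSD parameter block comes first and the RSSD message buffer follows
 * directly behind it, which is why message_buf is computed as (prssdp + 1):
 *
 *	cqr->data:  | dasd_psf_prssd_data | dasd_rssd_messages |
 *	              ^ PSF CCW cda         ^ RSSD CCW cda
 */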
5992
5993static int dasd_eckd_query_host_access(struct dasd_device *device,
5994                                       struct dasd_psf_query_host_access *data)
5995{
5996        struct dasd_eckd_private *private = device->private;
5997        struct dasd_psf_query_host_access *host_access;
5998        struct dasd_psf_prssd_data *prssdp;
5999        struct dasd_ccw_req *cqr;
6000        struct ccw1 *ccw;
6001        int rc;
6002
6003        /* not available for HYPER PAV alias devices */
6004        if (!device->block && private->lcu->pav == HYPER_PAV)
6005                return -EOPNOTSUPP;
6006
6007        /* may not be supported by the storage server */
6008        if (!(private->features.feature[14] & 0x80))
6009                return -EOPNOTSUPP;
6010
6011        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
6012                                   sizeof(struct dasd_psf_prssd_data) + 1,
6013                                   device, NULL);
6014        if (IS_ERR(cqr)) {
6015                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6016                                "Could not allocate read message buffer request");
6017                return PTR_ERR(cqr);
6018        }
6019        host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
6020        if (!host_access) {
6021                dasd_sfree_request(cqr, device);
6022                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6023                                "Could not allocate host_access buffer");
6024                return -ENOMEM;
6025        }
6026        cqr->startdev = device;
6027        cqr->memdev = device;
6028        cqr->block = NULL;
6029        cqr->retries = 256;
6030        cqr->expires = 10 * HZ;
6031
6032        /* Prepare for Read Subsystem Data */
6033        prssdp = (struct dasd_psf_prssd_data *) cqr->data;
6034        memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
6035        prssdp->order = PSF_ORDER_PRSSD;
6036        prssdp->suborder = PSF_SUBORDER_QHA;    /* query host access */
6037        /* LSS and Volume that will be queried */
6038        prssdp->lss = private->ned->ID;
6039        prssdp->volume = private->ned->unit_addr;
6040        /* all other bytes of prssdp must be zero */
6041
6042        ccw = cqr->cpaddr;
6043        ccw->cmd_code = DASD_ECKD_CCW_PSF;
6044        ccw->count = sizeof(struct dasd_psf_prssd_data);
6045        ccw->flags |= CCW_FLAG_CC;
6046        ccw->flags |= CCW_FLAG_SLI;
6047        ccw->cda = (__u32)(addr_t) prssdp;
6048
6049        /* Read Subsystem Data - query host access */
6050        ccw++;
6051        ccw->cmd_code = DASD_ECKD_CCW_RSSD;
6052        ccw->count = sizeof(struct dasd_psf_query_host_access);
6053        ccw->flags |= CCW_FLAG_SLI;
6054        ccw->cda = (__u32)(addr_t) host_access;
6055
6056        cqr->buildclk = get_tod_clock();
6057        cqr->status = DASD_CQR_FILLED;
6058        /* the command might not be supported, suppress error message */
6059        __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
6060        rc = dasd_sleep_on_interruptible(cqr);
6061        if (rc == 0) {
6062                *data = *host_access;
6063        } else {
6064                DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
6065                                "Reading host access data failed with rc=%d\n",
6066                                rc);
6067                rc = -EOPNOTSUPP;
6068        }
6069
6070        dasd_sfree_request(cqr, cqr->memdev);
6071        kfree(host_access);
6072        return rc;
6073}
6074/*
6075 * return the number of grouped devices
6076 */
6077static int dasd_eckd_host_access_count(struct dasd_device *device)
6078{
6079        struct dasd_psf_query_host_access *access;
6080        struct dasd_ckd_path_group_entry *entry;
6081        struct dasd_ckd_host_information *info;
6082        int count = 0;
6083        int rc, i;
6084
6085        access = kzalloc(sizeof(*access), GFP_NOIO);
6086        if (!access) {
6087                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6088                                "Could not allocate access buffer");
6089                return -ENOMEM;
6090        }
6091        rc = dasd_eckd_query_host_access(device, access);
6092        if (rc) {
6093                kfree(access);
6094                return rc;
6095        }
6096
6097        info = (struct dasd_ckd_host_information *)
6098                access->host_access_information;
6099        for (i = 0; i < info->entry_count; i++) {
6100                entry = (struct dasd_ckd_path_group_entry *)
6101                        (info->entry + i * info->entry_size);
6102                if (entry->status_flags & DASD_ECKD_PG_GROUPED)
6103                        count++;
6104        }
6105
6106        kfree(access);
6107        return count;
6108}
6109
6110/*
6111 * write host access information to a sequential file
6112 */
6113static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
6114{
6115        struct dasd_psf_query_host_access *access;
6116        struct dasd_ckd_path_group_entry *entry;
6117        struct dasd_ckd_host_information *info;
6118        char sysplex[9] = "";
6119        int rc, i;
6120
6121        access = kzalloc(sizeof(*access), GFP_NOIO);
6122        if (!access) {
6123                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6124                                "Could not allocate access buffer");
6125                return -ENOMEM;
6126        }
6127        rc = dasd_eckd_query_host_access(device, access);
6128        if (rc) {
6129                kfree(access);
6130                return rc;
6131        }
6132
6133        info = (struct dasd_ckd_host_information *)
6134                access->host_access_information;
6135        for (i = 0; i < info->entry_count; i++) {
6136                entry = (struct dasd_ckd_path_group_entry *)
6137                        (info->entry + i * info->entry_size);
6138                /* PGID */
6139                seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
6140                /* FLAGS */
6141                seq_printf(m, "status_flags %02x\n", entry->status_flags);
6142                /* SYSPLEX NAME */
6143                memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
6144                EBCASC(sysplex, sizeof(sysplex));
6145                seq_printf(m, "sysplex_name %8s\n", sysplex);
6146                /* SUPPORTED CYLINDER */
6147                seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
6148                /* TIMESTAMP */
6149                seq_printf(m, "timestamp %lu\n", (unsigned long)
6150                           entry->timestamp);
6151        }
6152        kfree(access);
6153
6154        return 0;
6155}
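
/*
 * Illustrative output for one path group entry as emitted by the
 * seq_printf() calls above (all values made up):
 *
 *	pgid 8002788087a0f53a1a92cd
 *	status_flags 82
 *	sysplex_name SYSPLEX1
 *	supported_cylinder 1182006
 *	timestamp 1586161234
 */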
6156
6157/*
6158 * Perform Subsystem Function - CUIR response
6159 */
6160static int
6161dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
6162                            __u32 message_id, __u8 lpum)
6163{
6164        struct dasd_psf_cuir_response *psf_cuir;
6165        int pos = pathmask_to_pos(lpum);
6166        struct dasd_ccw_req *cqr;
6167        struct ccw1 *ccw;
6168        int rc;
6169
6170        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
6171                                   sizeof(struct dasd_psf_cuir_response),
6172                                   device, NULL);
6173
6174        if (IS_ERR(cqr)) {
6175                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6176                           "Could not allocate PSF-CUIR request");
6177                return PTR_ERR(cqr);
6178        }
6179
6180        psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
6181        psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
6182        psf_cuir->cc = response;
6183        psf_cuir->chpid = device->path[pos].chpid;
6184        psf_cuir->message_id = message_id;
6185        psf_cuir->cssid = device->path[pos].cssid;
6186        psf_cuir->ssid = device->path[pos].ssid;
6187        ccw = cqr->cpaddr;
6188        ccw->cmd_code = DASD_ECKD_CCW_PSF;
6189        ccw->cda = (__u32)(addr_t)psf_cuir;
6190        ccw->flags = CCW_FLAG_SLI;
6191        ccw->count = sizeof(struct dasd_psf_cuir_response);
6192
6193        cqr->startdev = device;
6194        cqr->memdev = device;
6195        cqr->block = NULL;
6196        cqr->retries = 256;
6197        cqr->expires = 10*HZ;
6198        cqr->buildclk = get_tod_clock();
6199        cqr->status = DASD_CQR_FILLED;
6200        set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
6201
6202        rc = dasd_sleep_on(cqr);
6203
6204        dasd_sfree_request(cqr, cqr->memdev);
6205        return rc;
6206}
6207
6208/*
6209 * return the configuration data that is referenced by the record selector
6210 * if a record selector is specified; otherwise return the
6211 * conf_data pointer for the path specified by lpum
6212 */
6213static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
6214                                                     __u8 lpum,
6215                                                     struct dasd_cuir_message *cuir)
6216{
6217        struct dasd_conf_data *conf_data;
6218        int path, pos;
6219
6220        if (cuir->record_selector == 0)
6221                goto out;
6222        for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
6223                conf_data = device->path[pos].conf_data;
6224                if (conf_data->gneq.record_selector ==
6225                    cuir->record_selector)
6226                        return conf_data;
6227        }
6228out:
6229        return device->path[pathmask_to_pos(lpum)].conf_data;
6230}
6231
6232/*
6233 * This function determines the scope of a reconfiguration request by
6234 * analysing the path and device selection data provided in the CUIR request.
6235 * Returns a path mask containing the CUIR-affected paths for the given device.
6236 *
6237 * If the CUIR request does not contain the required information, return the
6238 * path mask of the path on which the attention message for the CUIR request
6239 * was received.
6240 */
6241static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
6242                                struct dasd_cuir_message *cuir)
6243{
6244        struct dasd_conf_data *ref_conf_data;
6245        unsigned long bitmask = 0, mask = 0;
6246        struct dasd_conf_data *conf_data;
6247        unsigned int pos, path;
6248        char *ref_gneq, *gneq;
6249        char *ref_ned, *ned;
6250        int tbcpm = 0;
6251
6252        /* if the CUIR request does not specify the scope, use the path
6253           the attention message was presented on */
6254        if (!cuir->ned_map ||
6255            !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
6256                return lpum;
6257
6258        /* get reference conf data */
6259        ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
6260        /* reference ned is determined by ned_map field */
6261        pos = 8 - ffs(cuir->ned_map);
6262        ref_ned = (char *)&ref_conf_data->neds[pos];
6263        ref_gneq = (char *)&ref_conf_data->gneq;
6264        /* transfer 24 bit neq_map to mask */
6265        mask = cuir->neq_map[2];
6266        mask |= cuir->neq_map[1] << 8;
6267        mask |= cuir->neq_map[0] << 16;
6268
6269        for (path = 0; path < 8; path++) {
6270                /* initialise data per path */
6271                bitmask = mask;
6272                conf_data = device->path[path].conf_data;
6273                pos = 8 - ffs(cuir->ned_map);
6274                ned = (char *) &conf_data->neds[pos];
6275                /* compare reference ned and per path ned */
6276                if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
6277                        continue;
6278                gneq = (char *)&conf_data->gneq;
6279                /* compare reference gneq and per_path gneq under the
6280                   24 bit mask, where mask bit 0 corresponds to gneq
6281                   byte 31 and mask bit 23 to gneq byte 8 */
6282                while (bitmask) {
6283                        pos = ffs(bitmask) - 1;
6284                        if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
6285                            != 0)
6286                                break;
6287                        clear_bit(pos, &bitmask);
6288                }
6289                if (bitmask)
6290                        continue;
6291                /* device and path match the reference values,
6292                   so add the path to the CUIR scope */
6293                tbcpm |= 0x80 >> path;
6294        }
6295        return tbcpm;
6296}
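
/*
 * Worked example for the mask handling above (derived from the code):
 * neq_map = { 0x00, 0x01, 0x80 } gives mask = 0x000180, i.e. bits 7 and 8
 * are set, so for each path the loop compares gneq bytes 31 - 7 = 24 and
 * 31 - 8 = 23 against the reference conf data; only paths that match in all
 * selected gneq bytes (and in the reference ned) are added to the returned
 * scope.
 */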
6297
6298static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
6299                                       unsigned long paths, int action)
6300{
6301        int pos;
6302
6303        while (paths) {
6304                /* get position of bit in mask */
6305                pos = 8 - ffs(paths);
6306                /* get channel path descriptor from this position */
6307                if (action == CUIR_QUIESCE)
6308                        pr_warn("Service on the storage server caused path %x.%02x to go offline",
6309                                device->path[pos].cssid,
6310                                device->path[pos].chpid);
6311                else if (action == CUIR_RESUME)
6312                        pr_info("Path %x.%02x is back online after service on the storage server",
6313                                device->path[pos].cssid,
6314                                device->path[pos].chpid);
6315                clear_bit(7 - pos, &paths);
6316        }
6317}
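
/*
 * The path masks used here are MSB-first: bit 0x80 is path position 0 and
 * bit 0x01 is path position 7.  For example, paths = 0x80 gives ffs() = 8
 * and thus pos = 0, while paths = 0x01 gives ffs() = 1 and pos = 7;
 * clear_bit(7 - pos, &paths) then clears exactly the bit just handled.
 */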
6318
6319static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
6320                                      struct dasd_cuir_message *cuir)
6321{
6322        unsigned long tbcpm;
6323
6324        tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
6325        /* nothing to do if path is not in use */
6326        if (!(dasd_path_get_opm(device) & tbcpm))
6327                return 0;
6328        if (!(dasd_path_get_opm(device) & ~tbcpm)) {
6329                /* no path would be left if the CUIR action is taken,
6330                   so return an error */
6331                return -EINVAL;
6332        }
6333        /* remove device from operational path mask */
6334        dasd_path_remove_opm(device, tbcpm);
6335        dasd_path_add_cuirpm(device, tbcpm);
6336        return tbcpm;
6337}
6338
6339/*
6340 * walk through all devices and build a path mask to quiesce them
6341 * return an error if the last path to a device would be removed
6342 *
6343 * if only some of the devices are quiesced and an error
6344 * occurs, no onlining is necessary; the storage server will
6345 * notify the devices that are already set offline again
6346 */
6347static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
6348                                  struct dasd_cuir_message *cuir)
6349{
6350        struct dasd_eckd_private *private = device->private;
6351        struct alias_pav_group *pavgroup, *tempgroup;
6352        struct dasd_device *dev, *n;
6353        unsigned long paths = 0;
6354        unsigned long flags;
6355        int tbcpm;
6356
6357        /* active devices */
6358        list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6359                                 alias_list) {
6360                spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6361                tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6362                spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6363                if (tbcpm < 0)
6364                        goto out_err;
6365                paths |= tbcpm;
6366        }
6367        /* inactive devices */
6368        list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6369                                 alias_list) {
6370                spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6371                tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6372                spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6373                if (tbcpm < 0)
6374                        goto out_err;
6375                paths |= tbcpm;
6376        }
6377        /* devices in PAV groups */
6378        list_for_each_entry_safe(pavgroup, tempgroup,
6379                                 &private->lcu->grouplist, group) {
6380                list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6381                                         alias_list) {
6382                        spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6383                        tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6384                        spin_unlock_irqrestore(
6385                                get_ccwdev_lock(dev->cdev), flags);
6386                        if (tbcpm < 0)
6387                                goto out_err;
6388                        paths |= tbcpm;
6389                }
6390                list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6391                                         alias_list) {
6392                        spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6393                        tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6394                        spin_unlock_irqrestore(
6395                                get_ccwdev_lock(dev->cdev), flags);
6396                        if (tbcpm < 0)
6397                                goto out_err;
6398                        paths |= tbcpm;
6399                }
6400        }
6401        /* notify user about all paths affected by CUIR action */
6402        dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
6403        return 0;
6404out_err:
6405        return tbcpm;
6406}
6407
6408static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
6409                                 struct dasd_cuir_message *cuir)
6410{
6411        struct dasd_eckd_private *private = device->private;
6412        struct alias_pav_group *pavgroup, *tempgroup;
6413        struct dasd_device *dev, *n;
6414        unsigned long paths = 0;
6415        int tbcpm;
6416
6417        /*
6418         * the path may have been added through a generic path event before;
6419         * only trigger path verification if the path is not already in use
6420         */
6421        list_for_each_entry_safe(dev, n,
6422                                 &private->lcu->active_devices,
6423                                 alias_list) {
6424                tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6425                paths |= tbcpm;
6426                if (!(dasd_path_get_opm(dev) & tbcpm)) {
6427                        dasd_path_add_tbvpm(dev, tbcpm);
6428                        dasd_schedule_device_bh(dev);
6429                }
6430        }
6431        list_for_each_entry_safe(dev, n,
6432                                 &private->lcu->inactive_devices,
6433                                 alias_list) {
6434                tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6435                paths |= tbcpm;
6436                if (!(dasd_path_get_opm(dev) & tbcpm)) {
6437                        dasd_path_add_tbvpm(dev, tbcpm);
6438                        dasd_schedule_device_bh(dev);
6439                }
6440        }
6441        /* devices in PAV groups */
6442        list_for_each_entry_safe(pavgroup, tempgroup,
6443                                 &private->lcu->grouplist,
6444                                 group) {
6445                list_for_each_entry_safe(dev, n,
6446                                         &pavgroup->baselist,
6447                                         alias_list) {
6448                        tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6449                        paths |= tbcpm;
6450                        if (!(dasd_path_get_opm(dev) & tbcpm)) {
6451                                dasd_path_add_tbvpm(dev, tbcpm);
6452                                dasd_schedule_device_bh(dev);
6453                        }
6454                }
6455                list_for_each_entry_safe(dev, n,
6456                                         &pavgroup->aliaslist,
6457                                         alias_list) {
6458                        tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6459                        paths |= tbcpm;
6460                        if (!(dasd_path_get_opm(dev) & tbcpm)) {
6461                                dasd_path_add_tbvpm(dev, tbcpm);
6462                                dasd_schedule_device_bh(dev);
6463                        }
6464                }
6465        }
6466        /* notify user about all paths affected by CUIR action */
6467        dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
6468        return 0;
6469}
6470
6471static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
6472                                 __u8 lpum)
6473{
6474        struct dasd_cuir_message *cuir = messages;
6475        int response;
6476
6477        DBF_DEV_EVENT(DBF_WARNING, device,
6478                      "CUIR request: %016llx %016llx %016llx %08x",
6479                      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
6480                      ((u32 *)cuir)[3]);
6481
6482        if (cuir->code == CUIR_QUIESCE) {
6483                /* quiesce */
6484                if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
6485                        response = PSF_CUIR_LAST_PATH;
6486                else
6487                        response = PSF_CUIR_COMPLETED;
6488        } else if (cuir->code == CUIR_RESUME) {
6489                /* resume */
6490                dasd_eckd_cuir_resume(device, lpum, cuir);
6491                response = PSF_CUIR_COMPLETED;
6492        } else
6493                response = PSF_CUIR_NOT_SUPPORTED;
6494
6495        dasd_eckd_psf_cuir_response(device, response,
6496                                    cuir->message_id, lpum);
6497        DBF_DEV_EVENT(DBF_WARNING, device,
6498                      "CUIR response: %d on message ID %08x", response,
6499                      cuir->message_id);
6500        /* to make sure there is no attention left, schedule the work again */
6501        device->discipline->check_attention(device, lpum);
6502}
6503
6504static void dasd_eckd_oos_resume(struct dasd_device *device)
6505{
6506        struct dasd_eckd_private *private = device->private;
6507        struct alias_pav_group *pavgroup, *tempgroup;
6508        struct dasd_device *dev, *n;
6509        unsigned long flags;
6510
6511        spin_lock_irqsave(&private->lcu->lock, flags);
6512        list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6513                                 alias_list) {
6514                if (dev->stopped & DASD_STOPPED_NOSPC)
6515                        dasd_generic_space_avail(dev);
6516        }
6517        list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6518                                 alias_list) {
6519                if (dev->stopped & DASD_STOPPED_NOSPC)
6520                        dasd_generic_space_avail(dev);
6521        }
6522        /* devices in PAV groups */
6523        list_for_each_entry_safe(pavgroup, tempgroup,
6524                                 &private->lcu->grouplist,
6525                                 group) {
6526                list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6527                                         alias_list) {
6528                        if (dev->stopped & DASD_STOPPED_NOSPC)
6529                                dasd_generic_space_avail(dev);
6530                }
6531                list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6532                                         alias_list) {
6533                        if (dev->stopped & DASD_STOPPED_NOSPC)
6534                                dasd_generic_space_avail(dev);
6535                }
6536        }
6537        spin_unlock_irqrestore(&private->lcu->lock, flags);
6538}
6539
6540static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
6541                                 __u8 lpum)
6542{
6543        struct dasd_oos_message *oos = messages;
6544
6545        switch (oos->code) {
6546        case REPO_WARN:
6547        case POOL_WARN:
6548                dev_warn(&device->cdev->dev,
6549                         "Extent pool usage has reached a critical value\n");
6550                dasd_eckd_oos_resume(device);
6551                break;
6552        case REPO_EXHAUST:
6553        case POOL_EXHAUST:
6554                dev_warn(&device->cdev->dev,
6555                         "Extent pool is exhausted\n");
6556                break;
6557        case REPO_RELIEVE:
6558        case POOL_RELIEVE:
6559                dev_info(&device->cdev->dev,
6560                         "Extent pool physical space constraint has been relieved\n");
6561                break;
6562        }
6563
6564        /* In any case, update related data */
6565        dasd_eckd_read_ext_pool_info(device);
6566
6567        /* to make sure there is no attention left, schedule the work again */
6568        device->discipline->check_attention(device, lpum);
6569}
6570
6571static void dasd_eckd_check_attention_work(struct work_struct *work)
6572{
6573        struct check_attention_work_data *data;
6574        struct dasd_rssd_messages *messages;
6575        struct dasd_device *device;
6576        int rc;
6577
6578        data = container_of(work, struct check_attention_work_data, worker);
6579        device = data->device;
6580        messages = kzalloc(sizeof(*messages), GFP_KERNEL);
6581        if (!messages) {
6582                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6583                              "Could not allocate attention message buffer");
6584                goto out;
6585        }
6586        rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
6587        if (rc)
6588                goto out;
6589
6590        if (messages->length == ATTENTION_LENGTH_CUIR &&
6591            messages->format == ATTENTION_FORMAT_CUIR)
6592                dasd_eckd_handle_cuir(device, messages, data->lpum);
6593        if (messages->length == ATTENTION_LENGTH_OOS &&
6594            messages->format == ATTENTION_FORMAT_OOS)
6595                dasd_eckd_handle_oos(device, messages, data->lpum);
6596
6597out:
6598        dasd_put_device(device);
6599        kfree(messages);
6600        kfree(data);
6601}
6602
6603static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
6604{
6605        struct check_attention_work_data *data;
6606
6607        data = kzalloc(sizeof(*data), GFP_ATOMIC);
6608        if (!data)
6609                return -ENOMEM;
6610        INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
6611        dasd_get_device(device);
6612        data->device = device;
6613        data->lpum = lpum;
6614        schedule_work(&data->worker);
6615        return 0;
6616}
6617
6618static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
6619{
6620        if (~lpum & dasd_path_get_opm(device)) {
6621                dasd_path_add_nohpfpm(device, lpum);
6622                dasd_path_remove_opm(device, lpum);
6623                dev_err(&device->cdev->dev,
6624                        "Channel path %02X lost HPF functionality and is disabled\n",
6625                        lpum);
6626                return 1;
6627        }
6628        return 0;
6629}
6630
6631static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
6632{
6633        struct dasd_eckd_private *private = device->private;
6634
6635        dev_err(&device->cdev->dev,
6636                "High Performance FICON disabled\n");
6637        private->fcx_max_data = 0;
6638}
6639
6640static int dasd_eckd_hpf_enabled(struct dasd_device *device)
6641{
6642        struct dasd_eckd_private *private = device->private;
6643
6644        return private->fcx_max_data ? 1 : 0;
6645}
6646
6647static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
6648                                       struct irb *irb)
6649{
6650        struct dasd_eckd_private *private = device->private;
6651
6652        if (!private->fcx_max_data) {
6653                /* sanity check for no HPF, the error makes no sense */
6654                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6655                              "Trying to disable HPF for a non HPF device");
6656                return;
6657        }
6658        if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
6659                dasd_eckd_disable_hpf_device(device);
6660        } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
6661                if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
6662                        return;
6663                dasd_eckd_disable_hpf_device(device);
6664                dasd_path_set_tbvpm(device,
6665                                  dasd_path_get_hpfpm(device));
6666        }
6667        /*
6668         * prevent any new I/O from being started on the device and schedule a
6669         * requeue of existing requests
6670         */
6671        dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
6672        dasd_schedule_requeue(device);
6673}
6674
6675/*
6676 * Initialize block layer request queue.
6677 */
6678static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
6679{
6680        unsigned int logical_block_size = block->bp_block;
6681        struct request_queue *q = block->request_queue;
6682        struct dasd_device *device = block->base;
6683        int max;
6684
6685        if (device->features & DASD_FEATURE_USERAW) {
6686                /*
6687                 * the max_blocks value for raw_track access is 256;
6688                 * it is higher than the native ECKD value because we
6689                 * only need one CCW per track,
6690                 * so the max_hw_sectors are
6691                 * 2048 x 512B = 1024kB = 16 tracks
6692                 */
6693                max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
6694        } else {
6695                max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
6696        }
6697        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
6698        q->limits.max_dev_sectors = max;
6699        blk_queue_logical_block_size(q, logical_block_size);
6700        blk_queue_max_hw_sectors(q, max);
6701        blk_queue_max_segments(q, USHRT_MAX);
6702        /* With page-sized segments, each segment can be translated into one idaw/tidaw */
6703        blk_queue_max_segment_size(q, PAGE_SIZE);
6704        blk_queue_segment_boundary(q, PAGE_SIZE - 1);
6705}
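
/*
 * Worked example for the raw-track limit above (a sketch based on the
 * constants and comments in this file): with DASD_ECKD_MAX_BLOCKS_RAW taken
 * as 256 blocks of 4kB and 512B sectors, s2b_shift is 3, so
 * max = 256 << 3 = 2048 sectors = 1024kB, which corresponds to 16 raw
 * tracks of 64kB (128 sectors) each.
 */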
6706
6707static struct ccw_driver dasd_eckd_driver = {
6708        .driver = {
6709                .name   = "dasd-eckd",
6710                .owner  = THIS_MODULE,
6711        },
6712        .ids         = dasd_eckd_ids,
6713        .probe       = dasd_eckd_probe,
6714        .remove      = dasd_generic_remove,
6715        .set_offline = dasd_generic_set_offline,
6716        .set_online  = dasd_eckd_set_online,
6717        .notify      = dasd_generic_notify,
6718        .path_event  = dasd_generic_path_event,
6719        .shutdown    = dasd_generic_shutdown,
6720        .freeze      = dasd_generic_pm_freeze,
6721        .thaw        = dasd_generic_restore_device,
6722        .restore     = dasd_generic_restore_device,
6723        .uc_handler  = dasd_generic_uc_handler,
6724        .int_class   = IRQIO_DAS,
6725};
6726
6727static struct dasd_discipline dasd_eckd_discipline = {
6728        .owner = THIS_MODULE,
6729        .name = "ECKD",
6730        .ebcname = "ECKD",
6731        .check_device = dasd_eckd_check_characteristics,
6732        .uncheck_device = dasd_eckd_uncheck_device,
6733        .do_analysis = dasd_eckd_do_analysis,
6734        .pe_handler = dasd_eckd_pe_handler,
6735        .basic_to_ready = dasd_eckd_basic_to_ready,
6736        .online_to_ready = dasd_eckd_online_to_ready,
6737        .basic_to_known = dasd_eckd_basic_to_known,
6738        .setup_blk_queue = dasd_eckd_setup_blk_queue,
6739        .fill_geometry = dasd_eckd_fill_geometry,
6740        .start_IO = dasd_start_IO,
6741        .term_IO = dasd_term_IO,
6742        .handle_terminated_request = dasd_eckd_handle_terminated_request,
6743        .format_device = dasd_eckd_format_device,
6744        .check_device_format = dasd_eckd_check_device_format,
6745        .erp_action = dasd_eckd_erp_action,
6746        .erp_postaction = dasd_eckd_erp_postaction,
6747        .check_for_device_change = dasd_eckd_check_for_device_change,
6748        .build_cp = dasd_eckd_build_alias_cp,
6749        .free_cp = dasd_eckd_free_alias_cp,
6750        .dump_sense = dasd_eckd_dump_sense,
6751        .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
6752        .fill_info = dasd_eckd_fill_info,
6753        .ioctl = dasd_eckd_ioctl,
6754        .freeze = dasd_eckd_pm_freeze,
6755        .restore = dasd_eckd_restore_device,
6756        .reload = dasd_eckd_reload_device,
6757        .get_uid = dasd_eckd_get_uid,
6758        .kick_validate = dasd_eckd_kick_validate_server,
6759        .check_attention = dasd_eckd_check_attention,
6760        .host_access_count = dasd_eckd_host_access_count,
6761        .hosts_print = dasd_hosts_print,
6762        .handle_hpf_error = dasd_eckd_handle_hpf_error,
6763        .disable_hpf = dasd_eckd_disable_hpf_device,
6764        .hpf_enabled = dasd_eckd_hpf_enabled,
6765        .reset_path = dasd_eckd_reset_path,
6766        .is_ese = dasd_eckd_is_ese,
6767        .space_allocated = dasd_eckd_space_allocated,
6768        .space_configured = dasd_eckd_space_configured,
6769        .logical_capacity = dasd_eckd_logical_capacity,
6770        .release_space = dasd_eckd_release_space,
6771        .ext_pool_id = dasd_eckd_ext_pool_id,
6772        .ext_size = dasd_eckd_ext_size,
6773        .ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
6774        .ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
6775        .ext_pool_oos = dasd_eckd_ext_pool_oos,
6776        .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
6777        .ese_format = dasd_eckd_ese_format,
6778        .ese_read = dasd_eckd_ese_read,
6779};
6780
6781static int __init
6782dasd_eckd_init(void)
6783{
6784        int ret;
6785
6786        ASCEBC(dasd_eckd_discipline.ebcname, 4);
6787        dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
6788                                   GFP_KERNEL | GFP_DMA);
6789        if (!dasd_reserve_req)
6790                return -ENOMEM;
6791        dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
6792                                    GFP_KERNEL | GFP_DMA);
6793        if (!dasd_vol_info_req) {
                    kfree(dasd_reserve_req);
6794                return -ENOMEM;
            }
6795        pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
6796                                    GFP_KERNEL | GFP_DMA);
6797        if (!pe_handler_worker) {
6798                kfree(dasd_reserve_req);
6799                kfree(dasd_vol_info_req);
6800                return -ENOMEM;
6801        }
6802        rawpadpage = (void *)__get_free_page(GFP_KERNEL);
6803        if (!rawpadpage) {
6804                kfree(pe_handler_worker);
6805                kfree(dasd_reserve_req);
6806                kfree(dasd_vol_info_req);
6807                return -ENOMEM;
6808        }
6809        ret = ccw_driver_register(&dasd_eckd_driver);
6810        if (!ret)
6811                wait_for_device_probe();
6812        else {
6813                kfree(pe_handler_worker);
6814                kfree(dasd_reserve_req);
6815                kfree(dasd_vol_info_req);
6816                free_page((unsigned long)rawpadpage);
6817        }
6818        return ret;
6819}
6820
6821static void __exit
6822dasd_eckd_cleanup(void)
6823{
6824        ccw_driver_unregister(&dasd_eckd_driver);
6825        kfree(pe_handler_worker);
6826        kfree(dasd_reserve_req);
            kfree(dasd_vol_info_req);
6827        free_page((unsigned long)rawpadpage);
6828}
6829
6830module_init(dasd_eckd_init);
6831module_exit(dasd_eckd_cleanup);
6832