linux/drivers/s390/cio/device_pgid.c
/*
 *  CCW device PGID and path verification I/O handling.
 *
 *    Copyright IBM Corp. 2002, 2009
 *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 *               Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"

#define PGID_RETRIES    256
#define PGID_TIMEOUT    (10 * HZ)

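/*
 * Note: path masks (pam, opm, vpm, lpm) carry one bit per channel path;
 * 0x80 denotes the first path. Requests walk the paths from the leftmost
 * bit downward by shifting the mask to the right.
 */
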
static void verify_start(struct ccw_device *cdev);

/*
 * Process path verification data and report result.
 */
static void verify_done(struct ccw_device *cdev, int rc)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_dev_id *id = &cdev->private->dev_id;
        int mpath = cdev->private->flags.mpath;
        int pgroup = cdev->private->flags.pgroup;

        if (rc)
                goto out;
        /* Ensure consistent multipathing state at device and channel. */
        if (sch->config.mp != mpath) {
                sch->config.mp = mpath;
                rc = cio_commit_config(sch);
        }
out:
        CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
                         "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
                         sch->vpm);
        ccw_device_verify_done(cdev, rc);
}

/*
 * Create channel program to perform a NOOP.
 */
static void nop_build_cp(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;
        struct ccw1 *cp = cdev->private->iccws;

        cp->cmd_code    = CCW_CMD_NOOP;
        cp->cda         = 0;
        cp->count       = 0;
        cp->flags       = CCW_FLAG_SLI;
        req->cp         = cp;
}

/*
 * Perform NOOP on a single path.
 */
static void nop_do(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;

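        /* Select the next remaining path from the mask of usable paths. */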
        req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
                              ~cdev->private->path_noirq_mask);
        if (!req->lpm)
                goto out_nopath;
        nop_build_cp(cdev);
        ccw_request_start(cdev);
        return;

out_nopath:
        verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Adjust NOOP I/O status.
 */
static enum io_status nop_filter(struct ccw_device *cdev, void *data,
                                 struct irb *irb, enum io_status status)
{
        /* Only subchannel status might indicate a path error. */
        if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
                return IO_DONE;
        return status;
}

/*
 * Process NOOP request result for a single path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;

        switch (rc) {
        case 0:
                sch->vpm |= req->lpm;
                break;
        case -ETIME:
                cdev->private->path_noirq_mask |= req->lpm;
                break;
        case -EACCES:
                cdev->private->path_notoper_mask |= req->lpm;
                break;
        default:
                goto err;
        }
        /* Continue on the next path. */
        req->lpm >>= 1;
        nop_do(cdev);
        return;

err:
        verify_done(cdev, rc);
}

/*
 * Create channel program to perform SET PGID on a single path.
 */
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
        struct ccw_request *req = &cdev->private->req;
        struct ccw1 *cp = cdev->private->iccws;
        int i = pathmask_to_pos(req->lpm);
        struct pgid *pgid = &cdev->private->pgid[i];

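        /* The first PGID byte carries the SET PGID function code
         * (establish/resign/disband). */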
        pgid->inf.fc    = fn;
        cp->cmd_code    = CCW_CMD_SET_PGID;
        cp->cda         = (u32) (addr_t) pgid;
        cp->count       = sizeof(*pgid);
        cp->flags       = CCW_FLAG_SLI;
        req->cp         = cp;
}

static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
{
        if (rc) {
                /* We don't know the path groups' state. Abort. */
                verify_done(cdev, rc);
                return;
        }
        /*
         * Path groups have been reset. Restart path verification but
         * leave paths in path_noirq_mask out.
         */
        cdev->private->flags.pgid_unknown = 0;
        verify_start(cdev);
}

/*
 * Reset pathgroups and restart path verification, leaving unusable
 * paths out.
 */
static void pgid_wipeout_start(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_dev_id *id = &cdev->private->dev_id;
        struct ccw_request *req = &cdev->private->req;
        u8 fn;

        CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
                      id->ssid, id->devno, cdev->private->pgid_valid_mask,
                      cdev->private->path_noirq_mask);

        /* Initialize request data. */
        memset(req, 0, sizeof(*req));
        req->timeout    = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        req->lpm        = sch->schib.pmcw.pam;
        req->callback   = pgid_wipeout_callback;
        fn = SPID_FUNC_DISBAND;
        if (cdev->private->flags.mpath)
                fn |= SPID_FUNC_MULTI_PATH;
        spid_build_cp(cdev, fn);
        ccw_request_start(cdev);
}

/*
 * Perform establish/resign SET PGID on a single path.
 */
static void spid_do(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        u8 fn;

        /* Use the next available path that is not already in the correct state. */
        req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
        if (!req->lpm)
                goto out_nopath;
        /* Channel program setup. */
        if (req->lpm & sch->opm)
                fn = SPID_FUNC_ESTABLISH;
        else
                fn = SPID_FUNC_RESIGN;
        if (cdev->private->flags.mpath)
                fn |= SPID_FUNC_MULTI_PATH;
        spid_build_cp(cdev, fn);
        ccw_request_start(cdev);
        return;

out_nopath:
        if (cdev->private->flags.pgid_unknown) {
                /* At least one SPID could be partially done. */
                pgid_wipeout_start(cdev);
                return;
        }
        verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Process SET PGID request result for a single path.
 */
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;

        switch (rc) {
        case 0:
                sch->vpm |= req->lpm & sch->opm;
                break;
        case -ETIME:
                cdev->private->flags.pgid_unknown = 1;
                cdev->private->path_noirq_mask |= req->lpm;
                break;
        case -EACCES:
                cdev->private->path_notoper_mask |= req->lpm;
                break;
        case -EOPNOTSUPP:
                if (cdev->private->flags.mpath) {
                        /* Try without multipathing. */
                        cdev->private->flags.mpath = 0;
                        goto out_restart;
                }
                /* Try without pathgrouping. */
                cdev->private->flags.pgroup = 0;
                goto out_restart;
        default:
                goto err;
        }
        req->lpm >>= 1;
        spid_do(cdev);
        return;

out_restart:
        verify_start(cdev);
        return;
err:
        verify_done(cdev, rc);
}

static void spid_start(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;

        /* Initialize request data. */
        memset(req, 0, sizeof(*req));
        req->timeout    = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        req->lpm        = 0x80;
        req->singlepath = 1;
        req->callback   = spid_callback;
        spid_do(cdev);
}

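/* Check whether a PGID is all zero, ignoring the first byte
 * (function control / path state). */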
static int pgid_is_reset(struct pgid *p)
{
        char *c;

        for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
                if (*c != 0)
                        return 0;
        }
        return 1;
}

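/* Compare two PGIDs, ignoring the first byte (function control / path state). */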
static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
        return memcmp((char *) p1 + 1, (char *) p2 + 1,
                      sizeof(struct pgid) - 1);
}

/*
 * Determine pathgroup state from PGID data.
 */
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
                         int *mismatch, u8 *reserved, u8 *reset)
{
        struct pgid *pgid = &cdev->private->pgid[0];
        struct pgid *first = NULL;
        int lpm;
        int i;

        *mismatch = 0;
        *reserved = 0;
        *reset = 0;
        for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
                if ((cdev->private->pgid_valid_mask & lpm) == 0)
                        continue;
                if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
                        *reserved |= lpm;
                if (pgid_is_reset(pgid)) {
                        *reset |= lpm;
                        continue;
                }
                if (!first) {
                        first = pgid;
                        continue;
                }
                if (pgid_cmp(pgid, first) != 0)
                        *mismatch = 1;
        }
        if (!first)
                first = &channel_subsystems[0]->global_pgid;
        *p = first;
}

static u8 pgid_to_donepm(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct pgid *pgid;
        int i;
        int lpm;
        u8 donepm = 0;

        /* Set bits for paths which are already in the target state. */
        for (i = 0; i < 8; i++) {
                lpm = 0x80 >> i;
                if ((cdev->private->pgid_valid_mask & lpm) == 0)
                        continue;
                pgid = &cdev->private->pgid[i];
                if (sch->opm & lpm) {
                        if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
                                continue;
                } else {
                        if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
                                continue;
                }
                if (cdev->private->flags.mpath) {
                        if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
                                continue;
                } else {
                        if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
                                continue;
                }
                donepm |= lpm;
        }

        return donepm;
}

static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
        int i;

        for (i = 0; i < 8; i++)
                memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
}

/*
 * Process SENSE PGID data and report result.
 */
static void snid_done(struct ccw_device *cdev, int rc)
{
        struct ccw_dev_id *id = &cdev->private->dev_id;
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct pgid *pgid;
        int mismatch = 0;
        u8 reserved = 0;
        u8 reset = 0;
        u8 donepm;

        if (rc)
                goto out;
        pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
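        /* All valid paths are reserved by another system. */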
        if (reserved == cdev->private->pgid_valid_mask)
                rc = -EUSERS;
        else if (mismatch)
                rc = -EOPNOTSUPP;
        else {
                donepm = pgid_to_donepm(cdev);
                sch->vpm = donepm & sch->opm;
                cdev->private->pgid_reset_mask |= reset;
                cdev->private->pgid_todo_mask &=
                        ~(donepm | cdev->private->path_noirq_mask);
                pgid_fill(cdev, pgid);
        }
out:
        CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
                      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
                      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
                      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
        switch (rc) {
        case 0:
                if (cdev->private->flags.pgid_unknown) {
                        pgid_wipeout_start(cdev);
                        return;
                }
                /* Anything left to do? */
                if (cdev->private->pgid_todo_mask == 0) {
                        verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
                        return;
                }
                /* Perform path-grouping. */
                spid_start(cdev);
                break;
        case -EOPNOTSUPP:
                /* Path-grouping not supported. */
                cdev->private->flags.pgroup = 0;
                cdev->private->flags.mpath = 0;
                verify_start(cdev);
                break;
        default:
                verify_done(cdev, rc);
        }
}

/*
 * Create channel program to perform a SENSE PGID on a single path.
 */
static void snid_build_cp(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;
        struct ccw1 *cp = cdev->private->iccws;
        int i = pathmask_to_pos(req->lpm);

        /* Channel program setup. */
        cp->cmd_code    = CCW_CMD_SENSE_PGID;
        cp->cda         = (u32) (addr_t) &cdev->private->pgid[i];
        cp->count       = sizeof(struct pgid);
        cp->flags       = CCW_FLAG_SLI;
        req->cp         = cp;
}

/*
 * Perform SENSE PGID on a single path.
 */
static void snid_do(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        int ret;

        req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
                              ~cdev->private->path_noirq_mask);
        if (!req->lpm)
                goto out_nopath;
        snid_build_cp(cdev);
        ccw_request_start(cdev);
        return;

out_nopath:
        if (cdev->private->pgid_valid_mask)
                ret = 0;
        else if (cdev->private->path_noirq_mask)
                ret = -ETIME;
        else
                ret = -EACCES;
        snid_done(cdev, ret);
}

/*
 * Process SENSE PGID request result for a single path.
 */
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct ccw_request *req = &cdev->private->req;

        switch (rc) {
        case 0:
                cdev->private->pgid_valid_mask |= req->lpm;
                break;
        case -ETIME:
                cdev->private->flags.pgid_unknown = 1;
                cdev->private->path_noirq_mask |= req->lpm;
                break;
        case -EACCES:
                cdev->private->path_notoper_mask |= req->lpm;
                break;
        default:
                goto err;
        }
        /* Continue on the next path. */
        req->lpm >>= 1;
        snid_do(cdev);
        return;

err:
        snid_done(cdev, rc);
}

/*
 * Perform path verification.
 */
static void verify_start(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        struct ccw_dev_id *devid = &cdev->private->dev_id;

        sch->vpm = 0;
        sch->lpm = sch->schib.pmcw.pam;

        /* Initialize PGID data. */
        memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
        cdev->private->pgid_valid_mask = 0;
        cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
        cdev->private->path_notoper_mask = 0;
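        /*
         * Note: path_noirq_mask is deliberately not cleared here, so that
         * a restarted verification leaves out paths that timed out earlier.
         */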

        /* Initialize request data. */
        memset(req, 0, sizeof(*req));
        req->timeout    = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        req->lpm        = 0x80;
        req->singlepath = 1;
        if (cdev->private->flags.pgroup) {
                CIO_TRACE_EVENT(4, "snid");
                CIO_HEX_EVENT(4, devid, sizeof(*devid));
                req->callback   = snid_callback;
                snid_do(cdev);
        } else {
                CIO_TRACE_EVENT(4, "nop");
                CIO_HEX_EVENT(4, devid, sizeof(*devid));
                req->filter     = nop_filter;
                req->callback   = nop_callback;
                nop_do(cdev);
        }
}

/**
 * ccw_device_verify_start - perform path verification
 * @cdev: ccw device
 *
 * Perform an I/O on each available channel path to @cdev to determine which
 * paths are operational. The resulting path mask is stored in sch->vpm.
 * If device options specify pathgrouping, establish a pathgroup for the
 * operational paths. When finished, call ccw_device_verify_done with a
 * return code specifying the result.
 */
void ccw_device_verify_start(struct ccw_device *cdev)
{
        CIO_TRACE_EVENT(4, "vrfy");
        CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
        /*
         * Initialize pathgroup and multipath state with target values.
         * They may change in the course of path verification.
         */
        cdev->private->flags.pgroup = cdev->private->options.pgroup;
        cdev->private->flags.mpath = cdev->private->options.mpath;
        cdev->private->flags.doverify = 0;
        cdev->private->path_noirq_mask = 0;
        verify_start(cdev);
}

/*
 * Process disband SET PGID request result.
 */
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_dev_id *id = &cdev->private->dev_id;

        if (rc)
                goto out;
        /* Ensure consistent multipathing state at device and channel. */
        cdev->private->flags.mpath = 0;
        if (sch->config.mp) {
                sch->config.mp = 0;
                rc = cio_commit_config(sch);
        }
out:
        CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
                      rc);
        ccw_device_disband_done(cdev, rc);
}

/**
 * ccw_device_disband_start - disband pathgroup
 * @cdev: ccw device
 *
 * Execute a SET PGID channel program on @cdev to disband a previously
 * established pathgroup. When finished, call ccw_device_disband_done with
 * a return code specifying the result.
 */
void ccw_device_disband_start(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        u8 fn;

        CIO_TRACE_EVENT(4, "disb");
        CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
        /* Request setup. */
        memset(req, 0, sizeof(*req));
        req->timeout    = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        req->lpm        = sch->schib.pmcw.pam & sch->opm;
        req->singlepath = 1;
        req->callback   = disband_callback;
        fn = SPID_FUNC_DISBAND;
        if (cdev->private->flags.mpath)
                fn |= SPID_FUNC_MULTI_PATH;
        spid_build_cp(cdev, fn);
        ccw_request_start(cdev);
}

struct stlck_data {
        struct completion done;
        int rc;
};

static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
        struct ccw_request *req = &cdev->private->req;
        struct ccw1 *cp = cdev->private->iccws;

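        /* Command-chain an unconditional-reserve CCW (STLCK) to a RELEASE CCW. */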
        cp[0].cmd_code = CCW_CMD_STLCK;
        cp[0].cda = (u32) (addr_t) buf1;
        cp[0].count = 32;
        cp[0].flags = CCW_FLAG_CC;
        cp[1].cmd_code = CCW_CMD_RELEASE;
        cp[1].cda = (u32) (addr_t) buf2;
        cp[1].count = 32;
        cp[1].flags = 0;
        req->cp = cp;
}

static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct stlck_data *sdata = data;

        sdata->rc = rc;
        complete(&sdata->done);
}

/**
 * ccw_device_stlck_start - perform unconditional release
 * @cdev: ccw device
 * @data: data pointer to be passed to ccw_device_stlck_done
 * @buf1: data pointer used in channel program
 * @buf2: data pointer used in channel program
 *
 * Execute a channel program on @cdev to release an existing PGID reservation.
 */
static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
                                   void *buf1, void *buf2)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;

        CIO_TRACE_EVENT(4, "stlck");
        CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
        /* Request setup. */
        memset(req, 0, sizeof(*req));
        req->timeout    = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        req->lpm        = sch->schib.pmcw.pam & sch->opm;
        req->data       = data;
        req->callback   = stlck_callback;
        stlck_build_cp(cdev, buf1, buf2);
        ccw_request_start(cdev);
}

/*
 * Perform unconditional reserve + release.
 */
int ccw_device_stlck(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct stlck_data data;
        u8 *buffer;
        int rc;

        /* Check if steal lock operation is valid for this device. */
        if (cdev->drv) {
                if (!cdev->private->options.force)
                        return -EINVAL;
        }
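        /* Allocate one 64-byte buffer, used as two 32-byte data areas. */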
        buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
        init_completion(&data.done);
        data.rc = -EIO;
        spin_lock_irq(sch->lock);
        rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
        if (rc)
                goto out_unlock;
        /* Perform operation. */
        cdev->private->state = DEV_STATE_STEAL_LOCK;
        ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
        spin_unlock_irq(sch->lock);
        /* Wait for operation to finish. */
        if (wait_for_completion_interruptible(&data.done)) {
                /* Got a signal. */
                spin_lock_irq(sch->lock);
                ccw_request_cancel(cdev);
                spin_unlock_irq(sch->lock);
                wait_for_completion(&data.done);
        }
        rc = data.rc;
        /* Check results. */
        spin_lock_irq(sch->lock);
        cio_disable_subchannel(sch);
        cdev->private->state = DEV_STATE_BOXED;
out_unlock:
        spin_unlock_irq(sch->lock);
        kfree(buffer);

        return rc;
}