/* linux/drivers/s390/cio/device_pgid.c */
   1/*
   2 *  CCW device PGID and path verification I/O handling.
   3 *
   4 *    Copyright IBM Corp. 2002, 2009
   5 *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
   6 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
   7 *               Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
   8 */
   9
  10#include <linux/kernel.h>
  11#include <linux/string.h>
  12#include <linux/types.h>
  13#include <linux/errno.h>
  14#include <linux/bitops.h>
  15#include <asm/ccwdev.h>
  16#include <asm/cio.h>
  17
  18#include "cio.h"
  19#include "cio_debug.h"
  20#include "device.h"
  21#include "io_sch.h"
  22
  23#define PGID_RETRIES    256
  24#define PGID_TIMEOUT    (10 * HZ)
  25
  26static void verify_start(struct ccw_device *cdev);
  27
/*
 * Process path verification data and report result.
 *
 * On success, propagate the multipathing setting that verification settled
 * on (flags.mpath may have been downgraded along the way) to the subchannel
 * configuration. In all cases, log the outcome and notify the device FSM
 * via ccw_device_verify_done().
 */
static void verify_done(struct ccw_device *cdev, int rc)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_dev_id *id = &cdev->private->dev_id;
        int mpath = cdev->private->flags.mpath;
        int pgroup = cdev->private->flags.pgroup;

        if (rc)
                goto out;
        /* Ensure consistent multipathing state at device and channel. */
        if (sch->config.mp != mpath) {
                sch->config.mp = mpath;
                /* A failed commit becomes the overall verification result. */
                rc = cio_commit_config(sch);
        }
out:
        CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
                         "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
                         sch->vpm);
        ccw_device_verify_done(cdev, rc);
}
  51
  52/*
  53 * Create channel program to perform a NOOP.
  54 */
  55static void nop_build_cp(struct ccw_device *cdev)
  56{
  57        struct ccw_request *req = &cdev->private->req;
  58        struct ccw1 *cp = cdev->private->iccws;
  59
  60        cp->cmd_code    = CCW_CMD_NOOP;
  61        cp->cda         = 0;
  62        cp->count       = 0;
  63        cp->flags       = CCW_FLAG_SLI;
  64        req->cp         = cp;
  65}
  66
/*
 * Perform NOOP on a single path.
 */
static void nop_do(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;

        /* Advance to the next path that is physically available,
         * operational and has not previously timed out. */
        req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
                              ~cdev->private->path_noirq_mask);
        if (!req->lpm)
                goto out_nopath;
        nop_build_cp(cdev);
        ccw_request_start(cdev);
        return;

out_nopath:
        /* All paths processed: success if at least one path was verified. */
        verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
  86
  87/*
  88 * Adjust NOOP I/O status.
  89 */
  90static enum io_status nop_filter(struct ccw_device *cdev, void *data,
  91                                 struct irb *irb, enum io_status status)
  92{
  93        /* Only subchannel status might indicate a path error. */
  94        if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
  95                return IO_DONE;
  96        return status;
  97}
  98
/*
 * Process NOOP request result for a single path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;

        switch (rc) {
        case 0:
                /* NOOP succeeded - mark this path as verified. */
                sch->vpm |= req->lpm;
                break;
        case -ETIME:
                /* No interrupt in time - exclude this path from now on. */
                cdev->private->path_noirq_mask |= req->lpm;
                break;
        case -EACCES:
                /* Path is not operational. */
                cdev->private->path_notoper_mask |= req->lpm;
                break;
        default:
                goto err;
        }
        /* Continue on the next path. */
        req->lpm >>= 1;
        nop_do(cdev);
        return;

err:
        verify_done(cdev, rc);
}
 128
/*
 * Create channel program to perform SET PGID on a single path.
 */
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
        struct ccw_request *req = &cdev->private->req;
        struct ccw1 *cp = cdev->private->iccws;
        /* Map the single-bit lpm (0x80 = path 0) to its pgid array slot. */
        int i = 8 - ffs(req->lpm);
        struct pgid *pgid = &cdev->private->pgid[i];

        /* The SET PGID function code is carried in the PGID data itself. */
        pgid->inf.fc    = fn;
        cp->cmd_code    = CCW_CMD_SET_PGID;
        cp->cda         = (u32) (addr_t) pgid;
        cp->count       = sizeof(*pgid);
        cp->flags       = CCW_FLAG_SLI;
        req->cp         = cp;
}
 146
 147static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
 148{
 149        if (rc) {
 150                /* We don't know the path groups' state. Abort. */
 151                verify_done(cdev, rc);
 152                return;
 153        }
 154        /*
 155         * Path groups have been reset. Restart path verification but
 156         * leave paths in path_noirq_mask out.
 157         */
 158        cdev->private->flags.pgid_unknown = 0;
 159        verify_start(cdev);
 160}
 161
/*
 * Reset pathgroups and restart path verification, leave unusable paths out.
 */
static void pgid_wipeout_start(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_dev_id *id = &cdev->private->dev_id;
        struct ccw_request *req = &cdev->private->req;
        u8 fn;

        CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
                      id->ssid, id->devno, cdev->private->pgid_valid_mask,
                      cdev->private->path_noirq_mask);

        /* Initialize request data. */
        memset(req, 0, sizeof(*req));
        req->timeout    = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        /* Address all physically available paths. */
        req->lpm        = sch->schib.pmcw.pam;
        req->callback   = pgid_wipeout_callback;
        /* Disband; keep the multipath bit consistent with the target mode. */
        fn = SPID_FUNC_DISBAND;
        if (cdev->private->flags.mpath)
                fn |= SPID_FUNC_MULTI_PATH;
        spid_build_cp(cdev, fn);
        ccw_request_start(cdev);
}
 188
/*
 * Perform establish/resign SET PGID on a single path.
 */
static void spid_do(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        u8 fn;

        /* Use next available path that is not already in correct state. */
        req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
        if (!req->lpm)
                goto out_nopath;
        /* Channel program setup. */
        if (req->lpm & sch->opm)
                /* Operational path: join the path group. */
                fn = SPID_FUNC_ESTABLISH;
        else
                /* Non-operational path: leave the path group. */
                fn = SPID_FUNC_RESIGN;
        if (cdev->private->flags.mpath)
                fn |= SPID_FUNC_MULTI_PATH;
        spid_build_cp(cdev, fn);
        ccw_request_start(cdev);
        return;

out_nopath:
        if (cdev->private->flags.pgid_unknown) {
                /* At least one SPID could be partially done. */
                pgid_wipeout_start(cdev);
                return;
        }
        verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
 221
/*
 * Process SET PGID request result for a single path.
 */
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;

        switch (rc) {
        case 0:
                /* SPID succeeded - path is verified if it is operational. */
                sch->vpm |= req->lpm & sch->opm;
                break;
        case -ETIME:
                /* Timeout: the SPID may have partially completed, so the
                 * resulting path-group state is no longer known. */
                cdev->private->flags.pgid_unknown = 1;
                cdev->private->path_noirq_mask |= req->lpm;
                break;
        case -EACCES:
                /* Path is not operational. */
                cdev->private->path_notoper_mask |= req->lpm;
                break;
        case -EOPNOTSUPP:
                if (cdev->private->flags.mpath) {
                        /* Try without multipathing. */
                        cdev->private->flags.mpath = 0;
                        goto out_restart;
                }
                /* Try without pathgrouping. */
                cdev->private->flags.pgroup = 0;
                goto out_restart;
        default:
                goto err;
        }
        /* Continue on the next path. */
        req->lpm >>= 1;
        spid_do(cdev);
        return;

out_restart:
        verify_start(cdev);
        return;
err:
        verify_done(cdev, rc);
}
 263
 264static void spid_start(struct ccw_device *cdev)
 265{
 266        struct ccw_request *req = &cdev->private->req;
 267
 268        /* Initialize request data. */
 269        memset(req, 0, sizeof(*req));
 270        req->timeout    = PGID_TIMEOUT;
 271        req->maxretries = PGID_RETRIES;
 272        req->lpm        = 0x80;
 273        req->singlepath = 1;
 274        req->callback   = spid_callback;
 275        spid_do(cdev);
 276}
 277
 278static int pgid_is_reset(struct pgid *p)
 279{
 280        char *c;
 281
 282        for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
 283                if (*c != 0)
 284                        return 0;
 285        }
 286        return 1;
 287}
 288
 289static int pgid_cmp(struct pgid *p1, struct pgid *p2)
 290{
 291        return memcmp((char *) p1 + 1, (char *) p2 + 1,
 292                      sizeof(struct pgid) - 1);
 293}
 294
/*
 * Determine pathgroup state from PGID data.
 *
 * Examine the sensed PGID of every path in pgid_valid_mask and report:
 * @p:        a representative non-reset PGID, or the global PGID of the
 *            first channel subsystem if all valid paths were reset
 * @mismatch: set to 1 if two valid, non-reset PGIDs differ
 * @reserved: mask of paths indicating a reservation by another system
 * @reset:    mask of paths whose PGID is in the reset (all-zero) state
 */
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
                         int *mismatch, u8 *reserved, u8 *reset)
{
        struct pgid *pgid = &cdev->private->pgid[0];
        struct pgid *first = NULL;
        int lpm;
        int i;

        *mismatch = 0;
        *reserved = 0;
        *reset = 0;
        /* Walk the per-path PGID slots; lpm bit 0x80 maps to slot 0. */
        for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
                if ((cdev->private->pgid_valid_mask & lpm) == 0)
                        continue;
                if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
                        *reserved |= lpm;
                if (pgid_is_reset(pgid)) {
                        *reset |= lpm;
                        continue;
                }
                if (!first) {
                        first = pgid;
                        continue;
                }
                if (pgid_cmp(pgid, first) != 0)
                        *mismatch = 1;
        }
        if (!first)
                first = &channel_subsystems[0]->global_pgid;
        *p = first;
}
 329
/*
 * Return the mask of paths that are already in the target pathgroup state:
 * grouped on operational paths, ungrouped on non-operational ones, with
 * the single-/multi-path mode matching flags.mpath.
 */
static u8 pgid_to_donepm(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct pgid *pgid;
        int i;
        int lpm;
        u8 donepm = 0;

        /* Set bits for paths which are already in the target state. */
        for (i = 0; i < 8; i++) {
                lpm = 0x80 >> i;
                /* Only consider paths with valid SENSE PGID data. */
                if ((cdev->private->pgid_valid_mask & lpm) == 0)
                        continue;
                pgid = &cdev->private->pgid[i];
                if (sch->opm & lpm) {
                        /* Operational paths must be grouped. */
                        if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
                                continue;
                } else {
                        /* Non-operational paths must be ungrouped. */
                        if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
                                continue;
                }
                /* The path-mode must match the requested mpath setting. */
                if (cdev->private->flags.mpath) {
                        if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
                                continue;
                } else {
                        if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
                                continue;
                }
                donepm |= lpm;
        }

        return donepm;
}
 363
 364static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
 365{
 366        int i;
 367
 368        for (i = 0; i < 8; i++)
 369                memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
 370}
 371
/*
 * Process SENSE PGID data and report result.
 */
static void snid_done(struct ccw_device *cdev, int rc)
{
        struct ccw_dev_id *id = &cdev->private->dev_id;
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct pgid *pgid;
        int mismatch = 0;
        u8 reserved = 0;
        u8 reset = 0;
        u8 donepm;

        if (rc)
                goto out;
        pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
        if (reserved == cdev->private->pgid_valid_mask)
                /* Every valid path is reserved by another system. */
                rc = -EUSERS;
        else if (mismatch)
                /* Inconsistent PGIDs - pathgrouping cannot proceed. */
                rc = -EOPNOTSUPP;
        else {
                /* Paths already in the target state need no SET PGID. */
                donepm = pgid_to_donepm(cdev);
                sch->vpm = donepm & sch->opm;
                cdev->private->pgid_reset_mask |= reset;
                cdev->private->pgid_todo_mask &=
                        ~(donepm | cdev->private->path_noirq_mask);
                /* Use the representative PGID for all paths. */
                pgid_fill(cdev, pgid);
        }
out:
        CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
                      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
                      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
                      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
        switch (rc) {
        case 0:
                if (cdev->private->flags.pgid_unknown) {
                        /* Pathgroup state is in doubt - wipe it first. */
                        pgid_wipeout_start(cdev);
                        return;
                }
                /* Anything left to do? */
                if (cdev->private->pgid_todo_mask == 0) {
                        verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
                        return;
                }
                /* Perform path-grouping. */
                spid_start(cdev);
                break;
        case -EOPNOTSUPP:
                /* Path-grouping not supported. */
                cdev->private->flags.pgroup = 0;
                cdev->private->flags.mpath = 0;
                verify_start(cdev);
                break;
        default:
                verify_done(cdev, rc);
        }
}
 429
/*
 * Create channel program to perform a SENSE PGID on a single path.
 */
static void snid_build_cp(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;
        struct ccw1 *cp = cdev->private->iccws;
        /* Map the single-bit lpm (0x80 = path 0) to its pgid array slot. */
        int i = 8 - ffs(req->lpm);

        /* Channel program setup. */
        cp->cmd_code    = CCW_CMD_SENSE_PGID;
        cp->cda         = (u32) (addr_t) &cdev->private->pgid[i];
        cp->count       = sizeof(struct pgid);
        cp->flags       = CCW_FLAG_SLI;
        req->cp         = cp;
}
 446
/*
 * Perform SENSE PGID on a single path.
 */
static void snid_do(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        int ret;

        /* Advance to the next physically available path that has not
         * previously timed out. */
        req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
                              ~cdev->private->path_noirq_mask);
        if (!req->lpm)
                goto out_nopath;
        snid_build_cp(cdev);
        ccw_request_start(cdev);
        return;

out_nopath:
        /* All paths processed - pick the most informative result code. */
        if (cdev->private->pgid_valid_mask)
                ret = 0;
        else if (cdev->private->path_noirq_mask)
                ret = -ETIME;
        else
                ret = -EACCES;
        snid_done(cdev, ret);
}
 473
/*
 * Process SENSE PGID request result for single path.
 */
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct ccw_request *req = &cdev->private->req;

        switch (rc) {
        case 0:
                /* SNID succeeded - PGID data for this path is valid. */
                cdev->private->pgid_valid_mask |= req->lpm;
                break;
        case -ETIME:
                /* Timeout leaves the pathgroup state in doubt. */
                cdev->private->flags.pgid_unknown = 1;
                cdev->private->path_noirq_mask |= req->lpm;
                break;
        case -EACCES:
                /* Path is not operational. */
                cdev->private->path_notoper_mask |= req->lpm;
                break;
        default:
                goto err;
        }
        /* Continue on the next path. */
        req->lpm >>= 1;
        snid_do(cdev);
        return;

err:
        snid_done(cdev, rc);
}
 503
/*
 * Perform path verification.
 *
 * Reset all per-verification state and start either the SENSE PGID
 * sequence (when pathgrouping is enabled) or plain NOOP path checking.
 */
static void verify_start(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        struct ccw_dev_id *devid = &cdev->private->dev_id;

        sch->vpm = 0;
        sch->lpm = sch->schib.pmcw.pam;

        /* Initialize PGID data. */
        memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
        cdev->private->pgid_valid_mask = 0;
        /* Initially, every physically available path needs processing. */
        cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
        cdev->private->path_notoper_mask = 0;

        /* Initialize request data. */
        memset(req, 0, sizeof(*req));
        req->timeout    = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        /* Start with the first path (bit 0x80) and work downwards. */
        req->lpm        = 0x80;
        req->singlepath = 1;
        if (cdev->private->flags.pgroup) {
                CIO_TRACE_EVENT(4, "snid");
                CIO_HEX_EVENT(4, devid, sizeof(*devid));
                req->callback   = snid_callback;
                snid_do(cdev);
        } else {
                CIO_TRACE_EVENT(4, "nop");
                CIO_HEX_EVENT(4, devid, sizeof(*devid));
                req->filter     = nop_filter;
                req->callback   = nop_callback;
                nop_do(cdev);
        }
}
 541
 542/**
 543 * ccw_device_verify_start - perform path verification
 544 * @cdev: ccw device
 545 *
 546 * Perform an I/O on each available channel path to @cdev to determine which
 547 * paths are operational. The resulting path mask is stored in sch->vpm.
 548 * If device options specify pathgrouping, establish a pathgroup for the
 549 * operational paths. When finished, call ccw_device_verify_done with a
 550 * return code specifying the result.
 551 */
 552void ccw_device_verify_start(struct ccw_device *cdev)
 553{
 554        CIO_TRACE_EVENT(4, "vrfy");
 555        CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
 556        /*
 557         * Initialize pathgroup and multipath state with target values.
 558         * They may change in the course of path verification.
 559         */
 560        cdev->private->flags.pgroup = cdev->private->options.pgroup;
 561        cdev->private->flags.mpath = cdev->private->options.mpath;
 562        cdev->private->flags.doverify = 0;
 563        cdev->private->path_noirq_mask = 0;
 564        verify_start(cdev);
 565}
 566
/*
 * Process disband SET PGID request result.
 */
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_dev_id *id = &cdev->private->dev_id;

        if (rc)
                goto out;
        /* Ensure consistent multipathing state at device and channel. */
        cdev->private->flags.mpath = 0;
        if (sch->config.mp) {
                sch->config.mp = 0;
                /* A failed commit becomes the overall disband result. */
                rc = cio_commit_config(sch);
        }
out:
        CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
                      rc);
        ccw_device_disband_done(cdev, rc);
}
 588
/**
 * ccw_device_disband_start - disband pathgroup
 * @cdev: ccw device
 *
 * Execute a SET PGID channel program on @cdev to disband a previously
 * established pathgroup. When finished, call ccw_device_disband_done with
 * a return code specifying the result.
 */
void ccw_device_disband_start(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        u8 fn;

        CIO_TRACE_EVENT(4, "disb");
        CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
        /* Request setup. */
        memset(req, 0, sizeof(*req));
        req->timeout    = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        /* Use paths that are both physically available and operational. */
        req->lpm        = sch->schib.pmcw.pam & sch->opm;
        req->singlepath = 1;
        req->callback   = disband_callback;
        fn = SPID_FUNC_DISBAND;
        if (cdev->private->flags.mpath)
                fn |= SPID_FUNC_MULTI_PATH;
        spid_build_cp(cdev, fn);
        ccw_request_start(cdev);
}
 618
 619static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
 620{
 621        struct ccw_request *req = &cdev->private->req;
 622        struct ccw1 *cp = cdev->private->iccws;
 623
 624        cp[0].cmd_code = CCW_CMD_STLCK;
 625        cp[0].cda = (u32) (addr_t) buf1;
 626        cp[0].count = 32;
 627        cp[0].flags = CCW_FLAG_CC;
 628        cp[1].cmd_code = CCW_CMD_RELEASE;
 629        cp[1].cda = (u32) (addr_t) buf2;
 630        cp[1].count = 32;
 631        cp[1].flags = 0;
 632        req->cp = cp;
 633}
 634
/* Forward STLCK request completion to the device FSM. */
static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
        ccw_device_stlck_done(cdev, data, rc);
}
 639
/**
 * ccw_device_stlck_start - perform unconditional release
 * @cdev: ccw device
 * @data: data pointer to be passed to ccw_device_stlck_done
 * @buf1: data pointer used in channel program
 * @buf2: data pointer used in channel program
 *
 * Execute a channel program on @cdev to release an existing PGID reservation.
 * When finished, call ccw_device_stlck_done with a return code specifying the
 * result.
 */
void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
                            void *buf2)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;

        CIO_TRACE_EVENT(4, "stlck");
        CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
        /* Request setup. */
        memset(req, 0, sizeof(*req));
        req->timeout    = PGID_TIMEOUT;
        req->maxretries = PGID_RETRIES;
        /* Use paths that are both physically available and operational. */
        req->lpm        = sch->schib.pmcw.pam & sch->opm;
        req->data       = data;
        req->callback   = stlck_callback;
        stlck_build_cp(cdev, buf1, buf2);
        ccw_request_start(cdev);
}
 669
 670