linux/drivers/s390/cio/ccwreq.c
/*
 *  Handling of internal CCW device requests.
 *
 *    Copyright IBM Corp. 2009, 2011
 *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"

/**
 * lpm_adjust - adjust path mask
 * @lpm: path mask to adjust
 * @mask: mask of available paths
 *
 * Shift @lpm right until @lpm and @mask have at least one bit in common or
 * until @lpm is zero. Return the resulting lpm.
 */
int lpm_adjust(int lpm, int mask)
{
        while (lpm && ((lpm & mask) == 0))
                lpm >>= 1;
        return lpm;
}
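
/*
 * Example: for lpm = 0x80 and mask = 0x28, the loop shifts lpm through 0x40
 * to 0x20; since 0x20 & 0x28 is non-zero, 0x20 is returned. If the shift
 * reaches zero without finding a common bit, 0 is returned.
 */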

/*
 * Adjust path mask to use next path and reset retry count. Return resulting
 * path mask.
 */
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;

        if (!req->singlepath) {
                req->mask = 0;
                goto out;
        }
        req->retries    = req->maxretries;
        req->mask       = lpm_adjust(req->mask >> 1, req->lpm);
out:
        return req->mask;
}
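
/*
 * Example: with req->mask = 0x8080 (each path listed twice, see
 * ccw_request_start()) and req->lpm = 0x80, a path error shifts the mask
 * to 0x4040, which lpm_adjust() reduces to 0x0080, so the same path is
 * tried once more before the mask reaches zero and the request fails.
 */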

/*
 * Clean up device state and report to callback.
 */
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
        struct ccw_request *req = &cdev->private->req;

        if (req->done)
                return;
        req->done = 1;
        ccw_device_set_timeout(cdev, 0);
        memset(&cdev->private->irb, 0, sizeof(struct irb));
        if (rc && rc != -ENODEV && req->drc)
                rc = req->drc;
        req->callback(cdev, req->data, rc);
}

/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw1 *cp = req->cp;
        int rc = -EACCES;

        while (req->mask) {
                if (req->retries-- == 0) {
                        /* Retries exhausted, try next path. */
                        ccwreq_next_path(cdev);
                        continue;
                }
                /* Perform start function. */
                memset(&cdev->private->irb, 0, sizeof(struct irb));
                rc = cio_start(sch, cp, (u8) req->mask);
                if (rc == 0) {
                        /* I/O started successfully. */
                        ccw_device_set_timeout(cdev, req->timeout);
                        return;
                }
                if (rc == -ENODEV) {
                        /* Permanent device error. */
                        break;
                }
                if (rc == -EACCES) {
                        /* Permanent path error. */
                        ccwreq_next_path(cdev);
                        continue;
                }
                /* Temporary improper status. */
                rc = cio_clear(sch);
                if (rc)
                        break;
                return;
        }
        ccwreq_stop(cdev, rc);
}
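
/*
 * Example: with req->maxretries = 2, each position of the path mask gets at
 * most two cio_start() attempts before ccwreq_next_path() advances the mask;
 * when the mask reaches zero, the loop ends and ccwreq_stop() reports the
 * last error code (or the initial -EACCES if no start was attempted).
 */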

/**
 * ccw_request_start - perform I/O request
 * @cdev: ccw device
 *
 * Perform the I/O request specified by cdev->req.
 */
void ccw_request_start(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;

        if (req->singlepath) {
                /* Try all paths twice to counter link flapping. */
                req->mask = 0x8080;
        } else
                req->mask = req->lpm;

        req->retries    = req->maxretries;
        req->mask       = lpm_adjust(req->mask, req->lpm);
        req->drc        = 0;
        req->done       = 0;
        req->cancel     = 0;
        if (!req->mask)
                goto out_nopath;
        ccwreq_do(cdev);
        return;

out_nopath:
        ccwreq_stop(cdev, -EACCES);
}
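
/*
 * Usage sketch (illustrative; timeout, retry count and callback name are
 * made-up example values - compare the internal users in device_id.c and
 * device_pgid.c):
 *
 *      struct ccw_request *req = &cdev->private->req;
 *
 *      memset(req, 0, sizeof(*req));
 *      req->cp         = cp;           (channel program built by the caller)
 *      req->timeout    = 30 * HZ;
 *      req->maxretries = 5;
 *      req->lpm        = sch->schib.pmcw.pam & sch->opm;
 *      req->singlepath = 1;
 *      req->callback   = my_callback;
 *      ccw_request_start(cdev);
 */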

/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->req. Return non-zero if request
 * has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        int rc;

        if (req->done)
                return 1;
        req->cancel = 1;
        rc = cio_clear(sch);
        if (rc)
                ccwreq_stop(cdev, rc);
        return 0;
}
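
/*
 * Note: cancellation completes asynchronously - the clear function raises
 * an interrupt which ccw_request_handler() classifies as IO_KILLED, and
 * with req->cancel set the request is then finished with -EIO.
 */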

/*
 * Return the status of the internal I/O started on the specified ccw device.
 * Perform BASIC SENSE if required.
 */
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
        struct irb *irb = &cdev->private->irb;
        struct cmd_scsw *scsw = &irb->scsw.cmd;
        enum uc_todo todo;

        /* Perform BASIC SENSE if needed. */
        if (ccw_device_accumulate_and_sense(cdev, lcirb))
                return IO_RUNNING;
        /* Check for halt/clear interrupt. */
        if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
                return IO_KILLED;
        /* Check for path error. */
        if (scsw->cc == 3 || scsw->pno)
                return IO_PATH_ERROR;
        /* Handle BASIC SENSE data. */
        if (irb->esw.esw0.erw.cons) {
                CIO_TRACE_EVENT(2, "sensedata");
                CIO_HEX_EVENT(2, &cdev->private->dev_id,
                              sizeof(struct ccw_dev_id));
                CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
                /* Check for command reject. */
                if (irb->ecw[0] & SNS0_CMD_REJECT)
                        return IO_REJECTED;
                /* Ask the driver what to do */
                if (cdev->drv && cdev->drv->uc_handler) {
                        todo = cdev->drv->uc_handler(cdev, lcirb);
                        CIO_TRACE_EVENT(2, "uc_response");
                        CIO_HEX_EVENT(2, &todo, sizeof(todo));
                        switch (todo) {
                        case UC_TODO_RETRY:
                                return IO_STATUS_ERROR;
                        case UC_TODO_RETRY_ON_NEW_PATH:
                                return IO_PATH_ERROR;
                        case UC_TODO_STOP:
                                return IO_REJECTED;
                        default:
                                return IO_STATUS_ERROR;
                        }
                }
                /* Assume that unexpected SENSE data implies an error. */
                return IO_STATUS_ERROR;
        }
        /* Check for channel errors. */
        if (scsw->cstat != 0)
                return IO_STATUS_ERROR;
        /* Check for device errors. */
        if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
                return IO_STATUS_ERROR;
        /* Check for final state. */
        if (!(scsw->dstat & DEV_STAT_DEV_END))
                return IO_RUNNING;
        /* Check for other improper status. */
        if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
                return IO_STATUS_ERROR;
        return IO_DONE;
}
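
/*
 * Summary of how ccw_request_handler() below acts on each status:
 * IO_RUNNING waits for the next interrupt, IO_DONE runs the check callback
 * and completes the request, IO_REJECTED fails it, IO_PATH_ERROR advances
 * to the next path, IO_STATUS_ERROR restarts on the current path, and
 * IO_KILLED fails with -EIO if cancelled, otherwise restarts.
 */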

/*
 * Log ccw request status.
 */
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
        struct ccw_request *req = &cdev->private->req;
        struct {
                struct ccw_dev_id dev_id;
                u16 retries;
                u8 lpm;
                u8 status;
        }  __attribute__ ((packed)) data;
        data.dev_id     = cdev->private->dev_id;
        data.retries    = req->retries;
        data.lpm        = (u8) req->mask;
        data.status     = (u8) status;
        CIO_TRACE_EVENT(2, "reqstat");
        CIO_HEX_EVENT(2, &data, sizeof(data));
}

/**
 * ccw_request_handler - interrupt handler for I/O request procedure.
 * @cdev: ccw device
 *
 * Handle interrupt during I/O request procedure.
 */
void ccw_request_handler(struct ccw_device *cdev)
{
        struct irb *irb = (struct irb *)&S390_lowcore.irb;
        struct ccw_request *req = &cdev->private->req;
        enum io_status status;
        int rc = -EOPNOTSUPP;

        /* Check status of I/O request. */
        status = ccwreq_status(cdev, irb);
        if (req->filter)
                status = req->filter(cdev, req->data, irb, status);
        if (status != IO_RUNNING)
                ccw_device_set_timeout(cdev, 0);
        if (status != IO_DONE && status != IO_RUNNING)
                ccwreq_log_status(cdev, status);
        switch (status) {
        case IO_DONE:
                break;
        case IO_RUNNING:
                return;
        case IO_REJECTED:
                goto err;
        case IO_PATH_ERROR:
                goto out_next_path;
        case IO_STATUS_ERROR:
                goto out_restart;
        case IO_KILLED:
                /* Check if request was cancelled on purpose. */
                if (req->cancel) {
                        rc = -EIO;
                        goto err;
                }
                goto out_restart;
        }
        /* Check back with request initiator. */
        if (!req->check)
                goto out;
        switch (req->check(cdev, req->data)) {
        case 0:
                break;
        case -EAGAIN:
                goto out_restart;
        case -EACCES:
                goto out_next_path;
        default:
                goto err;
        }
out:
        ccwreq_stop(cdev, 0);
        return;

out_next_path:
        /* Try next path and restart I/O. */
        if (!ccwreq_next_path(cdev)) {
                rc = -EACCES;
                goto err;
        }
out_restart:
        /* Restart. */
        ccwreq_do(cdev);
        return;
err:
        ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_timeout - timeout handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle timeout during I/O request procedure.
 */
void ccw_request_timeout(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        int rc = -ENODEV, chp;

        if (cio_update_schib(sch))
                goto err;

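        /*
         * pmcw.lpum is the mask of the path last used for the timed-out
         * I/O; bit (0x80 >> chp) selects CHPID slot chp, so the warning
         * below names the channel path(s) that did not answer.
         */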
        for (chp = 0; chp < 8; chp++) {
                if ((0x80 >> chp) & sch->schib.pmcw.lpum)
                        pr_warning("%s: No interrupt was received within %lus "
                                   "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
                                   dev_name(&cdev->dev), req->timeout / HZ,
                                   scsw_cstat(&sch->schib.scsw),
                                   scsw_dstat(&sch->schib.scsw),
                                   sch->schid.cssid,
                                   sch->schib.pmcw.chpid[chp]);
        }

        if (!ccwreq_next_path(cdev)) {
                /*
                 * Set the final return code for this request: ccwreq_stop()
                 * reports req->drc in place of any error other than -ENODEV.
                 */
                req->drc = -ETIME;
        }
        rc = cio_clear(sch);
        if (rc)
                goto err;
        return;

err:
        ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_notoper - notoper handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle notoper during I/O request procedure.
 */
void ccw_request_notoper(struct ccw_device *cdev)
{
        ccwreq_stop(cdev, -ENODEV);
}