linux/drivers/scsi/scsi_transport_srp.c
/*
 * SCSI RDMA Protocol (SRP) transport class
 *
 * Copyright (C) 2007 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
#include "scsi_priv.h"

struct srp_host_attrs {
	atomic_t next_port_id;
};
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)

#define SRP_HOST_ATTRS 0
#define SRP_RPORT_ATTRS 8

struct srp_internal {
	struct scsi_transport_template t;
	struct srp_function_template *f;

	struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];

	struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
	struct transport_container rport_attr_cont;
};

#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)

#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)
static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
{
	return dev_to_shost(r->dev.parent);
}

static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
{
	return transport_class_to_srp_rport(&shost->shost_gendev);
}

/**
 * srp_tmo_valid() - check timeout combination validity
 * @reconnect_delay: Reconnect delay in seconds.
 * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
 * @dev_loss_tmo: Device loss timeout in seconds.
 *
 * The combination of the timeout parameters must be such that SCSI commands
 * are finished in a reasonable time. Hence do not allow the fast I/O fail
 * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to
 * exceed that limit if failing I/O fast has been disabled. Furthermore, these
 * parameters must be such that multipath can detect failed paths timely.
 * Hence do not allow all three parameters to be disabled simultaneously.
 */
int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
{
	if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
		return -EINVAL;
	if (reconnect_delay == 0)
		return -EINVAL;
	if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;
	if (fast_io_fail_tmo < 0 &&
	    dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;
	if (dev_loss_tmo >= LONG_MAX / HZ)
		return -EINVAL;
	if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
	    fast_io_fail_tmo >= dev_loss_tmo)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(srp_tmo_valid);
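
/*
 * Example (illustrative sketch, not from an in-tree driver): an LLD that
 * exposes all three timeouts would typically validate the combination with
 * srp_tmo_valid() before applying it; a rejected combination, e.g.
 * fast_io_fail_tmo >= dev_loss_tmo, yields -EINVAL. All names below are
 * hypothetical.
 *
 *	static int example_set_tmo(struct srp_rport *rport, int reconnect_delay,
 *				   int fast_io_fail_tmo, int dev_loss_tmo)
 *	{
 *		int res = srp_tmo_valid(reconnect_delay, fast_io_fail_tmo,
 *					dev_loss_tmo);
 *
 *		if (res)
 *			return res;
 *		rport->reconnect_delay = reconnect_delay;
 *		rport->fast_io_fail_tmo = fast_io_fail_tmo;
 *		rport->dev_loss_tmo = dev_loss_tmo;
 *		return 0;
 *	}
 */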

static int srp_host_setup(struct transport_container *tc, struct device *dev,
			  struct device *cdev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);

	atomic_set(&srp_host->next_port_id, 0);
	return 0;
}

static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
			       NULL, NULL);

static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
			       NULL, NULL, NULL);

#define SRP_PID(p) \
	(p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
	(p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
	(p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
	(p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]

#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
	"%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"

static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
}

static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);

static const struct {
	u32 value;
	char *name;
} srp_rport_role_names[] = {
	{SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"},
	{SRP_RPORT_ROLE_TARGET, "SRP Target"},
};

static ssize_t
show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++)
		if (srp_rport_role_names[i].value == rport->roles) {
			name = srp_rport_role_names[i].name;
			break;
		}
	return sprintf(buf, "%s\n", name ? : "unknown");
}

static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);

static ssize_t store_srp_rport_delete(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct srp_internal *i = to_srp_internal(shost->transportt);

	if (i->f->rport_delete) {
		i->f->rport_delete(rport);
		return count;
	} else {
		return -ENOSYS;
	}
}

static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);

static ssize_t show_srp_rport_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	static const char *const state_name[] = {
		[SRP_RPORT_RUNNING]	= "running",
		[SRP_RPORT_BLOCKED]	= "blocked",
		[SRP_RPORT_FAIL_FAST]	= "fail-fast",
		[SRP_RPORT_LOST]	= "lost",
	};
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	enum srp_rport_state state = rport->state;

	return sprintf(buf, "%s\n",
		       (unsigned)state < ARRAY_SIZE(state_name) ?
		       state_name[state] : "???");
}

static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL);

static ssize_t srp_show_tmo(char *buf, int tmo)
{
	return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
}

int srp_parse_tmo(int *tmo, const char *buf)
{
	int res = 0;

	if (strncmp(buf, "off", 3) != 0)
		res = kstrtoint(buf, 0, tmo);
	else
		*tmo = -1;

	return res;
}
EXPORT_SYMBOL(srp_parse_tmo);
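
/*
 * Behavior sketch for srp_parse_tmo() (hypothetical call sites): the string
 * "off" maps to -1, meaning "disabled"; anything else is handed to
 * kstrtoint(), which also accepts a single trailing newline as written by
 * sysfs:
 *
 *	int tmo, res;
 *
 *	res = srp_parse_tmo(&tmo, "off");	res == 0, tmo == -1
 *	res = srp_parse_tmo(&tmo, "15\n");	res == 0, tmo == 15
 *	res = srp_parse_tmo(&tmo, "bogus");	res == -EINVAL, tmo unchanged
 */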

static ssize_t show_reconnect_delay(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->reconnect_delay);
}

static ssize_t store_reconnect_delay(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, const size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res, delay;

	res = srp_parse_tmo(&delay, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(delay, rport->fast_io_fail_tmo,
			    rport->dev_loss_tmo);
	if (res)
		goto out;

	if (rport->reconnect_delay <= 0 && delay > 0 &&
	    rport->state != SRP_RPORT_RUNNING) {
		queue_delayed_work(system_long_wq, &rport->reconnect_work,
				   delay * HZ);
	} else if (delay <= 0) {
		cancel_delayed_work(&rport->reconnect_work);
	}
	rport->reconnect_delay = delay;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay,
		   store_reconnect_delay);

static ssize_t show_failed_reconnects(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return sprintf(buf, "%d\n", rport->failed_reconnects);
}

static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL);

static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->fast_io_fail_tmo);
}

static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res;
	int fast_io_fail_tmo;

	res = srp_parse_tmo(&fast_io_fail_tmo, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
			    rport->dev_loss_tmo);
	if (res)
		goto out;
	rport->fast_io_fail_tmo = fast_io_fail_tmo;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
		   show_srp_rport_fast_io_fail_tmo,
		   store_srp_rport_fast_io_fail_tmo);

static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->dev_loss_tmo);
}

static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res;
	int dev_loss_tmo;

	res = srp_parse_tmo(&dev_loss_tmo, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
			    dev_loss_tmo);
	if (res)
		goto out;
	rport->dev_loss_tmo = dev_loss_tmo;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR,
		   show_srp_rport_dev_loss_tmo,
		   store_srp_rport_dev_loss_tmo);

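/*
 * Summary of the state transitions enforced by srp_rport_set_state() below:
 * SRP_RPORT_RUNNING and SRP_RPORT_FAIL_FAST may be entered from any state
 * except SRP_RPORT_LOST, SRP_RPORT_BLOCKED may only be entered from
 * SRP_RPORT_RUNNING, and SRP_RPORT_LOST may be entered from any state.
 * In other words, SRP_RPORT_LOST is a final state.
 */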
static int srp_rport_set_state(struct srp_rport *rport,
			       enum srp_rport_state new_state)
{
	enum srp_rport_state old_state = rport->state;

	lockdep_assert_held(&rport->mutex);

	switch (new_state) {
	case SRP_RPORT_RUNNING:
		switch (old_state) {
		case SRP_RPORT_LOST:
			goto invalid;
		default:
			break;
		}
		break;
	case SRP_RPORT_BLOCKED:
		switch (old_state) {
		case SRP_RPORT_RUNNING:
			break;
		default:
			goto invalid;
		}
		break;
	case SRP_RPORT_FAIL_FAST:
		switch (old_state) {
		case SRP_RPORT_LOST:
			goto invalid;
		default:
			break;
		}
		break;
	case SRP_RPORT_LOST:
		break;
	}
	rport->state = new_state;
	return 0;

invalid:
	return -EINVAL;
}

/**
 * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
 * @work: Work structure used for scheduling this operation.
 */
static void srp_reconnect_work(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, reconnect_work);
	struct Scsi_Host *shost = rport_to_shost(rport);
	int delay, res;

	res = srp_reconnect_rport(rport);
	if (res != 0) {
		shost_printk(KERN_ERR, shost,
			     "reconnect attempt %d failed (%d)\n",
			     ++rport->failed_reconnects, res);
		delay = rport->reconnect_delay *
			min(100, max(1, rport->failed_reconnects - 10));
		if (delay > 0)
			queue_delayed_work(system_long_wq,
					   &rport->reconnect_work, delay * HZ);
	}
}

/**
 * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
 * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
 *
 * To do: add support for scsi-mq in this function.
 */
static int scsi_request_fn_active(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	struct request_queue *q;
	int request_fn_active = 0;

	shost_for_each_device(sdev, shost) {
		q = sdev->request_queue;

		spin_lock_irq(q->queue_lock);
		request_fn_active += q->request_fn_active;
		spin_unlock_irq(q->queue_lock);
	}

	return request_fn_active;
}

/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
{
	while (scsi_request_fn_active(shost))
		msleep(20);
}

static void __rport_fail_io_fast(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i;

	lockdep_assert_held(&rport->mutex);

	if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
		return;
	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);

	/* Involve the LLD if possible to terminate all I/O on the rport. */
	i = to_srp_internal(shost->transportt);
	if (i->f->terminate_rport_io) {
		srp_wait_for_queuecommand(shost);
		i->f->terminate_rport_io(rport);
	}
}

/**
 * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_fast_io_fail_timedout(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, fast_io_fail_work);
	struct Scsi_Host *shost = rport_to_shost(rport);

	pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
		dev_name(&rport->dev), dev_name(&shost->shost_gendev));

	mutex_lock(&rport->mutex);
	if (rport->state == SRP_RPORT_BLOCKED)
		__rport_fail_io_fast(rport);
	mutex_unlock(&rport->mutex);
}

/**
 * rport_dev_loss_timedout() - device loss timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_dev_loss_timedout(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, dev_loss_work);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i = to_srp_internal(shost->transportt);

	pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
		dev_name(&rport->dev), dev_name(&shost->shost_gendev));

	mutex_lock(&rport->mutex);
	WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
	mutex_unlock(&rport->mutex);

	i->f->rport_delete(rport);
}

static void __srp_start_tl_fail_timers(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	int delay, fast_io_fail_tmo, dev_loss_tmo;

	lockdep_assert_held(&rport->mutex);

	delay = rport->reconnect_delay;
	fast_io_fail_tmo = rport->fast_io_fail_tmo;
	dev_loss_tmo = rport->dev_loss_tmo;
	pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev),
		 rport->state);

	if (rport->state == SRP_RPORT_LOST)
		return;
	if (delay > 0)
		queue_delayed_work(system_long_wq, &rport->reconnect_work,
				   1UL * delay * HZ);
	if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) &&
	    srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
		pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
			 rport->state);
		scsi_target_block(&shost->shost_gendev);
		if (fast_io_fail_tmo >= 0)
			queue_delayed_work(system_long_wq,
					   &rport->fast_io_fail_work,
					   1UL * fast_io_fail_tmo * HZ);
		if (dev_loss_tmo >= 0)
			queue_delayed_work(system_long_wq,
					   &rport->dev_loss_work,
					   1UL * dev_loss_tmo * HZ);
	}
}

/**
 * srp_start_tl_fail_timers() - start the transport layer failure timers
 * @rport: SRP target port.
 *
 * Start the transport layer fast I/O failure and device loss timers. Do not
 * modify a timer that was already started.
 */
void srp_start_tl_fail_timers(struct srp_rport *rport)
{
	mutex_lock(&rport->mutex);
	__srp_start_tl_fail_timers(rport);
	mutex_unlock(&rport->mutex);
}
EXPORT_SYMBOL(srp_start_tl_fail_timers);
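
/*
 * Example (hypothetical LLD): a driver would typically invoke
 * srp_start_tl_fail_timers() from the code path that detects a transport
 * layer failure, e.g. an RDMA CM disconnect handler, so that blocking and
 * the fast_io_fail / dev_loss timers start as soon as the failure is seen.
 * example_target and example_handle_disconnect() are made-up names.
 *
 *	static void example_handle_disconnect(struct example_target *target)
 *	{
 *		srp_start_tl_fail_timers(target->rport);
 *	}
 */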

/**
 * srp_reconnect_rport() - reconnect to an SRP target port
 * @rport: SRP target port.
 *
 * Blocks SCSI command queueing before invoking reconnect() such that
 * queuecommand() won't be invoked concurrently with reconnect() from outside
 * the SCSI EH. This is important since a reconnect() implementation may
 * reallocate resources needed by queuecommand().
 *
 * Notes:
 * - This function neither waits until outstanding requests have finished nor
 *   tries to abort these. It is the responsibility of the reconnect()
 *   function to finish outstanding commands before reconnecting to the target
 *   port.
 * - It is the responsibility of the caller to ensure that the resources
 *   reallocated by the reconnect() function won't be used while this function
 *   is in progress. One possible strategy is to invoke this function from
 *   the context of the SCSI EH thread only. Another possible strategy is to
 *   lock the rport mutex inside each SCSI LLD callback that can be invoked by
 *   the SCSI EH (the scsi_host_template.eh_*() functions and also the
 *   scsi_host_template.queuecommand() function).
 */
int srp_reconnect_rport(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i = to_srp_internal(shost->transportt);
	struct scsi_device *sdev;
	int res;

	pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev));

	res = mutex_lock_interruptible(&rport->mutex);
	if (res)
		goto out;
	scsi_target_block(&shost->shost_gendev);
	srp_wait_for_queuecommand(shost);
	res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
	pr_debug("%s (state %d): transport.reconnect() returned %d\n",
		 dev_name(&shost->shost_gendev), rport->state, res);
	if (res == 0) {
		cancel_delayed_work(&rport->fast_io_fail_work);
		cancel_delayed_work(&rport->dev_loss_work);

		rport->failed_reconnects = 0;
		srp_rport_set_state(rport, SRP_RPORT_RUNNING);
		scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING);
		/*
		 * If the SCSI error handler has offlined one or more devices,
		 * invoking scsi_target_unblock() won't change the state of
		 * these devices into running so do that explicitly.
		 */
		spin_lock_irq(shost->host_lock);
		__shost_for_each_device(sdev, shost)
			if (sdev->sdev_state == SDEV_OFFLINE)
				sdev->sdev_state = SDEV_RUNNING;
		spin_unlock_irq(shost->host_lock);
	} else if (rport->state == SRP_RPORT_RUNNING) {
		/*
		 * srp_reconnect_rport() has been invoked with fast_io_fail
		 * and dev_loss off. Mark the port as failed and start the TL
		 * failure timers if these had not yet been started.
		 */
		__rport_fail_io_fast(rport);
		scsi_target_unblock(&shost->shost_gendev,
				    SDEV_TRANSPORT_OFFLINE);
		__srp_start_tl_fail_timers(rport);
	} else if (rport->state != SRP_RPORT_BLOCKED) {
		scsi_target_unblock(&shost->shost_gendev,
				    SDEV_TRANSPORT_OFFLINE);
	}
	mutex_unlock(&rport->mutex);

out:
	return res;
}
EXPORT_SYMBOL(srp_reconnect_rport);
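
/*
 * Example (illustrative sketch): one way to satisfy the locking rules in the
 * comment above is to call srp_reconnect_rport() from the SCSI EH only, e.g.
 * from a host reset handler. example_target and host_to_target() are
 * hypothetical LLD-private names.
 *
 *	static int example_eh_host_reset_handler(struct scsi_cmnd *scmd)
 *	{
 *		struct example_target *target =
 *			host_to_target(scmd->device->host);
 *
 *		return srp_reconnect_rport(target->rport) == 0 ?
 *			SUCCESS : FAILED;
 *	}
 */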

/**
 * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
 * @scmd: SCSI command.
 *
 * If a timeout occurs while an rport is in the blocked state, ask the SCSI
 * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
 * handle the timeout (BLK_EH_NOT_HANDLED).
 *
 * Note: This function is called from soft-IRQ context and with the request
 * queue lock held.
 */
static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	struct srp_internal *i = to_srp_internal(shost->transportt);
	struct srp_rport *rport = shost_to_rport(shost);

	pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
	return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
		i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
		BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
}

static void srp_rport_release(struct device *dev)
{
	struct srp_rport *rport = dev_to_rport(dev);

	put_device(dev->parent);
	kfree(rport);
}

static int scsi_is_srp_rport(const struct device *dev)
{
	return dev->release == srp_rport_release;
}

static int srp_rport_match(struct attribute_container *cont,
			   struct device *dev)
{
	struct Scsi_Host *shost;
	struct srp_internal *i;

	if (!scsi_is_srp_rport(dev))
		return 0;

	shost = dev_to_shost(dev->parent);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
		return 0;

	i = to_srp_internal(shost->transportt);
	return &i->rport_attr_cont.ac == cont;
}

static int srp_host_match(struct attribute_container *cont, struct device *dev)
{
	struct Scsi_Host *shost;
	struct srp_internal *i;

	if (!scsi_is_host_device(dev))
		return 0;

	shost = dev_to_shost(dev);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
		return 0;

	i = to_srp_internal(shost->transportt);
	return &i->t.host_attrs.ac == cont;
}

/**
 * srp_rport_get() - increment rport reference count
 * @rport: SRP target port.
 */
void srp_rport_get(struct srp_rport *rport)
{
	get_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_get);

/**
 * srp_rport_put() - decrement rport reference count
 * @rport: SRP target port.
 */
void srp_rport_put(struct srp_rport *rport)
{
	put_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_put);

/**
 * srp_rport_add - add an SRP remote port to the device hierarchy
 * @shost:	scsi host the remote port is connected to.
 * @ids:	The port id for the remote port.
 *
 * Publishes a port to the rest of the system.
 */
struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
				struct srp_rport_identifiers *ids)
{
	struct srp_rport *rport;
	struct device *parent = &shost->shost_gendev;
	struct srp_internal *i = to_srp_internal(shost->transportt);
	int id, ret;

	rport = kzalloc(sizeof(*rport), GFP_KERNEL);
	if (!rport)
		return ERR_PTR(-ENOMEM);

	mutex_init(&rport->mutex);

	device_initialize(&rport->dev);

	rport->dev.parent = get_device(parent);
	rport->dev.release = srp_rport_release;

	memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
	rport->roles = ids->roles;

	if (i->f->reconnect)
		rport->reconnect_delay = i->f->reconnect_delay ?
			*i->f->reconnect_delay : 10;
	INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work);
	rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
		*i->f->fast_io_fail_tmo : 15;
	rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
	INIT_DELAYED_WORK(&rport->fast_io_fail_work,
			  rport_fast_io_fail_timedout);
	INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);

	id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
	dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);

	transport_setup_device(&rport->dev);

	ret = device_add(&rport->dev);
	if (ret) {
		transport_destroy_device(&rport->dev);
		put_device(&rport->dev);
		return ERR_PTR(ret);
	}

	transport_add_device(&rport->dev);
	transport_configure_device(&rport->dev);

	return rport;
}
EXPORT_SYMBOL_GPL(srp_rport_add);
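
/*
 * Example (hypothetical LLD): after a successful login the driver fills in
 * the 128-bit port identifier and the role and publishes the rport. The
 * rport's lld_data member is available as a back-pointer for use in the
 * transport callbacks; target and example_port_id are made-up names.
 *
 *	struct srp_rport_identifiers ids;
 *	struct srp_rport *rport;
 *
 *	memcpy(ids.port_id, example_port_id, sizeof(ids.port_id));
 *	ids.roles = SRP_RPORT_ROLE_TARGET;
 *	rport = srp_rport_add(shost, &ids);
 *	if (IS_ERR(rport))
 *		return PTR_ERR(rport);
 *	rport->lld_data = target;
 */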

/**
 * srp_rport_del  -  remove an SRP remote port
 * @rport:	SRP remote port to remove
 *
 * Removes the specified SRP remote port.
 */
void srp_rport_del(struct srp_rport *rport)
{
	struct device *dev = &rport->dev;

	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);

	put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);

static int do_srp_rport_del(struct device *dev, void *data)
{
	if (scsi_is_srp_rport(dev))
		srp_rport_del(dev_to_rport(dev));
	return 0;
}

/**
 * srp_remove_host  -  tear down a Scsi_Host's SRP data structures
 * @shost:	Scsi Host that is torn down
 *
 * Removes all SRP remote ports for a given Scsi_Host.
 * Must be called just before scsi_remove_host() for SRP HBAs.
 */
void srp_remove_host(struct Scsi_Host *shost)
{
	device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
}
EXPORT_SYMBOL_GPL(srp_remove_host);

/**
 * srp_stop_rport_timers - stop the transport layer recovery timers
 * @rport: SRP remote port for which to stop the timers.
 *
 * Must be called after srp_remove_host() and scsi_remove_host(). The caller
 * must hold a reference on the rport (rport->dev) and on the SCSI host
 * (rport->dev.parent).
 */
void srp_stop_rport_timers(struct srp_rport *rport)
{
	mutex_lock(&rport->mutex);
	if (rport->state == SRP_RPORT_BLOCKED)
		__rport_fail_io_fast(rport);
	srp_rport_set_state(rport, SRP_RPORT_LOST);
	mutex_unlock(&rport->mutex);

	cancel_delayed_work_sync(&rport->reconnect_work);
	cancel_delayed_work_sync(&rport->fast_io_fail_work);
	cancel_delayed_work_sync(&rport->dev_loss_work);
}
EXPORT_SYMBOL_GPL(srp_stop_rport_timers);
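
/*
 * Example (illustrative): teardown ordering for a hypothetical LLD. Per the
 * comment above, the timers are stopped only after both srp_remove_host()
 * and scsi_remove_host() have run, while references on the rport and on the
 * SCSI host are still held:
 *
 *	srp_rport_get(target->rport);
 *	srp_remove_host(shost);
 *	scsi_remove_host(shost);
 *	srp_stop_rport_timers(target->rport);
 *	srp_rport_put(target->rport);
 *	scsi_host_put(shost);
 */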

static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
				 int result)
{
	struct srp_internal *i = to_srp_internal(shost->transportt);
	return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
}

static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
{
	struct srp_internal *i = to_srp_internal(shost->transportt);
	return i->f->it_nexus_response(shost, nexus, result);
}

/**
 * srp_attach_transport  -  instantiate SRP transport template
 * @ft:		SRP transport class function template
 */
struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *ft)
{
	int count;
	struct srp_internal *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->t.eh_timed_out = srp_timed_out;

	i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
	i->t.it_nexus_response = srp_it_nexus_response;

	i->t.host_size = sizeof(struct srp_host_attrs);
	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
	i->t.host_attrs.ac.class = &srp_host_class.class;
	i->t.host_attrs.ac.match = srp_host_match;
	i->host_attrs[0] = NULL;
	transport_container_register(&i->t.host_attrs);

	i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
	i->rport_attr_cont.ac.class = &srp_rport_class.class;
	i->rport_attr_cont.ac.match = srp_rport_match;

	count = 0;
	i->rport_attrs[count++] = &dev_attr_port_id;
	i->rport_attrs[count++] = &dev_attr_roles;
	if (ft->has_rport_state) {
		i->rport_attrs[count++] = &dev_attr_state;
		i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo;
		i->rport_attrs[count++] = &dev_attr_dev_loss_tmo;
	}
	if (ft->reconnect) {
		i->rport_attrs[count++] = &dev_attr_reconnect_delay;
		i->rport_attrs[count++] = &dev_attr_failed_reconnects;
	}
	if (ft->rport_delete)
		i->rport_attrs[count++] = &dev_attr_delete;
	i->rport_attrs[count++] = NULL;
	BUG_ON(count > ARRAY_SIZE(i->rport_attrs));

	transport_container_register(&i->rport_attr_cont);

	i->f = ft;

	return &i->t;
}
EXPORT_SYMBOL_GPL(srp_attach_transport);
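
/*
 * Example (hypothetical LLD): fill in a srp_function_template once at module
 * load and attach it; the returned template is assigned to shost->transportt
 * before scsi_add_host(). All example_* names are made up.
 *
 *	static struct srp_function_template example_srp_ft = {
 *		.has_rport_state	= true,
 *		.reset_timer_if_blocked	= true,
 *		.reconnect		= example_rport_reconnect,
 *		.terminate_rport_io	= example_terminate_io,
 *		.rport_delete		= example_rport_delete,
 *	};
 *
 *	static struct scsi_transport_template *example_transport;
 *
 *	static int __init example_init(void)
 *	{
 *		example_transport = srp_attach_transport(&example_srp_ft);
 *		if (!example_transport)
 *			return -ENOMEM;
 *		...
 *	}
 */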

/**
 * srp_release_transport  -  release SRP transport template instance
 * @t:		transport template instance
 */
void srp_release_transport(struct scsi_transport_template *t)
{
	struct srp_internal *i = to_srp_internal(t);

	transport_container_unregister(&i->t.host_attrs);
	transport_container_unregister(&i->rport_attr_cont);

	kfree(i);
}
EXPORT_SYMBOL_GPL(srp_release_transport);

static __init int srp_transport_init(void)
{
	int ret;

	ret = transport_class_register(&srp_host_class);
	if (ret)
		return ret;
	ret = transport_class_register(&srp_rport_class);
	if (ret)
		goto unregister_host_class;

	return 0;
unregister_host_class:
	transport_class_unregister(&srp_host_class);
	return ret;
}

static void __exit srp_transport_exit(void)
{
	transport_class_unregister(&srp_host_class);
	transport_class_unregister(&srp_rport_class);
}

MODULE_AUTHOR("FUJITA Tomonori");
MODULE_DESCRIPTION("SRP Transport Attributes");
MODULE_LICENSE("GPL");

module_init(srp_transport_init);
module_exit(srp_transport_exit);