linux/drivers/scsi/scsi_transport_srp.c
/*
 * SCSI RDMA (SRP) transport class
 *
 * Copyright (C) 2007 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
#include "scsi_priv.h"

struct srp_host_attrs {
        atomic_t next_port_id;
};
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)

#define SRP_HOST_ATTRS 0
#define SRP_RPORT_ATTRS 8

struct srp_internal {
        struct scsi_transport_template t;
        struct srp_function_template *f;

        struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];

        struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
        struct transport_container rport_attr_cont;
};

#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)

#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)
static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
{
        return dev_to_shost(r->dev.parent);
}

static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
{
        return transport_class_to_srp_rport(&shost->shost_gendev);
}
/**
 * srp_tmo_valid() - check timeout combination validity
 * @reconnect_delay: Reconnect delay in seconds.
 * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
 * @dev_loss_tmo: Device loss timeout in seconds.
 *
 * The combination of the timeout parameters must be such that SCSI commands
 * are finished in a reasonable time. Hence do not allow the fast I/O fail
 * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to
 * exceed that limit if failing I/O fast has been disabled. Furthermore, these
 * parameters must be such that multipath can detect failed paths in a timely
 * manner. Hence do not allow all three parameters to be disabled
 * simultaneously.
 */
int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
{
        if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
                return -EINVAL;
        if (reconnect_delay == 0)
                return -EINVAL;
        if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                return -EINVAL;
        if (fast_io_fail_tmo < 0 &&
            dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                return -EINVAL;
        if (dev_loss_tmo >= LONG_MAX / HZ)
                return -EINVAL;
        if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
            fast_io_fail_tmo >= dev_loss_tmo)
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL_GPL(srp_tmo_valid);
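
/*
 * Usage sketch (not part of this file; "foo" names are hypothetical): an
 * SRP LLD that exposes its own timeout knobs would validate a new value
 * against the other two before applying it, e.g.:
 *
 *      if (srp_tmo_valid(foo->reconnect_delay, foo->fast_io_fail_tmo,
 *                        new_dev_loss_tmo) < 0)
 *              return -EINVAL;
 *      foo->dev_loss_tmo = new_dev_loss_tmo;
 */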

static int srp_host_setup(struct transport_container *tc, struct device *dev,
                          struct device *cdev)
{
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);

        atomic_set(&srp_host->next_port_id, 0);
        return 0;
}

static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
                               NULL, NULL);

static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
                               NULL, NULL, NULL);

static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
                  char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        return sprintf(buf, "%16phC\n", rport->port_id);
}

static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);

static const struct {
        u32 value;
        char *name;
} srp_rport_role_names[] = {
        {SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"},
        {SRP_RPORT_ROLE_TARGET, "SRP Target"},
};

static ssize_t
show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
                     char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        int i;
        char *name = NULL;

        for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++)
                if (srp_rport_role_names[i].value == rport->roles) {
                        name = srp_rport_role_names[i].name;
                        break;
                }
        return sprintf(buf, "%s\n", name ? : "unknown");
}

static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);

static ssize_t store_srp_rport_delete(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct srp_internal *i = to_srp_internal(shost->transportt);

        if (i->f->rport_delete) {
                i->f->rport_delete(rport);
                return count;
        } else {
                return -ENOSYS;
        }
}

static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);

static ssize_t show_srp_rport_state(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        static const char *const state_name[] = {
                [SRP_RPORT_RUNNING]     = "running",
                [SRP_RPORT_BLOCKED]     = "blocked",
                [SRP_RPORT_FAIL_FAST]   = "fail-fast",
                [SRP_RPORT_LOST]        = "lost",
        };
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        enum srp_rport_state state = rport->state;

        return sprintf(buf, "%s\n",
                       (unsigned)state < ARRAY_SIZE(state_name) ?
                       state_name[state] : "???");
}

static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL);

static ssize_t srp_show_tmo(char *buf, int tmo)
{
        return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
}

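/**
 * srp_parse_tmo() - parse timeout
 * @tmo: Timeout to be set. *tmo is set to -1 if @buf holds "off".
 * @buf: Buffer holding either the keyword "off" or a number.
 */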
int srp_parse_tmo(int *tmo, const char *buf)
{
        int res = 0;

        if (strncmp(buf, "off", 3) != 0)
                res = kstrtoint(buf, 0, tmo);
        else
                *tmo = -1;

        return res;
}
EXPORT_SYMBOL(srp_parse_tmo);
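
/*
 * Behavior sketch for srp_parse_tmo() (illustrative only):
 *
 *      int tmo;
 *
 *      srp_parse_tmo(&tmo, "15");      sets tmo to 15 and returns 0
 *      srp_parse_tmo(&tmo, "off");     sets tmo to -1 and returns 0
 *      srp_parse_tmo(&tmo, "foo");     returns the kstrtoint() error code
 */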

static ssize_t show_reconnect_delay(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return srp_show_tmo(buf, rport->reconnect_delay);
}

static ssize_t store_reconnect_delay(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, const size_t count)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        int res, delay;

        res = srp_parse_tmo(&delay, buf);
        if (res)
                goto out;
        res = srp_tmo_valid(delay, rport->fast_io_fail_tmo,
                            rport->dev_loss_tmo);
        if (res)
                goto out;

        if (rport->reconnect_delay <= 0 && delay > 0 &&
            rport->state != SRP_RPORT_RUNNING) {
                queue_delayed_work(system_long_wq, &rport->reconnect_work,
                                   delay * HZ);
        } else if (delay <= 0) {
                cancel_delayed_work(&rport->reconnect_work);
        }
        rport->reconnect_delay = delay;
        res = count;

out:
        return res;
}

static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay,
                   store_reconnect_delay);

static ssize_t show_failed_reconnects(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return sprintf(buf, "%d\n", rport->failed_reconnects);
}

static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL);

static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev,
                                               struct device_attribute *attr,
                                               char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return srp_show_tmo(buf, rport->fast_io_fail_tmo);
}

static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev,
                                                struct device_attribute *attr,
                                                const char *buf, size_t count)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        int res;
        int fast_io_fail_tmo;

        res = srp_parse_tmo(&fast_io_fail_tmo, buf);
        if (res)
                goto out;
        res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
                            rport->dev_loss_tmo);
        if (res)
                goto out;
        rport->fast_io_fail_tmo = fast_io_fail_tmo;
        res = count;

out:
        return res;
}

static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
                   show_srp_rport_fast_io_fail_tmo,
                   store_srp_rport_fast_io_fail_tmo);

static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return srp_show_tmo(buf, rport->dev_loss_tmo);
}

static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t count)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        int res;
        int dev_loss_tmo;

        res = srp_parse_tmo(&dev_loss_tmo, buf);
        if (res)
                goto out;
        res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
                            dev_loss_tmo);
        if (res)
                goto out;
        rport->dev_loss_tmo = dev_loss_tmo;
        res = count;

out:
        return res;
}

static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR,
                   show_srp_rport_dev_loss_tmo,
                   store_srp_rport_dev_loss_tmo);

static int srp_rport_set_state(struct srp_rport *rport,
                               enum srp_rport_state new_state)
{
        enum srp_rport_state old_state = rport->state;

        lockdep_assert_held(&rport->mutex);

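        /*
         * State transitions allowed by the nested switch below; everything
         * else fails with -EINVAL:
         *
         *   any state except LOST -> RUNNING
         *   RUNNING               -> BLOCKED
         *   any state except LOST -> FAIL_FAST
         *   any state             -> LOST
         */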
        switch (new_state) {
        case SRP_RPORT_RUNNING:
                switch (old_state) {
                case SRP_RPORT_LOST:
                        goto invalid;
                default:
                        break;
                }
                break;
        case SRP_RPORT_BLOCKED:
                switch (old_state) {
                case SRP_RPORT_RUNNING:
                        break;
                default:
                        goto invalid;
                }
                break;
        case SRP_RPORT_FAIL_FAST:
                switch (old_state) {
                case SRP_RPORT_LOST:
                        goto invalid;
                default:
                        break;
                }
                break;
        case SRP_RPORT_LOST:
                break;
        }
        rport->state = new_state;
        return 0;

invalid:
        return -EINVAL;
}

/**
 * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
 * @work: Work structure used for scheduling this operation.
 */
static void srp_reconnect_work(struct work_struct *work)
{
        struct srp_rport *rport = container_of(to_delayed_work(work),
                                        struct srp_rport, reconnect_work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        int delay, res;

        res = srp_reconnect_rport(rport);
        if (res != 0) {
                shost_printk(KERN_ERR, shost,
                             "reconnect attempt %d failed (%d)\n",
                             ++rport->failed_reconnects, res);
                delay = rport->reconnect_delay *
                        min(100, max(1, rport->failed_reconnects - 10));
                if (delay > 0)
                        queue_delayed_work(system_long_wq,
                                           &rport->reconnect_work, delay * HZ);
        }
}

static void __rport_fail_io_fast(struct srp_rport *rport)
{
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct srp_internal *i;

        lockdep_assert_held(&rport->mutex);

        if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
                return;
        /*
         * Call scsi_target_block() to wait for ongoing shost->queuecommand()
         * calls before invoking i->f->terminate_rport_io().
         */
        scsi_target_block(rport->dev.parent);
        scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);

        /* Involve the LLD if possible to terminate all I/O on the rport. */
        i = to_srp_internal(shost->transportt);
        if (i->f->terminate_rport_io)
                i->f->terminate_rport_io(rport);
}

/**
 * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_fast_io_fail_timedout(struct work_struct *work)
{
        struct srp_rport *rport = container_of(to_delayed_work(work),
                                        struct srp_rport, fast_io_fail_work);
        struct Scsi_Host *shost = rport_to_shost(rport);

        pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
                dev_name(&rport->dev), dev_name(&shost->shost_gendev));

        mutex_lock(&rport->mutex);
        if (rport->state == SRP_RPORT_BLOCKED)
                __rport_fail_io_fast(rport);
        mutex_unlock(&rport->mutex);
}

/**
 * rport_dev_loss_timedout() - device loss timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_dev_loss_timedout(struct work_struct *work)
{
        struct srp_rport *rport = container_of(to_delayed_work(work),
                                        struct srp_rport, dev_loss_work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct srp_internal *i = to_srp_internal(shost->transportt);

        pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
                dev_name(&rport->dev), dev_name(&shost->shost_gendev));

        mutex_lock(&rport->mutex);
        WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
        scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
        mutex_unlock(&rport->mutex);

        i->f->rport_delete(rport);
}

static void __srp_start_tl_fail_timers(struct srp_rport *rport)
{
        struct Scsi_Host *shost = rport_to_shost(rport);
        int delay, fast_io_fail_tmo, dev_loss_tmo;

        lockdep_assert_held(&rport->mutex);

        delay = rport->reconnect_delay;
        fast_io_fail_tmo = rport->fast_io_fail_tmo;
        dev_loss_tmo = rport->dev_loss_tmo;
        pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev),
                 rport->state);

        if (rport->state == SRP_RPORT_LOST)
                return;
        if (delay > 0)
                queue_delayed_work(system_long_wq, &rport->reconnect_work,
                                   1UL * delay * HZ);
        if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) &&
            srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
                pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
                         rport->state);
                scsi_target_block(&shost->shost_gendev);
                if (fast_io_fail_tmo >= 0)
                        queue_delayed_work(system_long_wq,
                                           &rport->fast_io_fail_work,
                                           1UL * fast_io_fail_tmo * HZ);
                if (dev_loss_tmo >= 0)
                        queue_delayed_work(system_long_wq,
                                           &rport->dev_loss_work,
                                           1UL * dev_loss_tmo * HZ);
        }
}

/**
 * srp_start_tl_fail_timers() - start the transport layer failure timers
 * @rport: SRP target port.
 *
 * Start the transport layer fast I/O failure and device loss timers. Do not
 * modify a timer that was already started.
 */
void srp_start_tl_fail_timers(struct srp_rport *rport)
{
        mutex_lock(&rport->mutex);
        __srp_start_tl_fail_timers(rport);
        mutex_unlock(&rport->mutex);
}
EXPORT_SYMBOL(srp_start_tl_fail_timers);
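
/*
 * Usage sketch (hypothetical LLD "foo"): an initiator driver typically
 * calls this function from the code path that detects a transport layer
 * error, e.g. from a work item:
 *
 *      static void foo_tl_err_work(struct work_struct *work)
 *      {
 *              struct foo_target *target =
 *                      container_of(work, struct foo_target, tl_err_work);
 *
 *              if (target->rport)
 *                      srp_start_tl_fail_timers(target->rport);
 *      }
 */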

/**
 * srp_reconnect_rport() - reconnect to an SRP target port
 * @rport: SRP target port.
 *
 * Blocks SCSI command queueing before invoking reconnect() such that
 * queuecommand() won't be invoked concurrently with reconnect() from outside
 * the SCSI EH. This is important since a reconnect() implementation may
 * reallocate resources needed by queuecommand().
 *
 * Notes:
 * - This function neither waits until outstanding requests have finished nor
 *   tries to abort these. It is the responsibility of the reconnect()
 *   function to finish outstanding commands before reconnecting to the target
 *   port.
 * - It is the responsibility of the caller to ensure that the resources
 *   reallocated by the reconnect() function won't be used while this function
 *   is in progress. One possible strategy is to invoke this function from
 *   the context of the SCSI EH thread only. Another possible strategy is to
 *   lock the rport mutex inside each SCSI LLD callback that can be invoked by
 *   the SCSI EH (the scsi_host_template.eh_*() functions and also the
 *   scsi_host_template.queuecommand() function).
 */
int srp_reconnect_rport(struct srp_rport *rport)
{
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct srp_internal *i = to_srp_internal(shost->transportt);
        struct scsi_device *sdev;
        int res;

        pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev));

        res = mutex_lock_interruptible(&rport->mutex);
        if (res)
                goto out;
        scsi_target_block(&shost->shost_gendev);
        res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
        pr_debug("%s (state %d): transport.reconnect() returned %d\n",
                 dev_name(&shost->shost_gendev), rport->state, res);
        if (res == 0) {
                cancel_delayed_work(&rport->fast_io_fail_work);
                cancel_delayed_work(&rport->dev_loss_work);

                rport->failed_reconnects = 0;
                srp_rport_set_state(rport, SRP_RPORT_RUNNING);
                scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING);
                /*
                 * If the SCSI error handler has offlined one or more devices,
                 * invoking scsi_target_unblock() won't change the state of
                 * these devices into running so do that explicitly.
                 */
                shost_for_each_device(sdev, shost) {
                        mutex_lock(&sdev->state_mutex);
                        if (sdev->sdev_state == SDEV_OFFLINE)
                                sdev->sdev_state = SDEV_RUNNING;
                        mutex_unlock(&sdev->state_mutex);
                }
        } else if (rport->state == SRP_RPORT_RUNNING) {
                /*
                 * srp_reconnect_rport() has been invoked with fast_io_fail
                 * and dev_loss off. Mark the port as failed and start the TL
                 * failure timers if these had not yet been started.
                 */
                __rport_fail_io_fast(rport);
                scsi_target_unblock(&shost->shost_gendev,
                                    SDEV_TRANSPORT_OFFLINE);
                __srp_start_tl_fail_timers(rport);
        } else if (rport->state != SRP_RPORT_BLOCKED) {
                scsi_target_unblock(&shost->shost_gendev,
                                    SDEV_TRANSPORT_OFFLINE);
        }
        mutex_unlock(&rport->mutex);

out:
        return res;
}
EXPORT_SYMBOL(srp_reconnect_rport);
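
/*
 * Usage sketch (hypothetical LLD "foo"): a typical caller is an LLD host
 * reset handler, which runs in SCSI EH context as recommended above:
 *
 *      static int foo_eh_host_reset_handler(struct scsi_cmnd *scmd)
 *      {
 *              struct foo_target *target =
 *                      host_to_target(scmd->device->host);
 *
 *              return srp_reconnect_rport(target->rport) == 0 ?
 *                      SUCCESS : FAILED;
 *      }
 */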

/**
 * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
 * @scmd: SCSI command.
 *
 * If a timeout occurs while an rport is in the blocked state, ask the SCSI
 * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
 * handle the timeout (BLK_EH_NOT_HANDLED).
 *
 * Note: This function is called from soft-IRQ context and with the request
 * queue lock held.
 */
enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
{
        struct scsi_device *sdev = scmd->device;
        struct Scsi_Host *shost = sdev->host;
        struct srp_internal *i = to_srp_internal(shost->transportt);
        struct srp_rport *rport = shost_to_rport(shost);

        pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
        return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
                i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
                BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
}
EXPORT_SYMBOL(srp_timed_out);
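
/*
 * Usage sketch (hypothetical template "foo"): an LLD enables this
 * intercept by pointing the eh_timed_out member of its SCSI host template
 * at this function:
 *
 *      static struct scsi_host_template foo_template = {
 *              ...
 *              .eh_timed_out           = srp_timed_out,
 *      };
 */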

static void srp_rport_release(struct device *dev)
{
        struct srp_rport *rport = dev_to_rport(dev);

        put_device(dev->parent);
        kfree(rport);
}

static int scsi_is_srp_rport(const struct device *dev)
{
        return dev->release == srp_rport_release;
}

static int srp_rport_match(struct attribute_container *cont,
                           struct device *dev)
{
        struct Scsi_Host *shost;
        struct srp_internal *i;

        if (!scsi_is_srp_rport(dev))
                return 0;

        shost = dev_to_shost(dev->parent);
        if (!shost->transportt)
                return 0;
        if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
                return 0;

        i = to_srp_internal(shost->transportt);
        return &i->rport_attr_cont.ac == cont;
}

static int srp_host_match(struct attribute_container *cont, struct device *dev)
{
        struct Scsi_Host *shost;
        struct srp_internal *i;

        if (!scsi_is_host_device(dev))
                return 0;

        shost = dev_to_shost(dev);
        if (!shost->transportt)
                return 0;
        if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
                return 0;

        i = to_srp_internal(shost->transportt);
        return &i->t.host_attrs.ac == cont;
}

/**
 * srp_rport_get() - increment rport reference count
 * @rport: SRP target port.
 */
void srp_rport_get(struct srp_rport *rport)
{
        get_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_get);

/**
 * srp_rport_put() - decrement rport reference count
 * @rport: SRP target port.
 */
void srp_rport_put(struct srp_rport *rport)
{
        put_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_put);

/**
 * srp_rport_add - add an SRP remote port to the device hierarchy
 * @shost:      scsi host the remote port is connected to.
 * @ids:        The port id for the remote port.
 *
 * Publishes a port to the rest of the system.
 */
struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
                                struct srp_rport_identifiers *ids)
{
        struct srp_rport *rport;
        struct device *parent = &shost->shost_gendev;
        struct srp_internal *i = to_srp_internal(shost->transportt);
        int id, ret;

        rport = kzalloc(sizeof(*rport), GFP_KERNEL);
        if (!rport)
                return ERR_PTR(-ENOMEM);

        mutex_init(&rport->mutex);

        device_initialize(&rport->dev);

        rport->dev.parent = get_device(parent);
        rport->dev.release = srp_rport_release;

        memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
        rport->roles = ids->roles;

        if (i->f->reconnect)
                rport->reconnect_delay = i->f->reconnect_delay ?
                        *i->f->reconnect_delay : 10;
        INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work);
        rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
                *i->f->fast_io_fail_tmo : 15;
        rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
        INIT_DELAYED_WORK(&rport->fast_io_fail_work,
                          rport_fast_io_fail_timedout);
        INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);

        id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
        dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);

        transport_setup_device(&rport->dev);

        ret = device_add(&rport->dev);
        if (ret) {
                transport_destroy_device(&rport->dev);
                put_device(&rport->dev);
                return ERR_PTR(ret);
        }

        transport_add_device(&rport->dev);
        transport_configure_device(&rport->dev);

        return rport;
}
EXPORT_SYMBOL_GPL(srp_rport_add);
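
/*
 * Creation sketch (illustrative only; the identifiers are made up): after
 * scsi_add_host() has succeeded, an LLD publishes a remote port like this:
 *
 *      struct srp_rport_identifiers ids;
 *      struct srp_rport *rport;
 *
 *      memcpy(ids.port_id, target_port_id, sizeof(ids.port_id));
 *      ids.roles = SRP_RPORT_ROLE_TARGET;
 *      rport = srp_rport_add(shost, &ids);
 *      if (IS_ERR(rport))
 *              return PTR_ERR(rport);
 */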

/**
 * srp_rport_del  -  remove an SRP remote port
 * @rport:      SRP remote port to remove
 *
 * Removes the specified SRP remote port.
 */
void srp_rport_del(struct srp_rport *rport)
{
        struct device *dev = &rport->dev;

        transport_remove_device(dev);
        device_del(dev);
        transport_destroy_device(dev);

        put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);

static int do_srp_rport_del(struct device *dev, void *data)
{
        if (scsi_is_srp_rport(dev))
                srp_rport_del(dev_to_rport(dev));
        return 0;
}

/**
 * srp_remove_host  -  tear down a Scsi_Host's SRP data structures
 * @shost:      Scsi Host that is torn down
 *
 * Removes all SRP remote ports for a given Scsi_Host.
 * Must be called just before scsi_remove_host for SRP HBAs.
 */
void srp_remove_host(struct Scsi_Host *shost)
{
        device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
}
EXPORT_SYMBOL_GPL(srp_remove_host);

/**
 * srp_stop_rport_timers - stop the transport layer recovery timers
 * @rport: SRP remote port for which to stop the timers.
 *
 * Must be called after srp_remove_host() and scsi_remove_host(). The caller
 * must hold a reference on the rport (rport->dev) and on the SCSI host
 * (rport->dev.parent).
 */
void srp_stop_rport_timers(struct srp_rport *rport)
{
        mutex_lock(&rport->mutex);
        if (rport->state == SRP_RPORT_BLOCKED)
                __rport_fail_io_fast(rport);
        srp_rport_set_state(rport, SRP_RPORT_LOST);
        mutex_unlock(&rport->mutex);

        cancel_delayed_work_sync(&rport->reconnect_work);
        cancel_delayed_work_sync(&rport->fast_io_fail_work);
        cancel_delayed_work_sync(&rport->dev_loss_work);
}
EXPORT_SYMBOL_GPL(srp_stop_rport_timers);
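
/*
 * Teardown sketch (illustrative only): combining the ordering rules from
 * the srp_remove_host() and srp_stop_rport_timers() kernel-doc above, an
 * LLD removes a target roughly as follows:
 *
 *      srp_rport_get(rport);
 *      srp_remove_host(shost);
 *      scsi_remove_host(shost);
 *      srp_stop_rport_timers(rport);
 *      srp_rport_put(rport);
 *      scsi_host_put(shost);
 */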

/**
 * srp_attach_transport  -  instantiate SRP transport template
 * @ft:         SRP transport class function template
 */
struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *ft)
{
        int count;
        struct srp_internal *i;

        i = kzalloc(sizeof(*i), GFP_KERNEL);
        if (!i)
                return NULL;

        i->t.host_size = sizeof(struct srp_host_attrs);
        i->t.host_attrs.ac.attrs = &i->host_attrs[0];
        i->t.host_attrs.ac.class = &srp_host_class.class;
        i->t.host_attrs.ac.match = srp_host_match;
        i->host_attrs[0] = NULL;
        transport_container_register(&i->t.host_attrs);

        i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
        i->rport_attr_cont.ac.class = &srp_rport_class.class;
        i->rport_attr_cont.ac.match = srp_rport_match;

        count = 0;
        i->rport_attrs[count++] = &dev_attr_port_id;
        i->rport_attrs[count++] = &dev_attr_roles;
        if (ft->has_rport_state) {
                i->rport_attrs[count++] = &dev_attr_state;
                i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo;
                i->rport_attrs[count++] = &dev_attr_dev_loss_tmo;
        }
        if (ft->reconnect) {
                i->rport_attrs[count++] = &dev_attr_reconnect_delay;
                i->rport_attrs[count++] = &dev_attr_failed_reconnects;
        }
        if (ft->rport_delete)
                i->rport_attrs[count++] = &dev_attr_delete;
        i->rport_attrs[count++] = NULL;
        BUG_ON(count > ARRAY_SIZE(i->rport_attrs));

        transport_container_register(&i->rport_attr_cont);

        i->f = ft;

        return &i->t;
}
EXPORT_SYMBOL_GPL(srp_attach_transport);
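
/*
 * Setup sketch (hypothetical LLD "foo"): the transport template is
 * instantiated once, at module load time, and each SCSI host is pointed
 * at it before scsi_add_host() is called:
 *
 *      static struct srp_function_template foo_srp_functions = {
 *              .has_rport_state        = true,
 *              .reset_timer_if_blocked = true,
 *              .reconnect              = foo_rport_reconnect,
 *              .rport_delete           = foo_rport_delete,
 *      };
 *
 *      foo_srp_template = srp_attach_transport(&foo_srp_functions);
 *      ...
 *      shost->transportt = foo_srp_template;
 */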

/**
 * srp_release_transport  -  release SRP transport template instance
 * @t:          transport template instance
 */
void srp_release_transport(struct scsi_transport_template *t)
{
        struct srp_internal *i = to_srp_internal(t);

        transport_container_unregister(&i->t.host_attrs);
        transport_container_unregister(&i->rport_attr_cont);

        kfree(i);
}
EXPORT_SYMBOL_GPL(srp_release_transport);

static __init int srp_transport_init(void)
{
        int ret;

        ret = transport_class_register(&srp_host_class);
        if (ret)
                return ret;
        ret = transport_class_register(&srp_rport_class);
        if (ret)
                goto unregister_host_class;

        return 0;
unregister_host_class:
        transport_class_unregister(&srp_host_class);
        return ret;
}

static void __exit srp_transport_exit(void)
{
        transport_class_unregister(&srp_host_class);
        transport_class_unregister(&srp_rport_class);
}

MODULE_AUTHOR("FUJITA Tomonori");
MODULE_DESCRIPTION("SRP Transport Attributes");
MODULE_LICENSE("GPL");

module_init(srp_transport_init);
module_exit(srp_transport_exit);