// SPDX-License-Identifier: GPL-2.0-only
/*
 * SCSI RDMA (SRP) transport class
 *
 * Copyright (C) 2007 FUJITA Tomonori <tomof@acm.org>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
#include "scsi_priv.h"

struct srp_host_attrs {
	atomic_t next_port_id;
};
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)

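/*
 * Upper bounds on the number of device attributes published for the SCSI
 * host and for each rport. The attribute arrays in struct srp_internal
 * reserve one extra slot for the terminating NULL, and srp_attach_transport()
 * checks the rport bound with a BUG_ON().
 */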
#define SRP_HOST_ATTRS 0
#define SRP_RPORT_ATTRS 8

struct srp_internal {
	struct scsi_transport_template t;
	struct srp_function_template *f;

	struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];

	struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
	struct transport_container rport_attr_cont;
};

static int scsi_is_srp_rport(const struct device *dev);

#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)

#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)
static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
{
	return dev_to_shost(r->dev.parent);
}

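/*
 * find_child_rport() is a device_for_each_child() callback that records the
 * single SRP rport child of a SCSI host; shost_to_rport() uses it to map a
 * Scsi_Host back to its rport, or to NULL if no rport has been added yet.
 */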
static int find_child_rport(struct device *dev, void *data)
{
	struct device **child = data;

	if (scsi_is_srp_rport(dev)) {
		WARN_ON_ONCE(*child);
		*child = dev;
	}
	return 0;
}

static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
{
	struct device *child = NULL;

	WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
					   find_child_rport) < 0);
	return child ? dev_to_rport(child) : NULL;
}

/**
 * srp_tmo_valid() - check timeout combination validity
 * @reconnect_delay: Reconnect delay in seconds.
 * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
 * @dev_loss_tmo: Device loss timeout in seconds.
 *
 * The combination of the timeout parameters must be such that SCSI commands
 * are finished in a reasonable time. Hence do not allow the fast I/O fail
 * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to
 * exceed that limit if failing I/O fast has been disabled. Furthermore, these
 * parameters must be such that multipath can detect failed paths in a timely
 * manner. Hence do not allow all three parameters to be disabled
 * simultaneously.
 */
int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, long dev_loss_tmo)
{
	if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
		return -EINVAL;
	if (reconnect_delay == 0)
		return -EINVAL;
	if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;
	if (fast_io_fail_tmo < 0 &&
	    dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;
	if (dev_loss_tmo >= LONG_MAX / HZ)
		return -EINVAL;
	if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
	    fast_io_fail_tmo >= dev_loss_tmo)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(srp_tmo_valid);
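/*
 * Illustrative srp_tmo_valid() calls (assuming SCSI_DEVICE_BLOCK_MAX_TIMEOUT
 * is at least 600): srp_tmo_valid(10, 15, 600) succeeds;
 * srp_tmo_valid(10, 600, 15) fails because an enabled fast_io_fail_tmo must
 * be strictly smaller than an enabled dev_loss_tmo; srp_tmo_valid(-1, -1, -1)
 * fails because at least one of the three mechanisms must stay enabled.
 */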

static int srp_host_setup(struct transport_container *tc, struct device *dev,
			  struct device *cdev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);

	atomic_set(&srp_host->next_port_id, 0);
	return 0;
}

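/*
 * Transport classes for the SCSI host ("srp_host") and for SRP remote ports
 * ("srp_remote_ports"); the rport attribute container registered in
 * srp_attach_transport() uses the latter.
 */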
static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
			       NULL, NULL);

static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
			       NULL, NULL, NULL);

static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	return sprintf(buf, "%16phC\n", rport->port_id);
}

static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);

static const struct {
	u32 value;
	char *name;
} srp_rport_role_names[] = {
	{SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"},
	{SRP_RPORT_ROLE_TARGET, "SRP Target"},
};

static ssize_t
show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++)
		if (srp_rport_role_names[i].value == rport->roles) {
			name = srp_rport_role_names[i].name;
			break;
		}
	return sprintf(buf, "%s\n", name ? : "unknown");
}

static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);

static ssize_t store_srp_rport_delete(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct srp_internal *i = to_srp_internal(shost->transportt);

	if (i->f->rport_delete) {
		i->f->rport_delete(rport);
		return count;
	} else {
		return -ENOSYS;
	}
}

static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);

static ssize_t show_srp_rport_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	static const char *const state_name[] = {
		[SRP_RPORT_RUNNING]	= "running",
		[SRP_RPORT_BLOCKED]	= "blocked",
		[SRP_RPORT_FAIL_FAST]	= "fail-fast",
		[SRP_RPORT_LOST]	= "lost",
	};
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	enum srp_rport_state state = rport->state;

	return sprintf(buf, "%s\n",
		       (unsigned)state < ARRAY_SIZE(state_name) ?
		       state_name[state] : "???");
}

static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL);

static ssize_t srp_show_tmo(char *buf, int tmo)
{
	return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
}

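/**
 * srp_parse_tmo() - parse a timeout value from a sysfs buffer
 * @tmo: Where to store the parsed timeout; "off" is stored as -1 (disabled).
 * @buf: Buffer with the user-supplied value.
 *
 * Returns 0 upon success or a negative error code if kstrtoint() fails.
 */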
int srp_parse_tmo(int *tmo, const char *buf)
{
	int res = 0;

	if (strncmp(buf, "off", 3) != 0)
		res = kstrtoint(buf, 0, tmo);
	else
		*tmo = -1;

	return res;
}
EXPORT_SYMBOL(srp_parse_tmo);

static ssize_t show_reconnect_delay(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->reconnect_delay);
}

static ssize_t store_reconnect_delay(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, const size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res, delay;

	res = srp_parse_tmo(&delay, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(delay, rport->fast_io_fail_tmo,
			    rport->dev_loss_tmo);
	if (res)
		goto out;

	if (rport->reconnect_delay <= 0 && delay > 0 &&
	    rport->state != SRP_RPORT_RUNNING) {
		queue_delayed_work(system_long_wq, &rport->reconnect_work,
				   delay * HZ);
	} else if (delay <= 0) {
		cancel_delayed_work(&rport->reconnect_work);
	}
	rport->reconnect_delay = delay;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay,
		   store_reconnect_delay);

static ssize_t show_failed_reconnects(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return sprintf(buf, "%d\n", rport->failed_reconnects);
}

static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL);

static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->fast_io_fail_tmo);
}

static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res;
	int fast_io_fail_tmo;

	res = srp_parse_tmo(&fast_io_fail_tmo, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
			    rport->dev_loss_tmo);
	if (res)
		goto out;
	rport->fast_io_fail_tmo = fast_io_fail_tmo;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
		   show_srp_rport_fast_io_fail_tmo,
		   store_srp_rport_fast_io_fail_tmo);

static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->dev_loss_tmo);
}

static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res;
	int dev_loss_tmo;

	res = srp_parse_tmo(&dev_loss_tmo, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
			    dev_loss_tmo);
	if (res)
		goto out;
	rport->dev_loss_tmo = dev_loss_tmo;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR,
		   show_srp_rport_dev_loss_tmo,
		   store_srp_rport_dev_loss_tmo);

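/*
 * Enforce the rport state machine: transitions to SRP_RPORT_BLOCKED are only
 * allowed from SRP_RPORT_RUNNING, no transition out of SRP_RPORT_LOST is
 * allowed, and every state may transition to SRP_RPORT_LOST. Returns 0 on
 * success and -EINVAL for an invalid transition. The caller must hold
 * rport->mutex.
 */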
static int srp_rport_set_state(struct srp_rport *rport,
			       enum srp_rport_state new_state)
{
	enum srp_rport_state old_state = rport->state;

	lockdep_assert_held(&rport->mutex);

	switch (new_state) {
	case SRP_RPORT_RUNNING:
		switch (old_state) {
		case SRP_RPORT_LOST:
			goto invalid;
		default:
			break;
		}
		break;
	case SRP_RPORT_BLOCKED:
		switch (old_state) {
		case SRP_RPORT_RUNNING:
			break;
		default:
			goto invalid;
		}
		break;
	case SRP_RPORT_FAIL_FAST:
		switch (old_state) {
		case SRP_RPORT_LOST:
			goto invalid;
		default:
			break;
		}
		break;
	case SRP_RPORT_LOST:
		break;
	}
	rport->state = new_state;
	return 0;

invalid:
	return -EINVAL;
}

/**
 * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
 * @work: Work structure used for scheduling this operation.
 */
static void srp_reconnect_work(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, reconnect_work);
	struct Scsi_Host *shost = rport_to_shost(rport);
	int delay, res;

	res = srp_reconnect_rport(rport);
	if (res != 0) {
		shost_printk(KERN_ERR, shost,
			     "reconnect attempt %d failed (%d)\n",
			     ++rport->failed_reconnects, res);
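		/*
		 * Retry with a linearly growing delay once more than ten
		 * consecutive reconnect attempts have failed, capped at
		 * 100 times the configured reconnect delay.
		 */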
		delay = rport->reconnect_delay *
			min(100, max(1, rport->failed_reconnects - 10));
		if (delay > 0)
			queue_delayed_work(system_long_wq,
					   &rport->reconnect_work, delay * HZ);
	}
}

/*
 * scsi_target_block() must have been called before this function is
 * called to guarantee that no .queuecommand() calls are in progress.
 */
static void __rport_fail_io_fast(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i;

	lockdep_assert_held(&rport->mutex);

	if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
		return;

	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);

	/* Involve the LLD if possible to terminate all I/O on the rport. */
	i = to_srp_internal(shost->transportt);
	if (i->f->terminate_rport_io)
		i->f->terminate_rport_io(rport);
}

/**
 * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_fast_io_fail_timedout(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, fast_io_fail_work);
	struct Scsi_Host *shost = rport_to_shost(rport);

	pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
		dev_name(&rport->dev), dev_name(&shost->shost_gendev));

	mutex_lock(&rport->mutex);
	if (rport->state == SRP_RPORT_BLOCKED)
		__rport_fail_io_fast(rport);
	mutex_unlock(&rport->mutex);
}

/**
 * rport_dev_loss_timedout() - device loss timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_dev_loss_timedout(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, dev_loss_work);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i = to_srp_internal(shost->transportt);

	pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
		dev_name(&rport->dev), dev_name(&shost->shost_gendev));

	mutex_lock(&rport->mutex);
	WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
	mutex_unlock(&rport->mutex);

	i->f->rport_delete(rport);
}

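/*
 * Schedule a reconnect attempt if reconnect_delay is enabled and, if either
 * fast_io_fail_tmo or dev_loss_tmo is enabled, block the SCSI target and
 * queue the corresponding timeout work. Does nothing for a lost rport. The
 * caller must hold rport->mutex.
 */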
static void __srp_start_tl_fail_timers(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	int delay, fast_io_fail_tmo, dev_loss_tmo;

	lockdep_assert_held(&rport->mutex);

	delay = rport->reconnect_delay;
	fast_io_fail_tmo = rport->fast_io_fail_tmo;
	dev_loss_tmo = rport->dev_loss_tmo;
	pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev),
		 rport->state);

	if (rport->state == SRP_RPORT_LOST)
		return;
	if (delay > 0)
		queue_delayed_work(system_long_wq, &rport->reconnect_work,
				   1UL * delay * HZ);
	if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) &&
	    srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
		pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
			 rport->state);
		scsi_target_block(&shost->shost_gendev);
		if (fast_io_fail_tmo >= 0)
			queue_delayed_work(system_long_wq,
					   &rport->fast_io_fail_work,
					   1UL * fast_io_fail_tmo * HZ);
		if (dev_loss_tmo >= 0)
			queue_delayed_work(system_long_wq,
					   &rport->dev_loss_work,
					   1UL * dev_loss_tmo * HZ);
	}
}

/**
 * srp_start_tl_fail_timers() - start the transport layer failure timers
 * @rport: SRP target port.
 *
 * Start the transport layer fast I/O failure and device loss timers. Do not
 * modify a timer that was already started.
 */
void srp_start_tl_fail_timers(struct srp_rport *rport)
{
	mutex_lock(&rport->mutex);
	__srp_start_tl_fail_timers(rport);
	mutex_unlock(&rport->mutex);
}
EXPORT_SYMBOL(srp_start_tl_fail_timers);

/**
 * srp_reconnect_rport() - reconnect to an SRP target port
 * @rport: SRP target port.
 *
 * Blocks SCSI command queueing before invoking reconnect() such that
 * queuecommand() won't be invoked concurrently with reconnect() from outside
 * the SCSI EH. This is important since a reconnect() implementation may
 * reallocate resources needed by queuecommand().
 *
 * Notes:
 * - This function neither waits until outstanding requests have finished nor
 *   tries to abort these. It is the responsibility of the reconnect()
 *   function to finish outstanding commands before reconnecting to the target
 *   port.
 * - It is the responsibility of the caller to ensure that the resources
 *   reallocated by the reconnect() function won't be used while this function
 *   is in progress. One possible strategy is to invoke this function from
 *   the context of the SCSI EH thread only. Another possible strategy is to
 *   lock the rport mutex inside each SCSI LLD callback that can be invoked by
 *   the SCSI EH (the scsi_host_template.eh_*() functions and also the
 *   scsi_host_template.queuecommand() function).
 */
int srp_reconnect_rport(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i = to_srp_internal(shost->transportt);
	struct scsi_device *sdev;
	int res;

	pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev));

	res = mutex_lock_interruptible(&rport->mutex);
	if (res)
		goto out;
	if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
		/*
		 * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
		 * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
		 * later is ok though, scsi_internal_device_unblock_nowait()
		 * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
		 */
		scsi_target_block(&shost->shost_gendev);
	res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
	pr_debug("%s (state %d): transport.reconnect() returned %d\n",
		 dev_name(&shost->shost_gendev), rport->state, res);
	if (res == 0) {
		cancel_delayed_work(&rport->fast_io_fail_work);
		cancel_delayed_work(&rport->dev_loss_work);

		rport->failed_reconnects = 0;
		srp_rport_set_state(rport, SRP_RPORT_RUNNING);
		scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING);
		/*
		 * If the SCSI error handler has offlined one or more devices,
		 * invoking scsi_target_unblock() won't change the state of
		 * these devices into running so do that explicitly.
		 */
		shost_for_each_device(sdev, shost) {
			mutex_lock(&sdev->state_mutex);
			if (sdev->sdev_state == SDEV_OFFLINE)
				sdev->sdev_state = SDEV_RUNNING;
			mutex_unlock(&sdev->state_mutex);
		}
	} else if (rport->state == SRP_RPORT_RUNNING) {
		/*
		 * srp_reconnect_rport() has been invoked with fast_io_fail
		 * and dev_loss off. Mark the port as failed and start the TL
		 * failure timers if these had not yet been started.
		 */
		__rport_fail_io_fast(rport);
		__srp_start_tl_fail_timers(rport);
	} else if (rport->state != SRP_RPORT_BLOCKED) {
		scsi_target_unblock(&shost->shost_gendev,
				    SDEV_TRANSPORT_OFFLINE);
	}
	mutex_unlock(&rport->mutex);

out:
	return res;
}
EXPORT_SYMBOL(srp_reconnect_rport);

/**
 * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
 * @scmd: SCSI command.
 *
 * If a timeout occurs while an rport is in the blocked state, ask the SCSI
 * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
 * handle the timeout (BLK_EH_DONE).
 *
 * Note: This function is called from soft-IRQ context and with the request
 * queue lock held.
 */
enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	struct srp_internal *i = to_srp_internal(shost->transportt);
	struct srp_rport *rport = shost_to_rport(shost);

	pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
	return rport && rport->fast_io_fail_tmo < 0 &&
		rport->dev_loss_tmo < 0 &&
		i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
		BLK_EH_RESET_TIMER : BLK_EH_DONE;
}
EXPORT_SYMBOL(srp_timed_out);

static void srp_rport_release(struct device *dev)
{
	struct srp_rport *rport = dev_to_rport(dev);

	put_device(dev->parent);
	kfree(rport);
}

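/* SRP rports are recognized by their release callback. */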
static int scsi_is_srp_rport(const struct device *dev)
{
	return dev->release == srp_rport_release;
}

static int srp_rport_match(struct attribute_container *cont,
			   struct device *dev)
{
	struct Scsi_Host *shost;
	struct srp_internal *i;

	if (!scsi_is_srp_rport(dev))
		return 0;

	shost = dev_to_shost(dev->parent);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
		return 0;

	i = to_srp_internal(shost->transportt);
	return &i->rport_attr_cont.ac == cont;
}

static int srp_host_match(struct attribute_container *cont, struct device *dev)
{
	struct Scsi_Host *shost;
	struct srp_internal *i;

	if (!scsi_is_host_device(dev))
		return 0;

	shost = dev_to_shost(dev);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
		return 0;

	i = to_srp_internal(shost->transportt);
	return &i->t.host_attrs.ac == cont;
}

/**
 * srp_rport_get() - increment rport reference count
 * @rport: SRP target port.
 */
void srp_rport_get(struct srp_rport *rport)
{
	get_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_get);

/**
 * srp_rport_put() - decrement rport reference count
 * @rport: SRP target port.
 */
void srp_rport_put(struct srp_rport *rport)
{
	put_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_put);

/**
 * srp_rport_add - add an SRP remote port to the device hierarchy
 * @shost:	scsi host the remote port is connected to.
 * @ids:	The port id for the remote port.
 *
 * Publishes a port to the rest of the system.
 */
struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
				struct srp_rport_identifiers *ids)
{
	struct srp_rport *rport;
	struct device *parent = &shost->shost_gendev;
	struct srp_internal *i = to_srp_internal(shost->transportt);
	int id, ret;

	rport = kzalloc(sizeof(*rport), GFP_KERNEL);
	if (!rport)
		return ERR_PTR(-ENOMEM);

	mutex_init(&rport->mutex);

	device_initialize(&rport->dev);

	rport->dev.parent = get_device(parent);
	rport->dev.release = srp_rport_release;

	memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
	rport->roles = ids->roles;

	if (i->f->reconnect)
		rport->reconnect_delay = i->f->reconnect_delay ?
			*i->f->reconnect_delay : 10;
	INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work);
	rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
		*i->f->fast_io_fail_tmo : 15;
	rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
	INIT_DELAYED_WORK(&rport->fast_io_fail_work,
			  rport_fast_io_fail_timedout);
	INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);

	id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
	dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);

	transport_setup_device(&rport->dev);

	ret = device_add(&rport->dev);
	if (ret) {
		transport_destroy_device(&rport->dev);
		put_device(&rport->dev);
		return ERR_PTR(ret);
	}

	transport_add_device(&rport->dev);
	transport_configure_device(&rport->dev);

	return rport;
}
EXPORT_SYMBOL_GPL(srp_rport_add);

/**
 * srp_rport_del  -  remove an SRP remote port
 * @rport:	SRP remote port to remove
 *
 * Removes the specified SRP remote port.
 */
void srp_rport_del(struct srp_rport *rport)
{
	struct device *dev = &rport->dev;

	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);

	put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);

static int do_srp_rport_del(struct device *dev, void *data)
{
	if (scsi_is_srp_rport(dev))
		srp_rport_del(dev_to_rport(dev));
	return 0;
}

/**
 * srp_remove_host  -  tear down a Scsi_Host's SRP data structures
 * @shost:	Scsi Host that is torn down
 *
 * Removes all SRP remote ports for a given Scsi_Host.
 * Must be called just before scsi_remove_host for SRP HBAs.
 */
void srp_remove_host(struct Scsi_Host *shost)
{
	device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
}
EXPORT_SYMBOL_GPL(srp_remove_host);

/**
 * srp_stop_rport_timers - stop the transport layer recovery timers
 * @rport: SRP remote port for which to stop the timers.
 *
 * Must be called after srp_remove_host() and scsi_remove_host(). The caller
 * must hold a reference on the rport (rport->dev) and on the SCSI host
 * (rport->dev.parent).
 */
void srp_stop_rport_timers(struct srp_rport *rport)
{
	mutex_lock(&rport->mutex);
	if (rport->state == SRP_RPORT_BLOCKED)
		__rport_fail_io_fast(rport);
	srp_rport_set_state(rport, SRP_RPORT_LOST);
	mutex_unlock(&rport->mutex);

	cancel_delayed_work_sync(&rport->reconnect_work);
	cancel_delayed_work_sync(&rport->fast_io_fail_work);
	cancel_delayed_work_sync(&rport->dev_loss_work);
}
EXPORT_SYMBOL_GPL(srp_stop_rport_timers);

/**
 * srp_attach_transport  -  instantiate SRP transport template
 * @ft:		SRP transport class function template
 */
struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *ft)
{
	int count;
	struct srp_internal *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->t.host_size = sizeof(struct srp_host_attrs);
	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
	i->t.host_attrs.ac.class = &srp_host_class.class;
	i->t.host_attrs.ac.match = srp_host_match;
	i->host_attrs[0] = NULL;
	transport_container_register(&i->t.host_attrs);

	i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
	i->rport_attr_cont.ac.class = &srp_rport_class.class;
	i->rport_attr_cont.ac.match = srp_rport_match;

	count = 0;
	i->rport_attrs[count++] = &dev_attr_port_id;
	i->rport_attrs[count++] = &dev_attr_roles;
	if (ft->has_rport_state) {
		i->rport_attrs[count++] = &dev_attr_state;
		i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo;
		i->rport_attrs[count++] = &dev_attr_dev_loss_tmo;
	}
	if (ft->reconnect) {
		i->rport_attrs[count++] = &dev_attr_reconnect_delay;
		i->rport_attrs[count++] = &dev_attr_failed_reconnects;
	}
	if (ft->rport_delete)
		i->rport_attrs[count++] = &dev_attr_delete;
	i->rport_attrs[count++] = NULL;
	BUG_ON(count > ARRAY_SIZE(i->rport_attrs));

	transport_container_register(&i->rport_attr_cont);

	i->f = ft;

	return &i->t;
}
EXPORT_SYMBOL_GPL(srp_attach_transport);
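/*
 * Sketch of typical use by an SRP initiator LLD (the my_srp_* names are
 * hypothetical, not part of this file): fill in a function template, attach
 * it before adding the SCSI host and add one rport per target port login:
 *
 *	static struct srp_function_template my_srp_ft = {
 *		.has_rport_state	= true,
 *		.reset_timer_if_blocked	= true,
 *		.reconnect		= my_srp_rport_reconnect,
 *		.terminate_rport_io	= my_srp_terminate_io,
 *		.rport_delete		= my_srp_rport_delete,
 *	};
 *
 *	shost->transportt = srp_attach_transport(&my_srp_ft);
 *	...
 *	struct srp_rport_identifiers ids = { .port_id = port_id,
 *					     .roles = SRP_RPORT_ROLE_TARGET };
 *	rport = srp_rport_add(shost, &ids);
 */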

/**
 * srp_release_transport  -  release SRP transport template instance
 * @t:		transport template instance
 */
void srp_release_transport(struct scsi_transport_template *t)
{
	struct srp_internal *i = to_srp_internal(t);

	transport_container_unregister(&i->t.host_attrs);
	transport_container_unregister(&i->rport_attr_cont);

	kfree(i);
}
EXPORT_SYMBOL_GPL(srp_release_transport);

static __init int srp_transport_init(void)
{
	int ret;

	ret = transport_class_register(&srp_host_class);
	if (ret)
		return ret;
	ret = transport_class_register(&srp_rport_class);
	if (ret)
		goto unregister_host_class;

	return 0;
unregister_host_class:
	transport_class_unregister(&srp_host_class);
	return ret;
}

static void __exit srp_transport_exit(void)
{
	transport_class_unregister(&srp_host_class);
	transport_class_unregister(&srp_rport_class);
}

MODULE_AUTHOR("FUJITA Tomonori");
MODULE_DESCRIPTION("SRP Transport Attributes");
MODULE_LICENSE("GPL");

module_init(srp_transport_init);
module_exit(srp_transport_exit);
 901