// SPDX-License-Identifier: GPL-2.0
/*
 *      scsi_pm.c       Copyright (C) 2010 Alan Stern
 *
 *      SCSI dynamic Power Management
 *              Initial version: Alan Stern <stern@rowland.harvard.edu>
 */

#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/async.h>
#include <linux/blk-pm.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"

#ifdef CONFIG_PM_SLEEP

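/*
 * Thin wrappers around the dev_pm_ops callbacks of the attached high-level
 * driver (sd, sr, st, ...): invoke the corresponding callback if the driver
 * provides one, otherwise report success.
 */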
static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->suspend ? pm->suspend(dev) : 0;
}

static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->freeze ? pm->freeze(dev) : 0;
}

static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}

static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->resume ? pm->resume(dev) : 0;
}

static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->thaw ? pm->thaw(dev) : 0;
}

static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->restore ? pm->restore(dev) : 0;
}

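/*
 * Quiesce the device so that only power-management requests get through,
 * then run the driver's suspend-type callback; if the callback fails the
 * quiesce is undone again.  Any asynchronous resume still in flight is
 * flushed first, since the suspend path itself is synchronous.
 */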
static int scsi_dev_type_suspend(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err;

        /* flush pending in-flight resume operations, suspend is synchronous */
        async_synchronize_full_domain(&scsi_sd_pm_domain);

        err = scsi_device_quiesce(to_scsi_device(dev));
        if (err == 0) {
                err = cb(dev, pm);
                if (err)
                        scsi_device_resume(to_scsi_device(dev));
        }
        dev_dbg(dev, "scsi suspend: %d\n", err);
        return err;
}

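/*
 * Run the driver's resume-type callback and unquiesce the device.  On
 * success the runtime PM status of the device and of its request queue is
 * forced back to "active" so that normal requests can be processed again.
 */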
static int scsi_dev_type_resume(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err = 0;

        err = cb(dev, pm);
        scsi_device_resume(to_scsi_device(dev));
        dev_dbg(dev, "scsi resume: %d\n", err);

        if (err == 0) {
                bool was_runtime_suspended;

                was_runtime_suspended = pm_runtime_suspended(dev);

                pm_runtime_disable(dev);
                err = pm_runtime_set_active(dev);
                pm_runtime_enable(dev);

                /*
                 * Forcibly set runtime PM status of request queue to "active"
                 * to make sure we can again get requests from the queue
                 * (see also blk_pm_peek_request()).
                 *
                 * The resume hook will correct runtime PM status of the disk.
                 */
                if (!err && scsi_is_sdev_device(dev)) {
                        struct scsi_device *sdev = to_scsi_device(dev);

                        if (was_runtime_suspended)
                                blk_post_runtime_resume(sdev->request_queue, 0);
                        else
                                blk_set_runtime_active(sdev->request_queue);
                }
        }

        return err;
}

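/*
 * System-sleep suspend entry point shared by suspend, freeze and poweroff.
 * Only scsi_device children need per-device handling here; nothing is done
 * for targets or hosts.
 */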
static int scsi_bus_suspend_common(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        int err = 0;

        if (scsi_is_sdev_device(dev)) {
                /*
                 * All the high-level SCSI drivers that implement runtime
                 * PM treat runtime suspend, system suspend, and system
                 * hibernate nearly identically. In all cases the requirements
                 * for runtime suspension are stricter.
                 */
                if (pm_runtime_suspended(dev))
                        return 0;

                err = scsi_dev_type_suspend(dev, cb);
        }

        return err;
}

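/*
 * Async wrappers: resume, thaw and restore of individual devices run in
 * the scsi_sd_pm_domain async domain so that several disks can spin up
 * in parallel during system resume.
 */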
static void async_sdev_resume(void *dev, async_cookie_t cookie)
{
        scsi_dev_type_resume(dev, do_scsi_resume);
}

static void async_sdev_thaw(void *dev, async_cookie_t cookie)
{
        scsi_dev_type_resume(dev, do_scsi_thaw);
}

static void async_sdev_restore(void *dev, async_cookie_t cookie)
{
        scsi_dev_type_resume(dev, do_scsi_restore);
}

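/*
 * Schedule the async handler that matches the requested callback.  For
 * devices other than scsi_device (targets, hosts) only the runtime PM
 * status is reset to "active".
 */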
static int scsi_bus_resume_common(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        async_func_t fn;

        if (!scsi_is_sdev_device(dev))
                fn = NULL;
        else if (cb == do_scsi_resume)
                fn = async_sdev_resume;
        else if (cb == do_scsi_thaw)
                fn = async_sdev_thaw;
        else if (cb == do_scsi_restore)
                fn = async_sdev_restore;
        else
                fn = NULL;

        if (fn) {
                async_schedule_domain(fn, dev, &scsi_sd_pm_domain);

                /*
                 * If a user has disabled async probing a likely reason
                 * is due to a storage enclosure that does not inject
                 * staggered spin-ups.  For safety, make resume
                 * synchronous as well in that case.
                 */
                if (strncmp(scsi_scan_type, "async", 5) != 0)
                        async_synchronize_full_domain(&scsi_sd_pm_domain);
        } else {
                pm_runtime_disable(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
        }
        return 0;
}

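/*
 * ->prepare makes sure that asynchronous device probing and host scanning
 * have finished before the system suspend proceeds.
 */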
static int scsi_bus_prepare(struct device *dev)
{
        if (scsi_is_sdev_device(dev)) {
                /* sd probing uses async_schedule.  Wait until it finishes. */
                async_synchronize_full_domain(&scsi_sd_probe_domain);

        } else if (scsi_is_host_device(dev)) {
                /* Wait until async scanning is finished */
                scsi_complete_async_scans();
        }
        return 0;
}

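/*
 * One-line adapters binding each system-sleep dev_pm_ops entry point to
 * the matching driver callback selector above.
 */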
static int scsi_bus_suspend(struct device *dev)
{
        return scsi_bus_suspend_common(dev, do_scsi_suspend);
}

static int scsi_bus_resume(struct device *dev)
{
        return scsi_bus_resume_common(dev, do_scsi_resume);
}

static int scsi_bus_freeze(struct device *dev)
{
        return scsi_bus_suspend_common(dev, do_scsi_freeze);
}

static int scsi_bus_thaw(struct device *dev)
{
        return scsi_bus_resume_common(dev, do_scsi_thaw);
}

static int scsi_bus_poweroff(struct device *dev)
{
        return scsi_bus_suspend_common(dev, do_scsi_poweroff);
}

static int scsi_bus_restore(struct device *dev)
{
        return scsi_bus_resume_common(dev, do_scsi_restore);
}

#else /* CONFIG_PM_SLEEP */

#define scsi_bus_prepare                NULL
#define scsi_bus_suspend                NULL
#define scsi_bus_resume                 NULL
#define scsi_bus_freeze                 NULL
#define scsi_bus_thaw                   NULL
#define scsi_bus_poweroff               NULL
#define scsi_bus_restore                NULL

#endif /* CONFIG_PM_SLEEP */

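/*
 * Runtime suspend of a scsi_device: blk_pre_runtime_suspend() lets the
 * block layer veto the transition while requests are still pending, the
 * driver callback does the actual power handling, and
 * blk_post_runtime_suspend() records the outcome in the request queue.
 */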
static int sdev_runtime_suspend(struct device *dev)
{
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        struct scsi_device *sdev = to_scsi_device(dev);
        int err = 0;

        err = blk_pre_runtime_suspend(sdev->request_queue);
        if (err)
                return err;
        if (pm && pm->runtime_suspend)
                err = pm->runtime_suspend(dev);
        blk_post_runtime_suspend(sdev->request_queue, err);

        return err;
}

static int scsi_runtime_suspend(struct device *dev)
{
        int err = 0;

        dev_dbg(dev, "scsi_runtime_suspend\n");
        if (scsi_is_sdev_device(dev))
                err = sdev_runtime_suspend(dev);

        /* Insert hooks here for targets, hosts, and transport classes */

        return err;
}

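/*
 * Runtime resume of a scsi_device: mirror image of sdev_runtime_suspend(),
 * with the block layer notified before and after the driver callback runs.
 */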
static int sdev_runtime_resume(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err = 0;

        blk_pre_runtime_resume(sdev->request_queue);
        if (pm && pm->runtime_resume)
                err = pm->runtime_resume(dev);
        blk_post_runtime_resume(sdev->request_queue, err);

        return err;
}

static int scsi_runtime_resume(struct device *dev)
{
        int err = 0;

        dev_dbg(dev, "scsi_runtime_resume\n");
        if (scsi_is_sdev_device(dev))
                err = sdev_runtime_resume(dev);

        /* Insert hooks here for targets, hosts, and transport classes */

        return err;
}

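/*
 * For scsi_device children, (re)arm the autosuspend timer and return
 * -EBUSY so the PM core does not suspend the device immediately; the
 * delayed autosuspend will do it once the device has been idle long
 * enough.
 */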
static int scsi_runtime_idle(struct device *dev)
{
        dev_dbg(dev, "scsi_runtime_idle\n");

        /* Insert hooks here for targets, hosts, and transport classes */

        if (scsi_is_sdev_device(dev)) {
                pm_runtime_mark_last_busy(dev);
                pm_runtime_autosuspend(dev);
                return -EBUSY;
        }

        return 0;
}

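/*
 * Take a runtime PM reference on the device and resume it synchronously.
 * -EACCES means runtime PM is disabled for the device and is treated as
 * success (the reference is kept); for any other error the reference is
 * dropped again and the error is returned.
 *
 * A typical caller (hypothetical sketch, not code from this file) brackets
 * command submission like this:
 *
 *      if (scsi_autopm_get_device(sdev))
 *              return -EIO;
 *      ...issue SCSI commands...
 *      scsi_autopm_put_device(sdev);
 */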
int scsi_autopm_get_device(struct scsi_device *sdev)
{
        int     err;

        err = pm_runtime_get_sync(&sdev->sdev_gendev);
        if (err < 0 && err != -EACCES)
                pm_runtime_put_sync(&sdev->sdev_gendev);
        else
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(scsi_autopm_get_device);

void scsi_autopm_put_device(struct scsi_device *sdev)
{
        pm_runtime_put_sync(&sdev->sdev_gendev);
}
EXPORT_SYMBOL_GPL(scsi_autopm_put_device);

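/*
 * Target variants: the return value of pm_runtime_get_sync() is ignored,
 * these helpers only provide reference counting for the target.
 */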
void scsi_autopm_get_target(struct scsi_target *starget)
{
        pm_runtime_get_sync(&starget->dev);
}

void scsi_autopm_put_target(struct scsi_target *starget)
{
        pm_runtime_put_sync(&starget->dev);
}

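/*
 * Host variants: same -EACCES handling as scsi_autopm_get_device(), applied
 * to the Scsi_Host's shost_gendev.
 */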
int scsi_autopm_get_host(struct Scsi_Host *shost)
{
        int     err;

        err = pm_runtime_get_sync(&shost->shost_gendev);
        if (err < 0 && err != -EACCES)
                pm_runtime_put_sync(&shost->shost_gendev);
        else
                err = 0;
        return err;
}

void scsi_autopm_put_host(struct Scsi_Host *shost)
{
        pm_runtime_put_sync(&shost->shost_gendev);
}

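/*
 * Installed as the PM operations of scsi_bus_type (see scsi_sysfs.c), so
 * these callbacks apply to every device on the SCSI bus.
 */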
const struct dev_pm_ops scsi_bus_pm_ops = {
        .prepare =              scsi_bus_prepare,
        .suspend =              scsi_bus_suspend,
        .resume =               scsi_bus_resume,
        .freeze =               scsi_bus_freeze,
        .thaw =                 scsi_bus_thaw,
        .poweroff =             scsi_bus_poweroff,
        .restore =              scsi_bus_restore,
        .runtime_suspend =      scsi_runtime_suspend,
        .runtime_resume =       scsi_runtime_resume,
        .runtime_idle =         scsi_runtime_idle,
};