linux/drivers/ide/ide-pm.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>

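/*
 * generic_ide_suspend - suspend an IDE device
 *
 * Issue an ATA_PRIV_PM_SUSPEND request through the block layer to step the
 * drive through the suspend sequence.  On ACPI-aware ports the transfer
 * timings are saved (_GTM) once per port before suspending, and the port is
 * powered down (_PS3) only after both devices on the cable have been
 * suspended.
 */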
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
        ide_drive_t *drive = to_ide_device(dev);
        ide_drive_t *pair = ide_get_pair_dev(drive);
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq;
        struct ide_pm_state rqpm;
        int ret;

        if (ide_port_acpi(hwif)) {
                /* call ACPI _GTM only once */
                if ((drive->dn & 1) == 0 || pair == NULL)
                        ide_acpi_get_timing(hwif);
        }

        memset(&rqpm, 0, sizeof(rqpm));
        rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
        ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
        ide_req(rq)->special = &rqpm;
        rqpm.pm_step = IDE_PM_START_SUSPEND;
        if (mesg.event == PM_EVENT_PRETHAW)
                mesg.event = PM_EVENT_FREEZE;
        rqpm.pm_state = mesg.event;

        blk_execute_rq(drive->queue, NULL, rq, 0);
        ret = scsi_req(rq)->result ? -EIO : 0;
        blk_put_request(rq);

        if (ret == 0 && ide_port_acpi(hwif)) {
                /* call ACPI _PS3 only after both devices are suspended */
                if ((drive->dn & 1) || pair == NULL)
                        ide_acpi_set_state(hwif, 0);
        }

        return ret;
}

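/*
 * Execute a power management request synchronously at the head of the
 * queue.  If the queue is already dying, complete the request quietly
 * with -ENXIO instead of issuing it.
 */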
static int ide_pm_execute_rq(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (unlikely(blk_queue_dying(q))) {
                rq->rq_flags |= RQF_QUIET;
                scsi_req(rq)->result = -ENXIO;
                blk_mq_end_request(rq, BLK_STS_OK);
                return -ENXIO;
        }
        blk_execute_rq(q, NULL, rq, true);

        return scsi_req(rq)->result ? -EIO : 0;
}

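/*
 * generic_ide_resume - resume an IDE device
 *
 * Restart the stopped hardware queues, restore ACPI state and timings
 * (_PS0 / _STM) once per port, then issue an ATA_PRIV_PM_RESUME request
 * to step the drive through the resume sequence.  Finally call the
 * attached driver's ->resume() method, if any.
 */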
int generic_ide_resume(struct device *dev)
{
        ide_drive_t *drive = to_ide_device(dev);
        ide_drive_t *pair = ide_get_pair_dev(drive);
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq;
        struct ide_pm_state rqpm;
        int err;

        blk_mq_start_stopped_hw_queues(drive->queue, true);

        if (ide_port_acpi(hwif)) {
                /* call ACPI _PS0 / _STM only once */
                if ((drive->dn & 1) == 0 || pair == NULL) {
                        ide_acpi_set_state(hwif, 1);
                        ide_acpi_push_timing(hwif);
                }

                ide_acpi_exec_tfs(drive);
        }

        memset(&rqpm, 0, sizeof(rqpm));
        rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
        ide_req(rq)->type = ATA_PRIV_PM_RESUME;
        ide_req(rq)->special = &rqpm;
        rqpm.pm_step = IDE_PM_START_RESUME;
        rqpm.pm_state = PM_EVENT_ON;

        err = ide_pm_execute_rq(rq);
        blk_put_request(rq);

        if (err == 0 && dev->driver) {
                struct ide_driver *drv = to_ide_driver(dev->driver);

                if (drv->resume)
                        drv->resume(drive);
        }

        return err;
}

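/*
 * Advance the PM state machine once the current step has completed.
 * For disks the suspend sequence is flush cache -> standby (the standby
 * step is skipped for PM_EVENT_FREEZE) and the resume sequence is
 * restore PIO -> idle -> restore DMA; other media types do not step.
 */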
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
        struct ide_pm_state *pm = ide_req(rq)->special;

#ifdef DEBUG_PM
        printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
                drive->name, pm->pm_step);
#endif
        if (drive->media != ide_disk)
                return;

        switch (pm->pm_step) {
        case IDE_PM_FLUSH_CACHE:        /* Suspend step 1 (flush cache) */
                if (pm->pm_state == PM_EVENT_FREEZE)
                        pm->pm_step = IDE_PM_COMPLETED;
                else
                        pm->pm_step = IDE_PM_STANDBY;
                break;
        case IDE_PM_STANDBY:            /* Suspend step 2 (standby) */
                pm->pm_step = IDE_PM_COMPLETED;
                break;
        case IDE_PM_RESTORE_PIO:        /* Resume step 1 (restore PIO) */
                pm->pm_step = IDE_PM_IDLE;
                break;
        case IDE_PM_IDLE:               /* Resume step 2 (idle) */
                pm->pm_step = IDE_PM_RESTORE_DMA;
                break;
        }
}

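/*
 * Issue the ATA command for the current PM step, or skip steps that do
 * not apply to this device (non-disk media, flush cache not supported,
 * no DMA operations).  Returns ide_stopped when no taskfile needs to be
 * sent for this step.
 */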
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
        struct ide_pm_state *pm = ide_req(rq)->special;
        struct ide_cmd cmd = { };

        switch (pm->pm_step) {
        case IDE_PM_FLUSH_CACHE:        /* Suspend step 1 (flush cache) */
                if (drive->media != ide_disk)
                        break;
                /* Not supported? Switch to next step now. */
                if (ata_id_flush_enabled(drive->id) == 0 ||
                    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
                        ide_complete_power_step(drive, rq);
                        return ide_stopped;
                }
                if (ata_id_flush_ext_enabled(drive->id))
                        cmd.tf.command = ATA_CMD_FLUSH_EXT;
                else
                        cmd.tf.command = ATA_CMD_FLUSH;
                goto out_do_tf;
        case IDE_PM_STANDBY:            /* Suspend step 2 (standby) */
                cmd.tf.command = ATA_CMD_STANDBYNOW1;
                goto out_do_tf;
        case IDE_PM_RESTORE_PIO:        /* Resume step 1 (restore PIO) */
                ide_set_max_pio(drive);
                /*
                 * skip IDE_PM_IDLE for ATAPI devices
                 */
                if (drive->media != ide_disk)
                        pm->pm_step = IDE_PM_RESTORE_DMA;
                else
                        ide_complete_power_step(drive, rq);
                return ide_stopped;
        case IDE_PM_IDLE:               /* Resume step 2 (idle) */
                cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
                goto out_do_tf;
        case IDE_PM_RESTORE_DMA:        /* Resume step 3 (restore DMA) */
                /*
                 * Right now, all we do is call ide_set_dma(drive),
                 * we could be smarter and check for current xfer_speed
                 * in struct drive etc...
                 */
                if (drive->hwif->dma_ops == NULL)
                        break;
                /*
                 * TODO: respect IDE_DFLAG_USING_DMA
                 */
                ide_set_dma(drive);
                break;
        }

        pm->pm_step = IDE_PM_COMPLETED;

        return ide_stopped;

out_do_tf:
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
        cmd.protocol = ATA_PROT_NODATA;

        return do_rw_taskfile(drive, &cmd);
}

/**
 *      ide_complete_pm_rq - end the current Power Management request
 *      @drive: target drive
 *      @rq: request
 *
 *      This function cleans up the current PM request and stops the queue
 *      if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
        struct request_queue *q = drive->queue;
        struct ide_pm_state *pm = ide_req(rq)->special;

        ide_complete_power_step(drive, rq);
        if (pm->pm_step != IDE_PM_COMPLETED)
                return;

#ifdef DEBUG_PM
        printk("%s: completing PM request, %s\n", drive->name,
               (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
        if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
                blk_mq_stop_hw_queues(q);
        else
                drive->dev_flags &= ~IDE_DFLAG_BLOCKED;

        drive->hwif->rq = NULL;

        blk_mq_end_request(rq, BLK_STS_OK);
}

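/*
 * Called as a PM request is about to be started.  At the start of a
 * suspend the drive is marked blocked; at the start of a resume we wait
 * for the port to report not-busy, reselect the device and restart the
 * hardware queues before the resume steps are issued.
 */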
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
        struct ide_pm_state *pm = ide_req(rq)->special;

        if (blk_rq_is_private(rq) &&
            ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
            pm->pm_step == IDE_PM_START_SUSPEND)
                /* Mark drive blocked when starting the suspend sequence. */
                drive->dev_flags |= IDE_DFLAG_BLOCKED;
        else if (blk_rq_is_private(rq) &&
                 ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
                 pm->pm_step == IDE_PM_START_RESUME) {
                /*
                 * The first thing we do on wakeup is to wait for BSY bit to
                 * go away (with a looong timeout) as a drive on this hwif may
                 * just be POSTing itself.
                 * We do that before even selecting as the "other" device on
                 * the bus may be broken enough to walk on our toes at this
                 * point.
                 */
                ide_hwif_t *hwif = drive->hwif;
                const struct ide_tp_ops *tp_ops = hwif->tp_ops;
                struct request_queue *q = drive->queue;
                int rc;
#ifdef DEBUG_PM
                printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
                rc = ide_wait_not_busy(hwif, 35000);
                if (rc)
                        printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
                tp_ops->dev_select(drive);
                tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
                rc = ide_wait_not_busy(hwif, 100000);
                if (rc)
                        printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

                blk_mq_start_hw_queues(q);
        }
}