linux/arch/x86/platform/intel-mid/pwr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MID Power Management Unit (PWRMU) device driver
 *
 * Copyright (C) 2016, Intel Corporation
 *
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * The Intel MID Power Management Unit device driver handles the South Complex
 * PCI devices such as GPDMA, SPI, I2C, PWM, and so on. By default the PCI core
 * modifies bits in the PMCSR register in the PCI configuration space. This is
 * not enough on some SoCs like Intel Tangier. In such cases the PCI core sets
 * the new power state of the device in question through a PM hook registered
 * in struct pci_platform_pm_ops (see drivers/pci/pci-mid.c).
 */
  16
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18
  19#include <linux/delay.h>
  20#include <linux/errno.h>
  21#include <linux/interrupt.h>
  22#include <linux/kernel.h>
  23#include <linux/export.h>
  24#include <linux/mutex.h>
  25#include <linux/pci.h>
  26
  27#include <asm/intel-mid.h>
  28
/* Registers */
#define PM_STS                  0x00
#define PM_CMD                  0x04
#define PM_ICS                  0x08
#define PM_WKC(x)               (0x10 + (x) * 4)
#define PM_WKS(x)               (0x18 + (x) * 4)
#define PM_SSC(x)               (0x20 + (x) * 4)
#define PM_SSS(x)               (0x30 + (x) * 4)

/* Bits in PM_STS */
#define PM_STS_BUSY             (1 << 8)

/* Bits in PM_CMD */
#define PM_CMD_CMD(x)           ((x) << 0)
#define PM_CMD_IOC              (1 << 8)
#define PM_CMD_CM_NOP           (0 << 9)
#define PM_CMD_CM_IMMEDIATE     (1 << 9)
#define PM_CMD_CM_DELAY         (2 << 9)
#define PM_CMD_CM_TRIGGER       (3 << 9)

/* System states */
#define PM_CMD_SYS_STATE_S5     (5 << 16)

/* Trigger variants */
#define PM_CMD_CFG_TRIGGER_NC   (3 << 19)

/* Message to wait for TRIGGER_NC case */
#define TRIGGER_NC_MSG_2        (2 << 22)

/* List of commands */
#define CMD_SET_CFG             0x01

/* Bits in PM_ICS */
#define PM_ICS_INT_STATUS(x)    ((x) & 0xff)
#define PM_ICS_IE               (1 << 8)
#define PM_ICS_IP               (1 << 9)
#define PM_ICS_SW_INT_STS       (1 << 10)

/* List of interrupts */
#define INT_INVALID             0
#define INT_CMD_COMPLETE        1
#define INT_CMD_ERR             2
#define INT_WAKE_EVENT          3
#define INT_LSS_POWER_ERR       4
#define INT_S0iX_MSG_ERR        5
#define INT_NO_C6               6
#define INT_TRIGGER_ERR         7
#define INT_INACTIVITY          8

/* South Complex devices */
#define LSS_MAX_SHARED_DEVS     4
#define LSS_MAX_DEVS            64

#define LSS_WS_BITS             1       /* wake state width */
#define LSS_PWS_BITS            2       /* power state width */

/* Supported device IDs */
#define PCI_DEVICE_ID_PENWELL   0x0828
#define PCI_DEVICE_ID_TANGIER   0x11a1

struct mid_pwr_dev {
        struct pci_dev *pdev;
        pci_power_t state;
};

struct mid_pwr {
        struct device *dev;
        void __iomem *regs;
        int irq;
        bool available;

        struct mutex lock;
        struct mid_pwr_dev lss[LSS_MAX_DEVS][LSS_MAX_SHARED_DEVS];
};

static struct mid_pwr *midpwr;

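/*
 * Register accessors: the requested per-LSS power state is written via
 * PM_SSC(x), while the current state is read back from PM_SSS(x).
 */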
static u32 mid_pwr_get_state(struct mid_pwr *pwr, int reg)
{
        return readl(pwr->regs + PM_SSS(reg));
}

static void mid_pwr_set_state(struct mid_pwr *pwr, int reg, u32 value)
{
        writel(value, pwr->regs + PM_SSC(reg));
}

static void mid_pwr_set_wake(struct mid_pwr *pwr, int reg, u32 value)
{
        writel(value, pwr->regs + PM_WKC(reg));
}

static void mid_pwr_interrupt_disable(struct mid_pwr *pwr)
{
        writel(~PM_ICS_IE, pwr->regs + PM_ICS);
}

static bool mid_pwr_is_busy(struct mid_pwr *pwr)
{
        return !!(readl(pwr->regs + PM_STS) & PM_STS_BUSY);
}

/* Wait up to 500 ms for the latest PWRMU command to finish */
static int mid_pwr_wait(struct mid_pwr *pwr)
{
        unsigned int count = 500000;
        bool busy;

        do {
                busy = mid_pwr_is_busy(pwr);
                if (!busy)
                        return 0;
                udelay(1);
        } while (--count);

        return -EBUSY;
}

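/* Issue a command to the PWRMU in immediate mode and wait for it to complete */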
static int mid_pwr_wait_for_cmd(struct mid_pwr *pwr, u8 cmd)
{
        writel(PM_CMD_CMD(cmd) | PM_CMD_CM_IMMEDIATE, pwr->regs + PM_CMD);
        return mid_pwr_wait(pwr);
}

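/*
 * Request a new power state for a single LSS and verify that the SCU applied
 * it. Called with pwr->lock held (see mid_pwr_set_power_state()).
 */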
static int __update_power_state(struct mid_pwr *pwr, int reg, int bit, int new)
{
        int curstate;
        u32 power;
        int ret;

        /* Check if the device is already in desired state */
        power = mid_pwr_get_state(pwr, reg);
        curstate = (power >> bit) & 3;
        if (curstate == new)
                return 0;

        /* Update the power state */
        mid_pwr_set_state(pwr, reg, (power & ~(3 << bit)) | (new << bit));

        /* Send command to SCU */
        ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
        if (ret)
                return ret;

        /* Verify that the device has reached the desired state */
        power = mid_pwr_get_state(pwr, reg);
        curstate = (power >> bit) & 3;
        if (curstate != new)
                return -EAGAIN;

        return 0;
}

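/*
 * Several PCI devices may share one LSS. Cache the state requested for @pdev
 * and return the shallowest (lowest-numbered) D-state requested by any device
 * sharing the LSS, which is the deepest state the whole LSS may enter.
 */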
static pci_power_t __find_weakest_power_state(struct mid_pwr_dev *lss,
                                              struct pci_dev *pdev,
                                              pci_power_t state)
{
        pci_power_t weakest = PCI_D3hot;
        unsigned int j;

        /* Find device in cache or first free cell */
        for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
                if (lss[j].pdev == pdev || !lss[j].pdev)
                        break;
        }

        /* Store the desired state in cache */
        if (j < LSS_MAX_SHARED_DEVS) {
                lss[j].pdev = pdev;
                lss[j].state = state;
        } else {
                dev_WARN(&pdev->dev, "No room for device in PWRMU LSS cache\n");
                weakest = state;
        }

        /* Find the power state we may use */
        for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
                if (lss[j].state < weakest)
                        weakest = lss[j].state;
        }

        return weakest;
}

static int __set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev,
                             pci_power_t state, int id, int reg, int bit)
{
        const char *name;
        int ret;

        state = __find_weakest_power_state(pwr->lss[id], pdev, state);
        name = pci_power_name(state);

        ret = __update_power_state(pwr, reg, bit, (__force int)state);
        if (ret) {
                dev_warn(&pdev->dev, "Can't set power state %s: %d\n", name, ret);
                return ret;
        }

        dev_vdbg(&pdev->dev, "Set power state %s\n", name);
        return 0;
}

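/*
 * Each LSS takes LSS_PWS_BITS bits of state, so 16 LSSes are packed into each
 * 32-bit PM_SSC/PM_SSS register; translate the LSS ID into a register index
 * and a bit offset before updating the state.
 */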
static int mid_pwr_set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev,
                                   pci_power_t state)
{
        int id, reg, bit;
        int ret;

        id = intel_mid_pwr_get_lss_id(pdev);
        if (id < 0)
                return id;

        reg = (id * LSS_PWS_BITS) / 32;
        bit = (id * LSS_PWS_BITS) % 32;

        /* We support states between PCI_D0 and PCI_D3hot */
        if (state < PCI_D0)
                state = PCI_D0;
        if (state > PCI_D3hot)
                state = PCI_D3hot;

        mutex_lock(&pwr->lock);
        ret = __set_power_state(pwr, pdev, state, id, reg, bit);
        mutex_unlock(&pwr->lock);
        return ret;
}

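/*
 * PM hooks called by the PCI core through struct pci_platform_pm_ops
 * (see drivers/pci/pci-mid.c), as described in the header comment above.
 */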
int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
{
        struct mid_pwr *pwr = midpwr;
        int ret = 0;

        might_sleep();

        if (pwr && pwr->available)
                ret = mid_pwr_set_power_state(pwr, pdev, state);
        dev_vdbg(&pdev->dev, "set_power_state() returns %d\n", ret);

        return 0;
}

pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev)
{
        struct mid_pwr *pwr = midpwr;
        int id, reg, bit;
        u32 power;

        if (!pwr || !pwr->available)
                return PCI_UNKNOWN;

        id = intel_mid_pwr_get_lss_id(pdev);
        if (id < 0)
                return PCI_UNKNOWN;

        reg = (id * LSS_PWS_BITS) / 32;
        bit = (id * LSS_PWS_BITS) % 32;
        power = mid_pwr_get_state(pwr, reg);
        return (__force pci_power_t)((power >> bit) & 3);
}

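/* Ask the SCU to bring the whole platform into the S5 (power off) state */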
void intel_mid_pwr_power_off(void)
{
        struct mid_pwr *pwr = midpwr;
        u32 cmd = PM_CMD_SYS_STATE_S5 |
                  PM_CMD_CMD(CMD_SET_CFG) |
                  PM_CMD_CM_TRIGGER |
                  PM_CMD_CFG_TRIGGER_NC |
                  TRIGGER_NC_MSG_2;

        /* Send command to SCU */
        writel(cmd, pwr->regs + PM_CMD);
        mid_pwr_wait(pwr);
}

int intel_mid_pwr_get_lss_id(struct pci_dev *pdev)
{
        int vndr;
        u8 id;

        /*
         * Mapping to PWRMU index is kept in the Logical SubSystem ID byte of
         * Vendor capability.
         */
        vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
        if (!vndr)
                return -EINVAL;

        /* Read the Logical SubSystem ID byte */
        pci_read_config_byte(pdev, vndr + INTEL_MID_PWR_LSS_OFFSET, &id);
        if (!(id & INTEL_MID_PWR_LSS_TYPE))
                return -ENODEV;

        id &= ~INTEL_MID_PWR_LSS_TYPE;
        if (id >= LSS_MAX_DEVS)
                return -ERANGE;

        return id;
}

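/*
 * PWRMU interrupts are masked at probe time, so anything that fires here is
 * unexpected: acknowledge it and warn.
 */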
static irqreturn_t mid_pwr_irq_handler(int irq, void *dev_id)
{
        struct mid_pwr *pwr = dev_id;
        u32 ics;

        ics = readl(pwr->regs + PM_ICS);
        if (!(ics & PM_ICS_IP))
                return IRQ_NONE;

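        /* Write the pending bit back, acknowledging the interrupt */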
        writel(ics | PM_ICS_IP, pwr->regs + PM_ICS);

        dev_warn(pwr->dev, "Unexpected IRQ: %#x\n", PM_ICS_INT_STATUS(ics));
        return IRQ_HANDLED;
}

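/* Per-SoC callbacks attached via driver_data in the PCI ID table below */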
struct mid_pwr_device_info {
        int (*set_initial_state)(struct mid_pwr *pwr);
};

static int mid_pwr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct mid_pwr_device_info *info = (void *)id->driver_data;
        struct device *dev = &pdev->dev;
        struct mid_pwr *pwr;
        int ret;

        ret = pcim_enable_device(pdev);
        if (ret < 0) {
                dev_err(&pdev->dev, "error: could not enable device\n");
                return ret;
        }

        ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
        if (ret) {
                dev_err(&pdev->dev, "I/O memory remapping failed\n");
                return ret;
        }

        pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
        if (!pwr)
                return -ENOMEM;

        pwr->dev = dev;
        pwr->regs = pcim_iomap_table(pdev)[0];
        pwr->irq = pdev->irq;

        mutex_init(&pwr->lock);

        /* Disable interrupts */
        mid_pwr_interrupt_disable(pwr);

        if (info && info->set_initial_state) {
                ret = info->set_initial_state(pwr);
                if (ret)
                        dev_warn(dev, "Can't set initial state: %d\n", ret);
        }

        ret = devm_request_irq(dev, pdev->irq, mid_pwr_irq_handler,
                               IRQF_NO_SUSPEND, pci_name(pdev), pwr);
        if (ret)
                return ret;

        pwr->available = true;
        midpwr = pwr;

        pci_set_drvdata(pdev, pwr);
        return 0;
}

static int mid_set_initial_state(struct mid_pwr *pwr, const u32 *states)
{
        unsigned int i, j;
        int ret;

        /*
         * Enable wake events.
         *
         * The PWRMU supports up to 32 sources for waking up the system. Ungate
         * them all here.
         */
        mid_pwr_set_wake(pwr, 0, 0xffffffff);
        mid_pwr_set_wake(pwr, 1, 0xffffffff);

        /*
         * Power off South Complex devices.
         *
         * There is a map (see the note below) of 64 devices with 2 bits each
         * in the 32-bit HW registers. The following calls set all devices to
         * one known initial state, i.e. PCI_D3hot. This is done in conjunction
         * with PMCSR setting in arch/x86/pci/intel_mid_pci.c.
         *
         * NOTE: The actual device mapping is provided by a platform at run
         * time using the vendor capability of the PCI configuration space.
         */
        mid_pwr_set_state(pwr, 0, states[0]);
        mid_pwr_set_state(pwr, 1, states[1]);
        mid_pwr_set_state(pwr, 2, states[2]);
        mid_pwr_set_state(pwr, 3, states[3]);

        /* Send command to SCU */
        ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
        if (ret)
                return ret;

        for (i = 0; i < LSS_MAX_DEVS; i++) {
                for (j = 0; j < LSS_MAX_SHARED_DEVS; j++)
                        pwr->lss[i][j].state = PCI_D3hot;
        }

        return 0;
}

static int pnw_set_initial_state(struct mid_pwr *pwr)
{
        /* On Penwell SRAM must stay powered on */
        static const u32 states[] = {
                0xf00fffff,             /* PM_SSC(0) */
                0xffffffff,             /* PM_SSC(1) */
                0xffffffff,             /* PM_SSC(2) */
                0xffffffff,             /* PM_SSC(3) */
        };
        return mid_set_initial_state(pwr, states);
}

static int tng_set_initial_state(struct mid_pwr *pwr)
{
        static const u32 states[] = {
                0xffffffff,             /* PM_SSC(0) */
                0xffffffff,             /* PM_SSC(1) */
                0xffffffff,             /* PM_SSC(2) */
                0xffffffff,             /* PM_SSC(3) */
        };
        return mid_set_initial_state(pwr, states);
}

static const struct mid_pwr_device_info pnw_info = {
        .set_initial_state = pnw_set_initial_state,
};

static const struct mid_pwr_device_info tng_info = {
        .set_initial_state = tng_set_initial_state,
};

/* This table should be in sync with the one in drivers/pci/pci-mid.c */
static const struct pci_device_id mid_pwr_pci_ids[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL), (kernel_ulong_t)&pnw_info },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER), (kernel_ulong_t)&tng_info },
        {}
};

static struct pci_driver mid_pwr_pci_driver = {
        .name           = "intel_mid_pwr",
        .probe          = mid_pwr_probe,
        .id_table       = mid_pwr_pci_ids,
};

builtin_pci_driver(mid_pwr_pci_driver);