linux/drivers/net/ipa/ipa_power.c
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/bitops.h>

#include "ipa.h"
#include "ipa_power.h"
#include "ipa_endpoint.h"
#include "ipa_modem.h"
#include "ipa_data.h"

/**
 * DOC: IPA Power Management
 *
 * The IPA hardware is enabled when the IPA core clock and all the
 * interconnects (buses) it depends on are enabled.  Runtime power
 * management determines when the core clock and interconnects are
 * enabled, and suspends them automatically when the hardware is not
 * in use.
 *
 * The core clock currently runs at a fixed rate when enabled, and
 * all interconnects use a fixed average and peak bandwidth.
 */

#define IPA_AUTOSUSPEND_DELAY   500     /* milliseconds */
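
/* The autosuspend delay means the hardware stays powered for about 500 ms
 * after the last runtime-PM reference is dropped before a runtime suspend
 * is attempted.  As a rough sketch (hypothetical caller, not part of this
 * file), a user of the IPA device would bracket hardware access like this:
 *
 *	ret = pm_runtime_get_sync(dev);		// resumes via ipa_runtime_resume()
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(dev);
 *		return ret;
 *	}
 *	// ... access IPA hardware ...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);	// suspend ~500 ms after last use
 */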

/**
 * struct ipa_interconnect - IPA interconnect information
 * @path:               Interconnect path
 * @average_bandwidth:  Average interconnect bandwidth (KB/second)
 * @peak_bandwidth:     Peak interconnect bandwidth (KB/second)
 */
struct ipa_interconnect {
        struct icc_path *path;
        u32 average_bandwidth;
        u32 peak_bandwidth;
};

/**
 * enum ipa_power_flag - IPA power flags
 * @IPA_POWER_FLAG_RESUMED:     Whether resume from suspend has been signaled
 * @IPA_POWER_FLAG_SYSTEM:      Hardware is system (not runtime) suspended
 * @IPA_POWER_FLAG_STOPPED:     Modem TX is disabled by ipa_start_xmit()
 * @IPA_POWER_FLAG_STARTED:     Modem TX was enabled by ipa_runtime_resume()
 * @IPA_POWER_FLAG_COUNT:       Number of defined power flags
 */
enum ipa_power_flag {
        IPA_POWER_FLAG_RESUMED,
        IPA_POWER_FLAG_SYSTEM,
        IPA_POWER_FLAG_STOPPED,
        IPA_POWER_FLAG_STARTED,
        IPA_POWER_FLAG_COUNT,           /* Last; not a flag */
};

/**
 * struct ipa_power - IPA power management information
 * @dev:                IPA device pointer
 * @core:               IPA core clock
 * @spinlock:           Protects modem TX queue enable/disable
 * @flags:              Boolean state flags
 * @interconnect_count: Number of elements in interconnect[]
 * @interconnect:       Interconnect array
 */
struct ipa_power {
        struct device *dev;
        struct clk *core;
        spinlock_t spinlock;    /* used with STOPPED/STARTED power flags */
        DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
        u32 interconnect_count;
        struct ipa_interconnect *interconnect;
};

static int ipa_interconnect_init_one(struct device *dev,
                                     struct ipa_interconnect *interconnect,
                                     const struct ipa_interconnect_data *data)
{
        struct icc_path *path;

        path = of_icc_get(dev, data->name);
        if (IS_ERR(path)) {
                int ret = PTR_ERR(path);

                dev_err_probe(dev, ret, "error getting %s interconnect\n",
                              data->name);

                return ret;
        }

        interconnect->path = path;
        interconnect->average_bandwidth = data->average_bandwidth;
        interconnect->peak_bandwidth = data->peak_bandwidth;

        return 0;
}

static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
{
        icc_put(interconnect->path);
        memset(interconnect, 0, sizeof(*interconnect));
}

/* Initialize interconnects required for IPA operation */
static int ipa_interconnect_init(struct ipa_power *power, struct device *dev,
                                 const struct ipa_interconnect_data *data)
{
        struct ipa_interconnect *interconnect;
        u32 count;
        int ret;

        count = power->interconnect_count;
        interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
        if (!interconnect)
                return -ENOMEM;
        power->interconnect = interconnect;

        while (count--) {
                ret = ipa_interconnect_init_one(dev, interconnect, data++);
                if (ret)
                        goto out_unwind;
                interconnect++;
        }

        return 0;

out_unwind:
        while (interconnect-- > power->interconnect)
                ipa_interconnect_exit_one(interconnect);
        kfree(power->interconnect);
        power->interconnect = NULL;

        return ret;
}

/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit(struct ipa_power *power)
{
        struct ipa_interconnect *interconnect;

        interconnect = power->interconnect + power->interconnect_count;
        while (interconnect-- > power->interconnect)
                ipa_interconnect_exit_one(interconnect);
        kfree(power->interconnect);
        power->interconnect = NULL;
}

/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable(struct ipa *ipa)
{
        struct ipa_interconnect *interconnect;
        struct ipa_power *power = ipa->power;
        int ret;
        u32 i;

        interconnect = power->interconnect;
        for (i = 0; i < power->interconnect_count; i++) {
                ret = icc_set_bw(interconnect->path,
                                 interconnect->average_bandwidth,
                                 interconnect->peak_bandwidth);
                if (ret) {
                        dev_err(&ipa->pdev->dev,
                                "error %d enabling %s interconnect\n",
                                ret, icc_get_name(interconnect->path));
                        goto out_unwind;
                }
                interconnect++;
        }

        return 0;

out_unwind:
        while (interconnect-- > power->interconnect)
                (void)icc_set_bw(interconnect->path, 0, 0);

        return ret;
}
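
/* Note: icc_set_bw() takes the average and peak bandwidth in KB/second,
 * matching the units stored in struct ipa_interconnect.  If more than one
 * bandwidth level were ever needed, a sketch of a hypothetical helper (not
 * part of this driver) might simply scale the configured values:
 *
 *	static int example_interconnect_set_level(struct ipa_interconnect *icc,
 *						  u32 percent)
 *	{
 *		u32 avg = icc->average_bandwidth * percent / 100;
 *		u32 peak = icc->peak_bandwidth * percent / 100;
 *
 *		return icc_set_bw(icc->path, avg, peak);
 *	}
 */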

/* To disable an interconnect, we just set its bandwidth to 0 */
static int ipa_interconnect_disable(struct ipa *ipa)
{
        struct ipa_interconnect *interconnect;
        struct ipa_power *power = ipa->power;
        struct device *dev = &ipa->pdev->dev;
        int result = 0;
        u32 count;
        int ret;

        count = power->interconnect_count;
        interconnect = power->interconnect + count;
        while (count--) {
                interconnect--;
                ret = icc_set_bw(interconnect->path, 0, 0);
                if (ret) {
                        dev_err(dev, "error %d disabling %s interconnect\n",
                                ret, icc_get_name(interconnect->path));
                        /* Try to disable all; record only the first error */
                        if (!result)
                                result = ret;
                }
        }

        return result;
}

/* Enable IPA power, enabling interconnects and the core clock */
static int ipa_power_enable(struct ipa *ipa)
{
        int ret;

        ret = ipa_interconnect_enable(ipa);
        if (ret)
                return ret;

        ret = clk_prepare_enable(ipa->power->core);
        if (ret) {
                dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret);
                (void)ipa_interconnect_disable(ipa);
        }

        return ret;
}

/* Inverse of ipa_power_enable() */
static int ipa_power_disable(struct ipa *ipa)
{
        clk_disable_unprepare(ipa->power->core);

        return ipa_interconnect_disable(ipa);
}

static int ipa_runtime_suspend(struct device *dev)
{
        struct ipa *ipa = dev_get_drvdata(dev);

        /* Endpoints aren't usable until setup is complete */
        if (ipa->setup_complete) {
                __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags);
                ipa_endpoint_suspend(ipa);
                gsi_suspend(&ipa->gsi);
        }

        return ipa_power_disable(ipa);
}

static int ipa_runtime_resume(struct device *dev)
{
        struct ipa *ipa = dev_get_drvdata(dev);
        int ret;

        ret = ipa_power_enable(ipa);
        if (WARN_ON(ret < 0))
                return ret;

        /* Endpoints aren't usable until setup is complete */
        if (ipa->setup_complete) {
                gsi_resume(&ipa->gsi);
                ipa_endpoint_resume(ipa);
        }

        return 0;
}

static int ipa_suspend(struct device *dev)
{
        struct ipa *ipa = dev_get_drvdata(dev);

        __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);

        return pm_runtime_force_suspend(dev);
}

static int ipa_resume(struct device *dev)
{
        struct ipa *ipa = dev_get_drvdata(dev);
        int ret;

        ret = pm_runtime_force_resume(dev);

        __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);

        return ret;
}

/* Return the current IPA core clock rate */
u32 ipa_core_clock_rate(struct ipa *ipa)
{
        return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
}

/**
 * ipa_suspend_handler() - Handle the suspend IPA interrupt
 * @ipa:        IPA pointer
 * @irq_id:     IPA interrupt type (unused)
 *
 * If an RX endpoint is suspended, and the IPA has a packet destined for
 * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
 * that it should resume the endpoint.  If we get one of these interrupts
 * we just wake up the system.
 */
static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
        /* To handle an IPA interrupt we will have resumed the hardware
         * just to handle the interrupt, so we're done.  If we are in a
         * system suspend, trigger a system resume.
         */
        if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags))
                if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags))
                        pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);

        /* Acknowledge/clear the suspend interrupt on all endpoints */
        ipa_interrupt_suspend_clear_all(ipa->interrupt);
}
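
/* Note: the wakeup signaled above relies on device_init_wakeup() having
 * been called in ipa_power_setup() below.  Passing "true" as the "hard"
 * argument to pm_wakeup_dev_event() asks the PM core to abort a system
 * suspend that is in progress, not merely record the wakeup event.
 */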

/* The next few functions coordinate stopping and starting the modem
 * network device transmit queue.
 *
 * Transmit can be running concurrent with power resume, and there's a
 * chance the resume completes before the transmit path stops the queue,
 * leaving the queue in a stopped state.  The next two functions are used
 * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit()
 * to conditionally stop the TX queue; and ipa_power_modem_queue_wake()
 * is used by ipa_runtime_resume() to conditionally restart it.  An
 * illustrative sketch of a transmit-path caller follows
 * ipa_power_modem_queue_active() below.
 *
 * Two flags and a spinlock are used.  If the queue is stopped, the STOPPED
 * power flag is set.  And if the queue is started, the STARTED flag is set.
 * The queue is only started on resume if the STOPPED flag is set.  And the
 * queue is only stopped in ipa_start_xmit() if the STARTED flag is *not*
 * set.  As a result, the queue remains operational if the two activities
 * happen concurrently regardless of the order they complete.  The spinlock
 * ensures the flag and TX queue operations are done atomically.
 *
 * The first function stops the modem netdev transmit queue, but only if
 * the STARTED flag is *not* set.  That flag is cleared if it was set.
 * If the queue is stopped, the STOPPED flag is set.  This is called only
 * from ipa_start_xmit().
 */
void ipa_power_modem_queue_stop(struct ipa *ipa)
{
        struct ipa_power *power = ipa->power;
        unsigned long flags;

        spin_lock_irqsave(&power->spinlock, flags);

        if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) {
                netif_stop_queue(ipa->modem_netdev);
                __set_bit(IPA_POWER_FLAG_STOPPED, power->flags);
        }

        spin_unlock_irqrestore(&power->spinlock, flags);
}

/* This function starts the modem netdev transmit queue, but only if the
 * STOPPED flag is set.  That flag is cleared if it was set.  If the queue
 * was restarted, the STARTED flag is set; this allows ipa_start_xmit()
 * to skip stopping the queue in the event of a race.
 */
void ipa_power_modem_queue_wake(struct ipa *ipa)
{
        struct ipa_power *power = ipa->power;
        unsigned long flags;

        spin_lock_irqsave(&power->spinlock, flags);

        if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) {
                __set_bit(IPA_POWER_FLAG_STARTED, power->flags);
                netif_wake_queue(ipa->modem_netdev);
        }

        spin_unlock_irqrestore(&power->spinlock, flags);
}

/* This function clears the STARTED flag once the TX queue is operating */
void ipa_power_modem_queue_active(struct ipa *ipa)
{
        clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
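
/* Illustrative sketch (hypothetical, not part of this file) of how a
 * transmit path could use the helpers above together with runtime PM.
 * The real caller, ipa_start_xmit(), lives elsewhere in the driver and
 * handles errors more carefully; error handling is elided here:
 *
 *	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
 *					      struct net_device *netdev)
 *	{
 *		struct ipa *ipa = example_netdev_to_ipa(netdev);  // hypothetical helper
 *		struct device *dev = &ipa->pdev->dev;
 *
 *		if (pm_runtime_get(dev) < 1) {
 *			// Not powered yet: park the queue (unless the resume
 *			// path already restarted it) and let the stack retry
 *			// once ipa_power_modem_queue_wake() runs.
 *			ipa_power_modem_queue_stop(ipa);
 *			pm_runtime_put_noidle(dev);
 *			return NETDEV_TX_BUSY;
 *		}
 *
 *		ipa_power_modem_queue_active(ipa);	// TX is running; clear STARTED
 *
 *		// ... queue the skb to the modem TX endpoint here ...
 *
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *
 *		return NETDEV_TX_OK;
 *	}
 */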

int ipa_power_setup(struct ipa *ipa)
{
        int ret;

        ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
                          ipa_suspend_handler);

        ret = device_init_wakeup(&ipa->pdev->dev, true);
        if (ret)
                ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);

        return ret;
}

void ipa_power_teardown(struct ipa *ipa)
{
        (void)device_init_wakeup(&ipa->pdev->dev, false);
        ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
}

/* Initialize IPA power management */
struct ipa_power *
ipa_power_init(struct device *dev, const struct ipa_power_data *data)
{
        struct ipa_power *power;
        struct clk *clk;
        int ret;

        clk = clk_get(dev, "core");
        if (IS_ERR(clk)) {
                dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");

                return ERR_CAST(clk);
        }

        ret = clk_set_rate(clk, data->core_clock_rate);
        if (ret) {
                dev_err(dev, "error %d setting core clock rate to %u\n",
                        ret, data->core_clock_rate);
                goto err_clk_put;
        }

        power = kzalloc(sizeof(*power), GFP_KERNEL);
        if (!power) {
                ret = -ENOMEM;
                goto err_clk_put;
        }
        power->dev = dev;
        power->core = clk;
        spin_lock_init(&power->spinlock);
        power->interconnect_count = data->interconnect_count;

        ret = ipa_interconnect_init(power, dev, data->interconnect_data);
        if (ret)
                goto err_kfree;

        pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);

        return power;

err_kfree:
        kfree(power);
err_clk_put:
        clk_put(clk);

        return ERR_PTR(ret);
}

/* Inverse of ipa_power_init() */
void ipa_power_exit(struct ipa_power *power)
{
        struct device *dev = power->dev;
        struct clk *clk = power->core;

        pm_runtime_disable(dev);
        pm_runtime_dont_use_autosuspend(dev);
        ipa_interconnect_exit(power);
        kfree(power);
        clk_put(clk);
}

const struct dev_pm_ops ipa_pm_ops = {
        .suspend                = ipa_suspend,
        .resume                 = ipa_resume,
        .runtime_suspend        = ipa_runtime_suspend,
        .runtime_resume         = ipa_runtime_resume,
};
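
/* These operations take effect once they are referenced by the IPA platform
 * driver.  A minimal sketch of that wiring (the real registration lives
 * elsewhere in the driver and has more fields; probe/remove names here are
 * hypothetical):
 *
 *	static struct platform_driver example_ipa_driver = {
 *		.probe	= example_ipa_probe,
 *		.remove	= example_ipa_remove,
 *		.driver	= {
 *			.name	= "ipa",
 *			.pm	= &ipa_pm_ops,
 *		},
 *	};
 *	module_platform_driver(example_ipa_driver);
 */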