linux/drivers/base/power/qos.c
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value: register requests
 * Watchers of QoS value: get notified when the target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register different types of notification callbacks:
 *  . a per-device notification callback using the dev_pm_qos_*_notifier API.
 *    The notification chain data is stored in the per-device constraint
 *    data struct.
 *  . a system-wide notification callback using the dev_pm_qos_*_global_notifier
 *    API. The notification chain data is stored in a static variable.
 *
 * Notes about the per-device constraint data struct allocation:
 *  . The per-device constraints data struct ptr is stored in the device's
 *    dev_pm_info.
 *  . To minimize the data usage by the per-device constraints, the data struct
 *    is only allocated at the first call to dev_pm_qos_add_request.
 *  . The data is later freed when the device is removed from the system.
 *  . A global mutex protects the constraints users from the data being
 *    allocated and freed.
 */

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
        struct dev_pm_qos *qos = dev->power.qos;
        struct pm_qos_flags *pqf;
        s32 val;

        if (IS_ERR_OR_NULL(qos))
                return PM_QOS_FLAGS_UNDEFINED;

        pqf = &qos->flags;
        if (list_empty(&pqf->list))
                return PM_QOS_FLAGS_UNDEFINED;

        val = pqf->effective_flags & mask;
        if (val)
                return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

        return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
        unsigned long irqflags;
        enum pm_qos_flags_status ret;

        spin_lock_irqsave(&dev->power.lock, irqflags);
        ret = __dev_pm_qos_flags(dev, mask);
        spin_unlock_irqrestore(&dev->power.lock, irqflags);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
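
/*
 * Illustrative sketch, not part of this file: platform or PM domain code can
 * consult the aggregated flags before powering a device off.  The helper name
 * below is an assumption made up for the example.
 *
 *        static bool foo_may_power_off(struct device *dev)
 *        {
 *                return dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) <=
 *                                PM_QOS_FLAGS_NONE;
 *        }
 *
 * PM_QOS_FLAGS_UNDEFINED and PM_QOS_FLAGS_NONE both mean that no NO_POWER_OFF
 * request is in effect; PM_QOS_FLAGS_SOME/ALL mean at least one requester has
 * set the flag.
 */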

/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
        return IS_ERR_OR_NULL(dev->power.qos) ?
                0 : pm_qos_read_value(&dev->power.qos->latency);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
        unsigned long flags;
        s32 ret;

        spin_lock_irqsave(&dev->power.lock, flags);
        ret = __dev_pm_qos_read_value(dev);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return ret;
}
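
/*
 * Illustrative sketch, not part of this file: bus or PM domain code can weigh
 * a candidate low-power state against the aggregated latency constraint.  The
 * helper name, the resume_latency_us parameter and the reading of the default
 * value 0 as "no constraint requested" are assumptions made for the example.
 *
 *        static bool foo_state_allowed(struct device *dev, s32 resume_latency_us)
 *        {
 *                s32 limit = dev_pm_qos_read_value(dev);
 *
 *                return limit == 0 || resume_latency_us <= limit;
 *        }
 */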

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply.
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and, if needed, call the per-device and the global notification
 * callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
                            enum pm_qos_req_action action, s32 value)
{
        struct dev_pm_qos *qos = req->dev->power.qos;
        int ret;

        switch (req->type) {
        case DEV_PM_QOS_LATENCY:
                ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
                                           action, value);
                if (ret) {
                        value = pm_qos_read_value(&qos->latency);
                        blocking_notifier_call_chain(&dev_pm_notifiers,
                                                     (unsigned long)value,
                                                     req);
                }
                break;
        case DEV_PM_QOS_LATENCY_TOLERANCE:
                ret = pm_qos_update_target(&qos->latency_tolerance,
                                           &req->data.pnode, action, value);
                if (ret) {
                        value = pm_qos_read_value(&qos->latency_tolerance);
                        req->dev->device_rh->power.set_latency_tolerance(
                                                               req->dev, value);
                }
                break;
        case DEV_PM_QOS_FLAGS:
                ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
                                          action, value);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
        struct dev_pm_qos *qos;
        struct pm_qos_constraints *c;
        struct blocking_notifier_head *n;

        qos = kzalloc(sizeof(*qos), GFP_KERNEL);
        if (!qos)
                return -ENOMEM;

        n = kzalloc(sizeof(*n), GFP_KERNEL);
        if (!n) {
                kfree(qos);
                return -ENOMEM;
        }
        BLOCKING_INIT_NOTIFIER_HEAD(n);

        c = &qos->latency;
        plist_head_init(&c->list);
        c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
        c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
        c->type = PM_QOS_MIN;
        c->notifiers = n;

        c = &qos->latency_tolerance;
        plist_head_init(&c->list);
        c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
        c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
        c->type = PM_QOS_MIN;

        INIT_LIST_HEAD(&qos->flags.list);

        spin_lock_irq(&dev->power.lock);
        dev->power.qos = qos;
        spin_unlock_irq(&dev->power.lock);

        return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
        struct dev_pm_qos *qos;
        struct dev_pm_qos_request *req, *tmp;
        struct pm_qos_constraints *c;
        struct pm_qos_flags *f;

        mutex_lock(&dev_pm_qos_sysfs_mtx);

        /*
         * If the device's PM QoS resume latency limit or PM QoS flags have been
         * exposed to user space, they have to be hidden at this point.
         */
        pm_qos_sysfs_remove_latency(dev);
        pm_qos_sysfs_remove_flags(dev);

        mutex_lock(&dev_pm_qos_mtx);

        __dev_pm_qos_hide_latency_limit(dev);
        __dev_pm_qos_hide_flags(dev);

        qos = dev->power.qos;
        if (!qos)
                goto out;

        /* Flush the constraints lists for the device. */
        c = &qos->latency;
        plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
                /*
                 * Update constraints list and call the notification
                 * callbacks if needed
                 */
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }
        c = &qos->latency_tolerance;
        plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }
        f = &qos->flags;
        list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }

        spin_lock_irq(&dev->power.lock);
        dev->power.qos = ERR_PTR(-ENODEV);
        spin_unlock_irq(&dev->power.lock);

        kfree(qos->latency.notifiers);
        kfree(qos);

 out:
        mutex_unlock(&dev_pm_qos_mtx);

        mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_request(struct device *dev,
                                       struct dev_pm_qos_request *req)
{
        return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
                        && !dev->device_rh->power.set_latency_tolerance);
}

static int __dev_pm_qos_add_request(struct device *dev,
                                    struct dev_pm_qos_request *req,
                                    enum dev_pm_qos_req_type type, s32 value)
{
        int ret = 0;

        if (!dev || dev_pm_qos_invalid_request(dev, req))
                return -EINVAL;

        if (WARN(dev_pm_qos_request_active(req),
                 "%s() called for already added request\n", __func__))
                return -EINVAL;

        if (IS_ERR(dev->power.qos))
                ret = -ENODEV;
        else if (!dev->power.qos)
                ret = dev_pm_qos_constraints_allocate(dev);

        trace_dev_pm_qos_add_request(dev_name(dev), type, value);
        if (!ret) {
                req->dev = dev;
                req->type = type;
                ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
        }
        return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
                           enum dev_pm_qos_req_type type, s32 value)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = __dev_pm_qos_add_request(dev, req, type, value);
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
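
/*
 * Illustrative sketch, not part of this file: a driver that cannot tolerate
 * more than 100 us of wakeup latency while a transfer is in flight could
 * bracket the transfer with a request.  The function names, the request
 * variable and the 100 us budget are assumptions made up for the example.
 *
 *        static struct dev_pm_qos_request foo_latency_req;
 *
 *        static int foo_start_transfer(struct device *dev)
 *        {
 *                return dev_pm_qos_add_request(dev, &foo_latency_req,
 *                                              DEV_PM_QOS_LATENCY, 100);
 *        }
 *
 *        static void foo_end_transfer(struct device *dev)
 *        {
 *                dev_pm_qos_remove_request(&foo_latency_req);
 *        }
 *
 * A negative return value means the request was not added and the handle must
 * not be passed to dev_pm_qos_update_request() or dev_pm_qos_remove_request().
 */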

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
                                       s32 new_value)
{
        s32 curr_value;
        int ret = 0;

        if (!req) /* guard against callers passing in null */
                return -EINVAL;

        if (WARN(!dev_pm_qos_request_active(req),
                 "%s() called for unknown object\n", __func__))
                return -EINVAL;

        if (IS_ERR_OR_NULL(req->dev->power.qos))
                return -ENODEV;

        switch (req->type) {
        case DEV_PM_QOS_LATENCY:
        case DEV_PM_QOS_LATENCY_TOLERANCE:
                curr_value = req->data.pnode.prio;
                break;
        case DEV_PM_QOS_FLAGS:
                curr_value = req->data.flr.flags;
                break;
        default:
                return -EINVAL;
        }

        trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
                                        new_value);
        if (curr_value != new_value)
                ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

        return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = __dev_pm_qos_update_request(req, new_value);
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
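
/*
 * Illustrative sketch, not part of this file: once a request has been added,
 * only the value needs to change when the workload changes; the handle stays
 * the same.  The request variable and the values are assumptions made up for
 * the example.
 *
 *        // tighten the constraint while streaming
 *        dev_pm_qos_update_request(&foo_latency_req, 20);
 *
 *        // relax it again (back to the default) when idle
 *        dev_pm_qos_update_request(&foo_latency_req, PM_QOS_DEFAULT_VALUE);
 */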

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
        int ret;

        if (!req) /* guard against callers passing in null */
                return -EINVAL;

        if (WARN(!dev_pm_qos_request_active(req),
                 "%s() called for unknown object\n", __func__))
                return -EINVAL;

        if (IS_ERR_OR_NULL(req->dev->power.qos))
                return -ENODEV;

        trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
                                        PM_QOS_DEFAULT_VALUE);
        ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
        memset(req, 0, sizeof(*req));
        return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = __dev_pm_qos_remove_request(req);
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
        int ret = 0;

        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR(dev->power.qos))
                ret = -ENODEV;
        else if (!dev->power.qos)
                ret = dev_pm_qos_constraints_allocate(dev);

        if (!ret)
                ret = blocking_notifier_chain_register(
                                dev->power.qos->latency.notifiers, notifier);

        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
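
/*
 * Illustrative sketch, not part of this file: a watcher of one device's
 * aggregated latency target registers a notifier block.  The callback and
 * variable names are assumptions; per the PM QoS core, the new aggregate
 * value is passed as the "action" argument of the callback.
 *
 *        static int foo_latency_notify(struct notifier_block *nb,
 *                                      unsigned long value, void *data)
 *        {
 *                pr_debug("new device latency target: %lu us\n", value);
 *                return NOTIFY_OK;
 *        }
 *
 *        static struct notifier_block foo_nb = {
 *                .notifier_call = foo_latency_notify,
 *        };
 *
 *        ret = dev_pm_qos_add_notifier(dev, &foo_nb);
 *        ...
 *        dev_pm_qos_remove_notifier(dev, &foo_nb);
 */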

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
                               struct notifier_block *notifier)
{
        int retval = 0;

        mutex_lock(&dev_pm_qos_mtx);

        /* Silently return if the constraints object is not present. */
        if (!IS_ERR_OR_NULL(dev->power.qos))
                retval = blocking_notifier_chain_unregister(
                                dev->power.qos->latency.notifiers,
                                notifier);

        mutex_unlock(&dev_pm_qos_mtx);
        return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_global_notifier - sets notification entry for changes to
 * target value of the PM QoS constraints for any device
 *
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
{
        return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);

/**
 * dev_pm_qos_remove_global_notifier - deletes notification for changes to
 * target value of PM QoS constraints for any device
 *
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
{
        return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
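
/*
 * Illustrative sketch, not part of this file: a system-wide watcher uses the
 * global chain instead.  As apply_constraint() above shows, the callback gets
 * the new aggregate latency value and, as its data pointer, the request that
 * triggered the change.  Names below are assumptions made up for the example.
 *
 *        static int foo_global_notify(struct notifier_block *nb,
 *                                     unsigned long value, void *data)
 *        {
 *                struct dev_pm_qos_request *req = data;
 *
 *                pr_debug("%s: latency target now %lu us\n",
 *                         dev_name(req->dev), value);
 *                return NOTIFY_OK;
 *        }
 *
 *        static struct notifier_block foo_global_nb = {
 *                .notifier_call = foo_global_notify,
 *        };
 *
 *        ret = dev_pm_qos_add_global_notifier(&foo_global_nb);
 */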

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
                                    struct dev_pm_qos_request *req, s32 value)
{
        struct device *ancestor = dev->parent;
        int ret = -ENODEV;

        while (ancestor && !ancestor->power.ignore_children)
                ancestor = ancestor->parent;

        if (ancestor)
                ret = dev_pm_qos_add_request(ancestor, req,
                                             DEV_PM_QOS_LATENCY, value);

        if (ret < 0)
                req->dev = NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
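
/*
 * Illustrative sketch, not part of this file: a driver whose wakeup latency is
 * really determined by an ancestor (the closest one with power.ignore_children
 * set) can place the request on that ancestor instead of on its own device.
 * The request variable and the 1000 us budget are assumptions made up for the
 * example.
 *
 *        static struct dev_pm_qos_request foo_ancestor_req;
 *
 *        ret = dev_pm_qos_add_ancestor_request(dev, &foo_ancestor_req, 1000);
 *        if (ret < 0)
 *                ; // no suitable ancestor found, or adding the request failed
 */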

#ifdef CONFIG_PM_RUNTIME
static void __dev_pm_qos_drop_user_request(struct device *dev,
                                           enum dev_pm_qos_req_type type)
{
        struct dev_pm_qos_request *req = NULL;

        switch (type) {
        case DEV_PM_QOS_LATENCY:
                req = dev->power.qos->latency_req;
                dev->power.qos->latency_req = NULL;
                break;
        case DEV_PM_QOS_LATENCY_TOLERANCE:
                req = dev->power.qos->latency_tolerance_req;
                dev->power.qos->latency_tolerance_req = NULL;
                break;
        case DEV_PM_QOS_FLAGS:
                req = dev->power.qos->flags_req;
                dev->power.qos->flags_req = NULL;
                break;
        }
        __dev_pm_qos_remove_request(req);
        kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
                                         enum dev_pm_qos_req_type type)
{
        mutex_lock(&dev_pm_qos_mtx);
        __dev_pm_qos_drop_user_request(dev, type);
        mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
        struct dev_pm_qos_request *req;
        int ret;

        if (!device_is_registered(dev) || value < 0)
                return -EINVAL;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
        if (ret < 0) {
                kfree(req);
                return ret;
        }

        mutex_lock(&dev_pm_qos_sysfs_mtx);

        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR_OR_NULL(dev->power.qos))
                ret = -ENODEV;
        else if (dev->power.qos->latency_req)
                ret = -EEXIST;

        if (ret < 0) {
                __dev_pm_qos_remove_request(req);
                kfree(req);
                mutex_unlock(&dev_pm_qos_mtx);
                goto out;
        }
        dev->power.qos->latency_req = req;

        mutex_unlock(&dev_pm_qos_mtx);

        ret = pm_qos_sysfs_add_latency(dev);
        if (ret)
                dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);

 out:
        mutex_unlock(&dev_pm_qos_sysfs_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
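
/*
 * Illustrative sketch, not part of this file: a driver or bus type that wants
 * user space to be able to cap its device's resume latency calls this once
 * the device is registered; the limit is then exposed as a resume-latency
 * attribute (pm_qos_resume_latency_us) in the device's power/ directory in
 * sysfs.  The initial value of 0 is an assumption for the example, meaning
 * no limit has been requested yet.
 *
 *        ret = dev_pm_qos_expose_latency_limit(dev, 0);
 *        ...
 *        dev_pm_qos_hide_latency_limit(dev);
 */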

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
        if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
        mutex_lock(&dev_pm_qos_sysfs_mtx);

        pm_qos_sysfs_remove_latency(dev);

        mutex_lock(&dev_pm_qos_mtx);
        __dev_pm_qos_hide_latency_limit(dev);
        mutex_unlock(&dev_pm_qos_mtx);

        mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
        struct dev_pm_qos_request *req;
        int ret;

        if (!device_is_registered(dev))
                return -EINVAL;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
        if (ret < 0) {
                kfree(req);
                return ret;
        }

        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_sysfs_mtx);

        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR_OR_NULL(dev->power.qos))
                ret = -ENODEV;
        else if (dev->power.qos->flags_req)
                ret = -EEXIST;

        if (ret < 0) {
                __dev_pm_qos_remove_request(req);
                kfree(req);
                mutex_unlock(&dev_pm_qos_mtx);
                goto out;
        }
        dev->power.qos->flags_req = req;

        mutex_unlock(&dev_pm_qos_mtx);

        ret = pm_qos_sysfs_add_flags(dev);
        if (ret)
                dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
        mutex_unlock(&dev_pm_qos_sysfs_mtx);
        pm_runtime_put(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
        if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_sysfs_mtx);

        pm_qos_sysfs_remove_flags(dev);

        mutex_lock(&dev_pm_qos_mtx);
        __dev_pm_qos_hide_flags(dev);
        mutex_unlock(&dev_pm_qos_mtx);

        mutex_unlock(&dev_pm_qos_sysfs_mtx);
        pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
        s32 value;
        int ret;

        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
                ret = -EINVAL;
                goto out;
        }

        value = dev_pm_qos_requested_flags(dev);
        if (set)
                value |= mask;
        else
                value &= ~mask;

        ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
        mutex_unlock(&dev_pm_qos_mtx);
        pm_runtime_put(dev);
        return ret;
}
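
/*
 * Illustrative sketch, not part of this file: this helper backs the user
 * space view of the flags (the power/pm_qos_no_power_off attribute uses a
 * call of this kind); a store handler would parse the user's input and flip
 * the flag in the request owned by user space.  The "value" variable is an
 * assumption made up for the example.
 *
 *        ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF,
 *                                      value != 0);
 */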

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
        s32 ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = IS_ERR_OR_NULL(dev->power.qos)
                || !dev->power.qos->latency_tolerance_req ?
                        PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
                        dev->power.qos->latency_tolerance_req->data.pnode.prio;
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR_OR_NULL(dev->power.qos)
            || !dev->power.qos->latency_tolerance_req) {
                struct dev_pm_qos_request *req;

                if (val < 0) {
                        if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
                                ret = 0;
                        else
                                ret = -EINVAL;
                        goto out;
                }
                req = kzalloc(sizeof(*req), GFP_KERNEL);
                if (!req) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
                if (ret < 0) {
                        kfree(req);
                        goto out;
                }
                dev->power.qos->latency_tolerance_req = req;
        } else {
                if (val < 0) {
                        __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
                        ret = 0;
                } else {
                        ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
                }
        }

 out:
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
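
/*
 * Illustrative sketch, not part of this file: the sysfs store handler for the
 * latency tolerance attribute ends up here.  It would pass a non-negative
 * number of microseconds through unchanged and translate a special keyword
 * (or a negative value) into PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT to drop
 * the user space request again.  The "value" variable is an assumption made
 * up for the example.
 *
 *        // user wrote a number of microseconds
 *        ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
 *
 *        // user asked for the constraint to be dropped
 *        ret = dev_pm_qos_update_user_latency_tolerance(dev,
 *                        PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
 */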

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
        int ret;

        if (!dev->device_rh->power.set_latency_tolerance)
                return -EINVAL;

        mutex_lock(&dev_pm_qos_sysfs_mtx);
        ret = pm_qos_sysfs_add_latency_tolerance(dev);
        mutex_unlock(&dev_pm_qos_sysfs_mtx);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
        mutex_lock(&dev_pm_qos_sysfs_mtx);
        pm_qos_sysfs_remove_latency_tolerance(dev);
        mutex_unlock(&dev_pm_qos_sysfs_mtx);

        /* Remove the request from user space now */
        pm_runtime_get_sync(dev);
        dev_pm_qos_update_user_latency_tolerance(dev,
                PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
        pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
#else /* !CONFIG_PM_RUNTIME */
static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
static void __dev_pm_qos_hide_flags(struct device *dev) {}
#endif /* CONFIG_PM_RUNTIME */