linux/drivers/gpu/drm/radeon/radeon_pm.c
<<
>>
Prefs
   1/*
   2 * Permission is hereby granted, free of charge, to any person obtaining a
   3 * copy of this software and associated documentation files (the "Software"),
   4 * to deal in the Software without restriction, including without limitation
   5 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   6 * and/or sell copies of the Software, and to permit persons to whom the
   7 * Software is furnished to do so, subject to the following conditions:
   8 *
   9 * The above copyright notice and this permission notice shall be included in
  10 * all copies or substantial portions of the Software.
  11 *
  12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  15 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  16 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  17 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  18 * OTHER DEALINGS IN THE SOFTWARE.
  19 *
  20 * Authors: Rafał Miłecki <zajec5@gmail.com>
  21 *          Alex Deucher <alexdeucher@gmail.com>
  22 */
  23#include <drm/drmP.h>
  24#include "radeon.h"
  25#include "avivod.h"
  26#include "atom.h"
  27#include "r600_dpm.h"
  28#include <linux/power_supply.h>
  29#include <linux/hwmon.h>
  30#include <linux/hwmon-sysfs.h>
  31
  32#define RADEON_IDLE_LOOP_MS 100
  33#define RADEON_RECLOCK_DELAY_MS 200
  34#define RADEON_WAIT_VBLANK_TIMEOUT 200
  35
  36static const char *radeon_pm_state_type_name[5] = {
  37        "",
  38        "Powersave",
  39        "Battery",
  40        "Balanced",
  41        "Performance",
  42};
  43
  44static void radeon_dynpm_idle_work_handler(struct work_struct *work);
  45static int radeon_debugfs_pm_init(struct radeon_device *rdev);
  46static bool radeon_pm_in_vbl(struct radeon_device *rdev);
  47static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
  48static void radeon_pm_update_profile(struct radeon_device *rdev);
  49static void radeon_pm_set_clocks(struct radeon_device *rdev);
  50
  51int radeon_pm_get_type_index(struct radeon_device *rdev,
  52                             enum radeon_pm_state_type ps_type,
  53                             int instance)
  54{
  55        int i;
  56        int found_instance = -1;
  57
  58        for (i = 0; i < rdev->pm.num_power_states; i++) {
  59                if (rdev->pm.power_state[i].type == ps_type) {
  60                        found_instance++;
  61                        if (found_instance == instance)
  62                                return i;
  63                }
  64        }
  65        /* return default if no match */
  66        return rdev->pm.default_power_state_index;
  67}
  68
  69void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
  70{
  71        if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
  72                mutex_lock(&rdev->pm.mutex);
  73                if (power_supply_is_system_supplied() > 0)
  74                        rdev->pm.dpm.ac_power = true;
  75                else
  76                        rdev->pm.dpm.ac_power = false;
  77                if (rdev->family == CHIP_ARUBA) {
  78                        if (rdev->asic->dpm.enable_bapm)
  79                                radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
  80                }
  81                mutex_unlock(&rdev->pm.mutex);
  82        } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
  83                if (rdev->pm.profile == PM_PROFILE_AUTO) {
  84                        mutex_lock(&rdev->pm.mutex);
  85                        radeon_pm_update_profile(rdev);
  86                        radeon_pm_set_clocks(rdev);
  87                        mutex_unlock(&rdev->pm.mutex);
  88                }
  89        }
  90}
  91
  92static void radeon_pm_update_profile(struct radeon_device *rdev)
  93{
  94        switch (rdev->pm.profile) {
  95        case PM_PROFILE_DEFAULT:
  96                rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
  97                break;
  98        case PM_PROFILE_AUTO:
  99                if (power_supply_is_system_supplied() > 0) {
 100                        if (rdev->pm.active_crtc_count > 1)
 101                                rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
 102                        else
 103                                rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
 104                } else {
 105                        if (rdev->pm.active_crtc_count > 1)
 106                                rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
 107                        else
 108                                rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
 109                }
 110                break;
 111        case PM_PROFILE_LOW:
 112                if (rdev->pm.active_crtc_count > 1)
 113                        rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
 114                else
 115                        rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
 116                break;
 117        case PM_PROFILE_MID:
 118                if (rdev->pm.active_crtc_count > 1)
 119                        rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
 120                else
 121                        rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
 122                break;
 123        case PM_PROFILE_HIGH:
 124                if (rdev->pm.active_crtc_count > 1)
 125                        rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
 126                else
 127                        rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
 128                break;
 129        }
 130
 131        if (rdev->pm.active_crtc_count == 0) {
 132                rdev->pm.requested_power_state_index =
 133                        rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
 134                rdev->pm.requested_clock_mode_index =
 135                        rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
 136        } else {
 137                rdev->pm.requested_power_state_index =
 138                        rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
 139                rdev->pm.requested_clock_mode_index =
 140                        rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
 141        }
 142}
 143
 144static void radeon_unmap_vram_bos(struct radeon_device *rdev)
 145{
 146        struct radeon_bo *bo, *n;
 147
 148        if (list_empty(&rdev->gem.objects))
 149                return;
 150
 151        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
 152                if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
 153                        ttm_bo_unmap_virtual(&bo->tbo);
 154        }
 155}
 156
 157static void radeon_sync_with_vblank(struct radeon_device *rdev)
 158{
 159        if (rdev->pm.active_crtcs) {
 160                rdev->pm.vblank_sync = false;
 161                wait_event_timeout(
 162                        rdev->irq.vblank_queue, rdev->pm.vblank_sync,
 163                        msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
 164        }
 165}
 166
 167static void radeon_set_power_state(struct radeon_device *rdev)
 168{
 169        u32 sclk, mclk;
 170        bool misc_after = false;
 171
 172        if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
 173            (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
 174                return;
 175
 176        if (radeon_gui_idle(rdev)) {
 177                sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
 178                        clock_info[rdev->pm.requested_clock_mode_index].sclk;
 179                if (sclk > rdev->pm.default_sclk)
 180                        sclk = rdev->pm.default_sclk;
 181
 182                /* starting with BTC, there is one state that is used for both
 183                 * MH and SH.  Difference is that we always use the high clock index for
 184                 * mclk and vddci.
 185                 */
 186                if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
 187                    (rdev->family >= CHIP_BARTS) &&
 188                    rdev->pm.active_crtc_count &&
 189                    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
 190                     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
 191                        mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
 192                                clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
 193                else
 194                        mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
 195                                clock_info[rdev->pm.requested_clock_mode_index].mclk;
 196
 197                if (mclk > rdev->pm.default_mclk)
 198                        mclk = rdev->pm.default_mclk;
 199
 200                /* upvolt before raising clocks, downvolt after lowering clocks */
 201                if (sclk < rdev->pm.current_sclk)
 202                        misc_after = true;
 203
 204                radeon_sync_with_vblank(rdev);
 205
 206                if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
 207                        if (!radeon_pm_in_vbl(rdev))
 208                                return;
 209                }
 210
 211                radeon_pm_prepare(rdev);
 212
 213                if (!misc_after)
 214                        /* voltage, pcie lanes, etc.*/
 215                        radeon_pm_misc(rdev);
 216
 217                /* set engine clock */
 218                if (sclk != rdev->pm.current_sclk) {
 219                        radeon_pm_debug_check_in_vbl(rdev, false);
 220                        radeon_set_engine_clock(rdev, sclk);
 221                        radeon_pm_debug_check_in_vbl(rdev, true);
 222                        rdev->pm.current_sclk = sclk;
 223                        DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
 224                }
 225
 226                /* set memory clock */
 227                if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
 228                        radeon_pm_debug_check_in_vbl(rdev, false);
 229                        radeon_set_memory_clock(rdev, mclk);
 230                        radeon_pm_debug_check_in_vbl(rdev, true);
 231                        rdev->pm.current_mclk = mclk;
 232                        DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
 233                }
 234
 235                if (misc_after)
 236                        /* voltage, pcie lanes, etc.*/
 237                        radeon_pm_misc(rdev);
 238
 239                radeon_pm_finish(rdev);
 240
 241                rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
 242                rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
 243        } else
 244                DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
 245}
 246
 247static void radeon_pm_set_clocks(struct radeon_device *rdev)
 248{
 249        int i, r;
 250
 251        /* no need to take locks, etc. if nothing's going to change */
 252        if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
 253            (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
 254                return;
 255
 256        down_write(&rdev->pm.mclk_lock);
 257        mutex_lock(&rdev->ring_lock);
 258
 259        /* wait for the rings to drain */
 260        for (i = 0; i < RADEON_NUM_RINGS; i++) {
 261                struct radeon_ring *ring = &rdev->ring[i];
 262                if (!ring->ready) {
 263                        continue;
 264                }
 265                r = radeon_fence_wait_empty(rdev, i);
 266                if (r) {
 267                        /* needs a GPU reset dont reset here */
 268                        mutex_unlock(&rdev->ring_lock);
 269                        up_write(&rdev->pm.mclk_lock);
 270                        return;
 271                }
 272        }
 273
 274        radeon_unmap_vram_bos(rdev);
 275
 276        if (rdev->irq.installed) {
 277                for (i = 0; i < rdev->num_crtc; i++) {
 278                        if (rdev->pm.active_crtcs & (1 << i)) {
 279                                rdev->pm.req_vblank |= (1 << i);
 280                                drm_vblank_get(rdev->ddev, i);
 281                        }
 282                }
 283        }
 284
 285        radeon_set_power_state(rdev);
 286
 287        if (rdev->irq.installed) {
 288                for (i = 0; i < rdev->num_crtc; i++) {
 289                        if (rdev->pm.req_vblank & (1 << i)) {
 290                                rdev->pm.req_vblank &= ~(1 << i);
 291                                drm_vblank_put(rdev->ddev, i);
 292                        }
 293                }
 294        }
 295
 296        /* update display watermarks based on new power state */
 297        radeon_update_bandwidth_info(rdev);
 298        if (rdev->pm.active_crtc_count)
 299                radeon_bandwidth_update(rdev);
 300
 301        rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 302
 303        mutex_unlock(&rdev->ring_lock);
 304        up_write(&rdev->pm.mclk_lock);
 305}
 306
 307static void radeon_pm_print_states(struct radeon_device *rdev)
 308{
 309        int i, j;
 310        struct radeon_power_state *power_state;
 311        struct radeon_pm_clock_info *clock_info;
 312
 313        DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
 314        for (i = 0; i < rdev->pm.num_power_states; i++) {
 315                power_state = &rdev->pm.power_state[i];
 316                DRM_DEBUG_DRIVER("State %d: %s\n", i,
 317                        radeon_pm_state_type_name[power_state->type]);
 318                if (i == rdev->pm.default_power_state_index)
 319                        DRM_DEBUG_DRIVER("\tDefault");
 320                if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
 321                        DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
 322                if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
 323                        DRM_DEBUG_DRIVER("\tSingle display only\n");
 324                DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
 325                for (j = 0; j < power_state->num_clock_modes; j++) {
 326                        clock_info = &(power_state->clock_info[j]);
 327                        if (rdev->flags & RADEON_IS_IGP)
 328                                DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
 329                                                 j,
 330                                                 clock_info->sclk * 10);
 331                        else
 332                                DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
 333                                                 j,
 334                                                 clock_info->sclk * 10,
 335                                                 clock_info->mclk * 10,
 336                                                 clock_info->voltage.voltage);
 337                }
 338        }
 339}
 340
 341static ssize_t radeon_get_pm_profile(struct device *dev,
 342                                     struct device_attribute *attr,
 343                                     char *buf)
 344{
 345        struct drm_device *ddev = dev_get_drvdata(dev);
 346        struct radeon_device *rdev = ddev->dev_private;
 347        int cp = rdev->pm.profile;
 348
 349        return snprintf(buf, PAGE_SIZE, "%s\n",
 350                        (cp == PM_PROFILE_AUTO) ? "auto" :
 351                        (cp == PM_PROFILE_LOW) ? "low" :
 352                        (cp == PM_PROFILE_MID) ? "mid" :
 353                        (cp == PM_PROFILE_HIGH) ? "high" : "default");
 354}
 355
 356static ssize_t radeon_set_pm_profile(struct device *dev,
 357                                     struct device_attribute *attr,
 358                                     const char *buf,
 359                                     size_t count)
 360{
 361        struct drm_device *ddev = dev_get_drvdata(dev);
 362        struct radeon_device *rdev = ddev->dev_private;
 363
 364        /* Can't set profile when the card is off */
 365        if  ((rdev->flags & RADEON_IS_PX) &&
 366             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 367                return -EINVAL;
 368
 369        mutex_lock(&rdev->pm.mutex);
 370        if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 371                if (strncmp("default", buf, strlen("default")) == 0)
 372                        rdev->pm.profile = PM_PROFILE_DEFAULT;
 373                else if (strncmp("auto", buf, strlen("auto")) == 0)
 374                        rdev->pm.profile = PM_PROFILE_AUTO;
 375                else if (strncmp("low", buf, strlen("low")) == 0)
 376                        rdev->pm.profile = PM_PROFILE_LOW;
 377                else if (strncmp("mid", buf, strlen("mid")) == 0)
 378                        rdev->pm.profile = PM_PROFILE_MID;
 379                else if (strncmp("high", buf, strlen("high")) == 0)
 380                        rdev->pm.profile = PM_PROFILE_HIGH;
 381                else {
 382                        count = -EINVAL;
 383                        goto fail;
 384                }
 385                radeon_pm_update_profile(rdev);
 386                radeon_pm_set_clocks(rdev);
 387        } else
 388                count = -EINVAL;
 389
 390fail:
 391        mutex_unlock(&rdev->pm.mutex);
 392
 393        return count;
 394}
 395
 396static ssize_t radeon_get_pm_method(struct device *dev,
 397                                    struct device_attribute *attr,
 398                                    char *buf)
 399{
 400        struct drm_device *ddev = dev_get_drvdata(dev);
 401        struct radeon_device *rdev = ddev->dev_private;
 402        int pm = rdev->pm.pm_method;
 403
 404        return snprintf(buf, PAGE_SIZE, "%s\n",
 405                        (pm == PM_METHOD_DYNPM) ? "dynpm" :
 406                        (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
 407}
 408
 409static ssize_t radeon_set_pm_method(struct device *dev,
 410                                    struct device_attribute *attr,
 411                                    const char *buf,
 412                                    size_t count)
 413{
 414        struct drm_device *ddev = dev_get_drvdata(dev);
 415        struct radeon_device *rdev = ddev->dev_private;
 416
 417        /* Can't set method when the card is off */
 418        if  ((rdev->flags & RADEON_IS_PX) &&
 419             (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
 420                count = -EINVAL;
 421                goto fail;
 422        }
 423
 424        /* we don't support the legacy modes with dpm */
 425        if (rdev->pm.pm_method == PM_METHOD_DPM) {
 426                count = -EINVAL;
 427                goto fail;
 428        }
 429
 430        if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
 431                mutex_lock(&rdev->pm.mutex);
 432                rdev->pm.pm_method = PM_METHOD_DYNPM;
 433                rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
 434                rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
 435                mutex_unlock(&rdev->pm.mutex);
 436        } else if (strncmp("profile", buf, strlen("profile")) == 0) {
 437                mutex_lock(&rdev->pm.mutex);
 438                /* disable dynpm */
 439                rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
 440                rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 441                rdev->pm.pm_method = PM_METHOD_PROFILE;
 442                mutex_unlock(&rdev->pm.mutex);
 443                cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
 444        } else {
 445                count = -EINVAL;
 446                goto fail;
 447        }
 448        radeon_pm_compute_clocks(rdev);
 449fail:
 450        return count;
 451}
 452
 453static ssize_t radeon_get_dpm_state(struct device *dev,
 454                                    struct device_attribute *attr,
 455                                    char *buf)
 456{
 457        struct drm_device *ddev = dev_get_drvdata(dev);
 458        struct radeon_device *rdev = ddev->dev_private;
 459        enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 460
 461        return snprintf(buf, PAGE_SIZE, "%s\n",
 462                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
 463                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
 464}
 465
 466static ssize_t radeon_set_dpm_state(struct device *dev,
 467                                    struct device_attribute *attr,
 468                                    const char *buf,
 469                                    size_t count)
 470{
 471        struct drm_device *ddev = dev_get_drvdata(dev);
 472        struct radeon_device *rdev = ddev->dev_private;
 473
 474        mutex_lock(&rdev->pm.mutex);
 475        if (strncmp("battery", buf, strlen("battery")) == 0)
 476                rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
 477        else if (strncmp("balanced", buf, strlen("balanced")) == 0)
 478                rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
 479        else if (strncmp("performance", buf, strlen("performance")) == 0)
 480                rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
 481        else {
 482                mutex_unlock(&rdev->pm.mutex);
 483                count = -EINVAL;
 484                goto fail;
 485        }
 486        mutex_unlock(&rdev->pm.mutex);
 487
 488        /* Can't set dpm state when the card is off */
 489        if (!(rdev->flags & RADEON_IS_PX) ||
 490            (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
 491                radeon_pm_compute_clocks(rdev);
 492
 493fail:
 494        return count;
 495}
 496
 497static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
 498                                                       struct device_attribute *attr,
 499                                                       char *buf)
 500{
 501        struct drm_device *ddev = dev_get_drvdata(dev);
 502        struct radeon_device *rdev = ddev->dev_private;
 503        enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
 504
 505        if  ((rdev->flags & RADEON_IS_PX) &&
 506             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 507                return snprintf(buf, PAGE_SIZE, "off\n");
 508
 509        return snprintf(buf, PAGE_SIZE, "%s\n",
 510                        (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
 511                        (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
 512}
 513
 514static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
 515                                                       struct device_attribute *attr,
 516                                                       const char *buf,
 517                                                       size_t count)
 518{
 519        struct drm_device *ddev = dev_get_drvdata(dev);
 520        struct radeon_device *rdev = ddev->dev_private;
 521        enum radeon_dpm_forced_level level;
 522        int ret = 0;
 523
 524        /* Can't force performance level when the card is off */
 525        if  ((rdev->flags & RADEON_IS_PX) &&
 526             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 527                return -EINVAL;
 528
 529        mutex_lock(&rdev->pm.mutex);
 530        if (strncmp("low", buf, strlen("low")) == 0) {
 531                level = RADEON_DPM_FORCED_LEVEL_LOW;
 532        } else if (strncmp("high", buf, strlen("high")) == 0) {
 533                level = RADEON_DPM_FORCED_LEVEL_HIGH;
 534        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
 535                level = RADEON_DPM_FORCED_LEVEL_AUTO;
 536        } else {
 537                count = -EINVAL;
 538                goto fail;
 539        }
 540        if (rdev->asic->dpm.force_performance_level) {
 541                if (rdev->pm.dpm.thermal_active) {
 542                        count = -EINVAL;
 543                        goto fail;
 544                }
 545                ret = radeon_dpm_force_performance_level(rdev, level);
 546                if (ret)
 547                        count = -EINVAL;
 548        }
 549fail:
 550        mutex_unlock(&rdev->pm.mutex);
 551
 552        return count;
 553}
 554
 555static ssize_t radeon_hwmon_get_pwm1_enable(struct device *dev,
 556                                            struct device_attribute *attr,
 557                                            char *buf)
 558{
 559        struct radeon_device *rdev = dev_get_drvdata(dev);
 560        u32 pwm_mode = 0;
 561
 562        if (rdev->asic->dpm.fan_ctrl_get_mode)
 563                pwm_mode = rdev->asic->dpm.fan_ctrl_get_mode(rdev);
 564
 565        /* never 0 (full-speed), fuse or smc-controlled always */
 566        return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
 567}
 568
 569static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
 570                                            struct device_attribute *attr,
 571                                            const char *buf,
 572                                            size_t count)
 573{
 574        struct radeon_device *rdev = dev_get_drvdata(dev);
 575        int err;
 576        int value;
 577
 578        if(!rdev->asic->dpm.fan_ctrl_set_mode)
 579                return -EINVAL;
 580
 581        err = kstrtoint(buf, 10, &value);
 582        if (err)
 583                return err;
 584
 585        switch (value) {
 586        case 1: /* manual, percent-based */
 587                rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC);
 588                break;
 589        default: /* disable */
 590                rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0);
 591                break;
 592        }
 593
 594        return count;
 595}
 596
 597static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev,
 598                                         struct device_attribute *attr,
 599                                         char *buf)
 600{
 601        return sprintf(buf, "%i\n", 0);
 602}
 603
 604static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev,
 605                                         struct device_attribute *attr,
 606                                         char *buf)
 607{
 608        return sprintf(buf, "%i\n", 255);
 609}
 610
 611static ssize_t radeon_hwmon_set_pwm1(struct device *dev,
 612                                     struct device_attribute *attr,
 613                                     const char *buf, size_t count)
 614{
 615        struct radeon_device *rdev = dev_get_drvdata(dev);
 616        int err;
 617        u32 value;
 618
 619        err = kstrtou32(buf, 10, &value);
 620        if (err)
 621                return err;
 622
 623        value = (value * 100) / 255;
 624
 625        err = rdev->asic->dpm.set_fan_speed_percent(rdev, value);
 626        if (err)
 627                return err;
 628
 629        return count;
 630}
 631
 632static ssize_t radeon_hwmon_get_pwm1(struct device *dev,
 633                                     struct device_attribute *attr,
 634                                     char *buf)
 635{
 636        struct radeon_device *rdev = dev_get_drvdata(dev);
 637        int err;
 638        u32 speed;
 639
 640        err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed);
 641        if (err)
 642                return err;
 643
 644        speed = (speed * 255) / 100;
 645
 646        return sprintf(buf, "%i\n", speed);
 647}
 648
 649static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
 650static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
 651static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
 652static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
 653                   radeon_get_dpm_forced_performance_level,
 654                   radeon_set_dpm_forced_performance_level);
 655
 656static ssize_t radeon_hwmon_show_temp(struct device *dev,
 657                                      struct device_attribute *attr,
 658                                      char *buf)
 659{
 660        struct radeon_device *rdev = dev_get_drvdata(dev);
 661        struct drm_device *ddev = rdev->ddev;
 662        int temp;
 663
 664        /* Can't get temperature when the card is off */
 665        if  ((rdev->flags & RADEON_IS_PX) &&
 666             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 667                return -EINVAL;
 668
 669        if (rdev->asic->pm.get_temperature)
 670                temp = radeon_get_temperature(rdev);
 671        else
 672                temp = 0;
 673
 674        return snprintf(buf, PAGE_SIZE, "%d\n", temp);
 675}
 676
 677static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
 678                                             struct device_attribute *attr,
 679                                             char *buf)
 680{
 681        struct radeon_device *rdev = dev_get_drvdata(dev);
 682        int hyst = to_sensor_dev_attr(attr)->index;
 683        int temp;
 684
 685        if (hyst)
 686                temp = rdev->pm.dpm.thermal.min_temp;
 687        else
 688                temp = rdev->pm.dpm.thermal.max_temp;
 689
 690        return snprintf(buf, PAGE_SIZE, "%d\n", temp);
 691}
 692
 693static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
 694static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
 695static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
 696static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0);
 697static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0);
 698static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
 699static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);
 700
 701
 702static struct attribute *hwmon_attributes[] = {
 703        &sensor_dev_attr_temp1_input.dev_attr.attr,
 704        &sensor_dev_attr_temp1_crit.dev_attr.attr,
 705        &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
 706        &sensor_dev_attr_pwm1.dev_attr.attr,
 707        &sensor_dev_attr_pwm1_enable.dev_attr.attr,
 708        &sensor_dev_attr_pwm1_min.dev_attr.attr,
 709        &sensor_dev_attr_pwm1_max.dev_attr.attr,
 710        NULL
 711};
 712
 713static umode_t hwmon_attributes_visible(struct kobject *kobj,
 714                                        struct attribute *attr, int index)
 715{
 716        struct device *dev = container_of(kobj, struct device, kobj);
 717        struct radeon_device *rdev = dev_get_drvdata(dev);
 718        umode_t effective_mode = attr->mode;
 719
 720        /* Skip attributes if DPM is not enabled */
 721        if (rdev->pm.pm_method != PM_METHOD_DPM &&
 722            (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
 723             attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
 724             attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
 725             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
 726             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
 727             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
 728                return 0;
 729
 730        /* Skip fan attributes if fan is not present */
 731        if (rdev->pm.no_fan &&
 732            (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
 733             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
 734             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
 735             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
 736                return 0;
 737
 738        /* mask fan attributes if we have no bindings for this asic to expose */
 739        if ((!rdev->asic->dpm.get_fan_speed_percent &&
 740             attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
 741            (!rdev->asic->dpm.fan_ctrl_get_mode &&
 742             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
 743                effective_mode &= ~S_IRUGO;
 744
 745        if ((!rdev->asic->dpm.set_fan_speed_percent &&
 746             attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
 747            (!rdev->asic->dpm.fan_ctrl_set_mode &&
 748             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
 749                effective_mode &= ~S_IWUSR;
 750
 751        /* hide max/min values if we can't both query and manage the fan */
 752        if ((!rdev->asic->dpm.set_fan_speed_percent &&
 753             !rdev->asic->dpm.get_fan_speed_percent) &&
 754            (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
 755             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
 756                return 0;
 757
 758        return effective_mode;
 759}
 760
 761static const struct attribute_group hwmon_attrgroup = {
 762        .attrs = hwmon_attributes,
 763        .is_visible = hwmon_attributes_visible,
 764};
 765
 766static const struct attribute_group *hwmon_groups[] = {
 767        &hwmon_attrgroup,
 768        NULL
 769};
 770
 771static int radeon_hwmon_init(struct radeon_device *rdev)
 772{
 773        int err = 0;
 774
 775        switch (rdev->pm.int_thermal_type) {
 776        case THERMAL_TYPE_RV6XX:
 777        case THERMAL_TYPE_RV770:
 778        case THERMAL_TYPE_EVERGREEN:
 779        case THERMAL_TYPE_NI:
 780        case THERMAL_TYPE_SUMO:
 781        case THERMAL_TYPE_SI:
 782        case THERMAL_TYPE_CI:
 783        case THERMAL_TYPE_KV:
 784                if (rdev->asic->pm.get_temperature == NULL)
 785                        return err;
 786                rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
 787                                                                           "radeon", rdev,
 788                                                                           hwmon_groups);
 789                if (IS_ERR(rdev->pm.int_hwmon_dev)) {
 790                        err = PTR_ERR(rdev->pm.int_hwmon_dev);
 791                        dev_err(rdev->dev,
 792                                "Unable to register hwmon device: %d\n", err);
 793                }
 794                break;
 795        default:
 796                break;
 797        }
 798
 799        return err;
 800}
 801
 802static void radeon_hwmon_fini(struct radeon_device *rdev)
 803{
 804        if (rdev->pm.int_hwmon_dev)
 805                hwmon_device_unregister(rdev->pm.int_hwmon_dev);
 806}
 807
 808static void radeon_dpm_thermal_work_handler(struct work_struct *work)
 809{
 810        struct radeon_device *rdev =
 811                container_of(work, struct radeon_device,
 812                             pm.dpm.thermal.work);
 813        /* switch to the thermal state */
 814        enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
 815
 816        if (!rdev->pm.dpm_enabled)
 817                return;
 818
 819        if (rdev->asic->pm.get_temperature) {
 820                int temp = radeon_get_temperature(rdev);
 821
 822                if (temp < rdev->pm.dpm.thermal.min_temp)
 823                        /* switch back the user state */
 824                        dpm_state = rdev->pm.dpm.user_state;
 825        } else {
 826                if (rdev->pm.dpm.thermal.high_to_low)
 827                        /* switch back the user state */
 828                        dpm_state = rdev->pm.dpm.user_state;
 829        }
 830        mutex_lock(&rdev->pm.mutex);
 831        if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
 832                rdev->pm.dpm.thermal_active = true;
 833        else
 834                rdev->pm.dpm.thermal_active = false;
 835        rdev->pm.dpm.state = dpm_state;
 836        mutex_unlock(&rdev->pm.mutex);
 837
 838        radeon_pm_compute_clocks(rdev);
 839}
 840
 841static bool radeon_dpm_single_display(struct radeon_device *rdev)
 842{
 843        bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
 844                true : false;
 845
 846        /* check if the vblank period is too short to adjust the mclk */
 847        if (single_display && rdev->asic->dpm.vblank_too_short) {
 848                if (radeon_dpm_vblank_too_short(rdev))
 849                        single_display = false;
 850        }
 851
 852        /* 120hz tends to be problematic even if they are under the
 853         * vblank limit.
 854         */
 855        if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
 856                single_display = false;
 857
 858        return single_display;
 859}
 860
 861static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
 862                                                     enum radeon_pm_state_type dpm_state)
 863{
 864        int i;
 865        struct radeon_ps *ps;
 866        u32 ui_class;
 867        bool single_display = radeon_dpm_single_display(rdev);
 868
 869        /* certain older asics have a separare 3D performance state,
 870         * so try that first if the user selected performance
 871         */
 872        if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
 873                dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
 874        /* balanced states don't exist at the moment */
 875        if (dpm_state == POWER_STATE_TYPE_BALANCED)
 876                dpm_state = POWER_STATE_TYPE_PERFORMANCE;
 877
 878restart_search:
 879        /* Pick the best power state based on current conditions */
 880        for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
 881                ps = &rdev->pm.dpm.ps[i];
 882                ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
 883                switch (dpm_state) {
 884                /* user states */
 885                case POWER_STATE_TYPE_BATTERY:
 886                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
 887                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
 888                                        if (single_display)
 889                                                return ps;
 890                                } else
 891                                        return ps;
 892                        }
 893                        break;
 894                case POWER_STATE_TYPE_BALANCED:
 895                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
 896                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
 897                                        if (single_display)
 898                                                return ps;
 899                                } else
 900                                        return ps;
 901                        }
 902                        break;
 903                case POWER_STATE_TYPE_PERFORMANCE:
 904                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
 905                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
 906                                        if (single_display)
 907                                                return ps;
 908                                } else
 909                                        return ps;
 910                        }
 911                        break;
 912                /* internal states */
 913                case POWER_STATE_TYPE_INTERNAL_UVD:
 914                        if (rdev->pm.dpm.uvd_ps)
 915                                return rdev->pm.dpm.uvd_ps;
 916                        else
 917                                break;
 918                case POWER_STATE_TYPE_INTERNAL_UVD_SD:
 919                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
 920                                return ps;
 921                        break;
 922                case POWER_STATE_TYPE_INTERNAL_UVD_HD:
 923                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
 924                                return ps;
 925                        break;
 926                case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
 927                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
 928                                return ps;
 929                        break;
 930                case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
 931                        if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
 932                                return ps;
 933                        break;
 934                case POWER_STATE_TYPE_INTERNAL_BOOT:
 935                        return rdev->pm.dpm.boot_ps;
 936                case POWER_STATE_TYPE_INTERNAL_THERMAL:
 937                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
 938                                return ps;
 939                        break;
 940                case POWER_STATE_TYPE_INTERNAL_ACPI:
 941                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
 942                                return ps;
 943                        break;
 944                case POWER_STATE_TYPE_INTERNAL_ULV:
 945                        if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
 946                                return ps;
 947                        break;
 948                case POWER_STATE_TYPE_INTERNAL_3DPERF:
 949                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
 950                                return ps;
 951                        break;
 952                default:
 953                        break;
 954                }
 955        }
 956        /* use a fallback state if we didn't match */
 957        switch (dpm_state) {
 958        case POWER_STATE_TYPE_INTERNAL_UVD_SD:
 959                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
 960                goto restart_search;
 961        case POWER_STATE_TYPE_INTERNAL_UVD_HD:
 962        case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
 963        case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
 964                if (rdev->pm.dpm.uvd_ps) {
 965                        return rdev->pm.dpm.uvd_ps;
 966                } else {
 967                        dpm_state = POWER_STATE_TYPE_PERFORMANCE;
 968                        goto restart_search;
 969                }
 970        case POWER_STATE_TYPE_INTERNAL_THERMAL:
 971                dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
 972                goto restart_search;
 973        case POWER_STATE_TYPE_INTERNAL_ACPI:
 974                dpm_state = POWER_STATE_TYPE_BATTERY;
 975                goto restart_search;
 976        case POWER_STATE_TYPE_BATTERY:
 977        case POWER_STATE_TYPE_BALANCED:
 978        case POWER_STATE_TYPE_INTERNAL_3DPERF:
 979                dpm_state = POWER_STATE_TYPE_PERFORMANCE;
 980                goto restart_search;
 981        default:
 982                break;
 983        }
 984
 985        return NULL;
 986}
 987
 988static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
 989{
 990        int i;
 991        struct radeon_ps *ps;
 992        enum radeon_pm_state_type dpm_state;
 993        int ret;
 994        bool single_display = radeon_dpm_single_display(rdev);
 995
 996        /* if dpm init failed */
 997        if (!rdev->pm.dpm_enabled)
 998                return;
 999
1000        if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
1001                /* add other state override checks here */
1002                if ((!rdev->pm.dpm.thermal_active) &&
1003                    (!rdev->pm.dpm.uvd_active))
1004                        rdev->pm.dpm.state = rdev->pm.dpm.user_state;
1005        }
1006        dpm_state = rdev->pm.dpm.state;
1007
1008        ps = radeon_dpm_pick_power_state(rdev, dpm_state);
1009        if (ps)
1010                rdev->pm.dpm.requested_ps = ps;
1011        else
1012                return;
1013
1014        /* no need to reprogram if nothing changed unless we are on BTC+ */
1015        if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
1016                /* vce just modifies an existing state so force a change */
1017                if (ps->vce_active != rdev->pm.dpm.vce_active)
1018                        goto force;
1019                /* user has made a display change (such as timing) */
1020                if (rdev->pm.dpm.single_display != single_display)
1021                        goto force;
1022                if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
1023                        /* for pre-BTC and APUs if the num crtcs changed but state is the same,
1024                         * all we need to do is update the display configuration.
1025                         */
1026                        if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
1027                                /* update display watermarks based on new power state */
1028                                radeon_bandwidth_update(rdev);
1029                                /* update displays */
1030                                radeon_dpm_display_configuration_changed(rdev);
1031                                rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1032                                rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1033                        }
1034                        return;
1035                } else {
1036                        /* for BTC+ if the num crtcs hasn't changed and state is the same,
1037                         * nothing to do, if the num crtcs is > 1 and state is the same,
1038                         * update display configuration.
1039                         */
1040                        if (rdev->pm.dpm.new_active_crtcs ==
1041                            rdev->pm.dpm.current_active_crtcs) {
1042                                return;
1043                        } else {
1044                                if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
1045                                    (rdev->pm.dpm.new_active_crtc_count > 1)) {
1046                                        /* update display watermarks based on new power state */
1047                                        radeon_bandwidth_update(rdev);
1048                                        /* update displays */
1049                                        radeon_dpm_display_configuration_changed(rdev);
1050                                        rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1051                                        rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1052                                        return;
1053                                }
1054                        }
1055                }
1056        }
1057
1058force:
1059        if (radeon_dpm == 1) {
1060                printk("switching from power state:\n");
1061                radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
1062                printk("switching to power state:\n");
1063                radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
1064        }
1065
1066        down_write(&rdev->pm.mclk_lock);
1067        mutex_lock(&rdev->ring_lock);
1068
1069        /* update whether vce is active */
1070        ps->vce_active = rdev->pm.dpm.vce_active;
1071
1072        ret = radeon_dpm_pre_set_power_state(rdev);
1073        if (ret)
1074                goto done;
1075
1076        /* update display watermarks based on new power state */
1077        radeon_bandwidth_update(rdev);
1078        /* update displays */
1079        radeon_dpm_display_configuration_changed(rdev);
1080
1081        rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1082        rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1083        rdev->pm.dpm.single_display = single_display;
1084
1085        /* wait for the rings to drain */
1086        for (i = 0; i < RADEON_NUM_RINGS; i++) {
1087                struct radeon_ring *ring = &rdev->ring[i];
1088                if (ring->ready)
1089                        radeon_fence_wait_empty(rdev, i);
1090        }
1091
1092        /* program the new power state */
1093        radeon_dpm_set_power_state(rdev);
1094
1095        /* update current power state */
1096        rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;
1097
1098        radeon_dpm_post_set_power_state(rdev);
1099
1100        if (rdev->asic->dpm.force_performance_level) {
1101                if (rdev->pm.dpm.thermal_active) {
1102                        enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
1103                        /* force low perf level for thermal */
1104                        radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
1105                        /* save the user's level */
1106                        rdev->pm.dpm.forced_level = level;
1107                } else {
1108                        /* otherwise, user selected level */
1109                        radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
1110                }
1111        }
1112
1113done:
1114        mutex_unlock(&rdev->ring_lock);
1115        up_write(&rdev->pm.mclk_lock);
1116}
1117
1118void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
1119{
1120        enum radeon_pm_state_type dpm_state;
1121
1122        if (rdev->asic->dpm.powergate_uvd) {
1123                mutex_lock(&rdev->pm.mutex);
1124                /* don't powergate anything if we
1125                   have active but pause streams */
1126                enable |= rdev->pm.dpm.sd > 0;
1127                enable |= rdev->pm.dpm.hd > 0;
1128                /* enable/disable UVD */
1129                radeon_dpm_powergate_uvd(rdev, !enable);
1130                mutex_unlock(&rdev->pm.mutex);
1131        } else {
1132                if (enable) {
1133                        mutex_lock(&rdev->pm.mutex);
1134                        rdev->pm.dpm.uvd_active = true;
1135                        /* disable this for now */
1136#if 0
1137                        if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
1138                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
1139                        else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
1140                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
1141                        else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
1142                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
1143                        else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
1144                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
1145                        else
1146#endif
1147                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
1148                        rdev->pm.dpm.state = dpm_state;
1149                        mutex_unlock(&rdev->pm.mutex);
1150                } else {
1151                        mutex_lock(&rdev->pm.mutex);
1152                        rdev->pm.dpm.uvd_active = false;
1153                        mutex_unlock(&rdev->pm.mutex);
1154                }
1155
1156                radeon_pm_compute_clocks(rdev);
1157        }
1158}
1159
1160void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
1161{
1162        if (enable) {
1163                mutex_lock(&rdev->pm.mutex);
1164                rdev->pm.dpm.vce_active = true;
1165                /* XXX select vce level based on ring/task */
1166                rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
1167                mutex_unlock(&rdev->pm.mutex);
1168        } else {
1169                mutex_lock(&rdev->pm.mutex);
1170                rdev->pm.dpm.vce_active = false;
1171                mutex_unlock(&rdev->pm.mutex);
1172        }
1173
1174        radeon_pm_compute_clocks(rdev);
1175}
1176
1177static void radeon_pm_suspend_old(struct radeon_device *rdev)
1178{
1179        mutex_lock(&rdev->pm.mutex);
1180        if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1181                if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
1182                        rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
1183        }
1184        mutex_unlock(&rdev->pm.mutex);
1185
1186        cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1187}
1188
1189static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
1190{
1191        mutex_lock(&rdev->pm.mutex);
1192        /* disable dpm */
1193        radeon_dpm_disable(rdev);
1194        /* reset the power state */
1195        rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1196        rdev->pm.dpm_enabled = false;
1197        mutex_unlock(&rdev->pm.mutex);
1198}
1199
1200void radeon_pm_suspend(struct radeon_device *rdev)
1201{
1202        if (rdev->pm.pm_method == PM_METHOD_DPM)
1203                radeon_pm_suspend_dpm(rdev);
1204        else
1205                radeon_pm_suspend_old(rdev);
1206}
1207
1208static void radeon_pm_resume_old(struct radeon_device *rdev)
1209{
1210        /* set up the default clocks if the MC ucode is loaded */
1211        if ((rdev->family >= CHIP_BARTS) &&
1212            (rdev->family <= CHIP_CAYMAN) &&
1213            rdev->mc_fw) {
1214                if (rdev->pm.default_vddc)
1215                        radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1216                                                SET_VOLTAGE_TYPE_ASIC_VDDC);
1217                if (rdev->pm.default_vddci)
1218                        radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1219                                                SET_VOLTAGE_TYPE_ASIC_VDDCI);
1220                if (rdev->pm.default_sclk)
1221                        radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1222                if (rdev->pm.default_mclk)
1223                        radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1224        }
1225        /* asic init will reset the default power state */
1226        mutex_lock(&rdev->pm.mutex);
1227        rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
1228        rdev->pm.current_clock_mode_index = 0;
1229        rdev->pm.current_sclk = rdev->pm.default_sclk;
1230        rdev->pm.current_mclk = rdev->pm.default_mclk;
1231        if (rdev->pm.power_state) {
1232                rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
1233                rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
1234        }
1235        if (rdev->pm.pm_method == PM_METHOD_DYNPM
1236            && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
1237                rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1238                schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1239                                      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1240        }
1241        mutex_unlock(&rdev->pm.mutex);
1242        radeon_pm_compute_clocks(rdev);
1243}
1244
1245static void radeon_pm_resume_dpm(struct radeon_device *rdev)
1246{
1247        int ret;
1248
1249        /* asic init will reset to the boot state */
1250        mutex_lock(&rdev->pm.mutex);
1251        rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1252        radeon_dpm_setup_asic(rdev);
1253        ret = radeon_dpm_enable(rdev);
1254        mutex_unlock(&rdev->pm.mutex);
1255        if (ret)
1256                goto dpm_resume_fail;
1257        rdev->pm.dpm_enabled = true;
1258        return;
1259
1260dpm_resume_fail:
1261        DRM_ERROR("radeon: dpm resume failed\n");
1262        if ((rdev->family >= CHIP_BARTS) &&
1263            (rdev->family <= CHIP_CAYMAN) &&
1264            rdev->mc_fw) {
1265                if (rdev->pm.default_vddc)
1266                        radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1267                                                SET_VOLTAGE_TYPE_ASIC_VDDC);
1268                if (rdev->pm.default_vddci)
1269                        radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1270                                                SET_VOLTAGE_TYPE_ASIC_VDDCI);
1271                if (rdev->pm.default_sclk)
1272                        radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1273                if (rdev->pm.default_mclk)
1274                        radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1275        }
1276}
1277
1278void radeon_pm_resume(struct radeon_device *rdev)
1279{
1280        if (rdev->pm.pm_method == PM_METHOD_DPM)
1281                radeon_pm_resume_dpm(rdev);
1282        else
1283                radeon_pm_resume_old(rdev);
1284}
1285
1286static int radeon_pm_init_old(struct radeon_device *rdev)
1287{
1288        int ret;
1289
1290        rdev->pm.profile = PM_PROFILE_DEFAULT;
1291        rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1292        rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1293        rdev->pm.dynpm_can_upclock = true;
1294        rdev->pm.dynpm_can_downclock = true;
1295        rdev->pm.default_sclk = rdev->clock.default_sclk;
1296        rdev->pm.default_mclk = rdev->clock.default_mclk;
1297        rdev->pm.current_sclk = rdev->clock.default_sclk;
1298        rdev->pm.current_mclk = rdev->clock.default_mclk;
1299        rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1300
1301        if (rdev->bios) {
1302                if (rdev->is_atom_bios)
1303                        radeon_atombios_get_power_modes(rdev);
1304                else
1305                        radeon_combios_get_power_modes(rdev);
1306                radeon_pm_print_states(rdev);
1307                radeon_pm_init_profile(rdev);
1308                /* set up the default clocks if the MC ucode is loaded */
1309                if ((rdev->family >= CHIP_BARTS) &&
1310                    (rdev->family <= CHIP_CAYMAN) &&
1311                    rdev->mc_fw) {
1312                        if (rdev->pm.default_vddc)
1313                                radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1314                                                        SET_VOLTAGE_TYPE_ASIC_VDDC);
1315                        if (rdev->pm.default_vddci)
1316                                radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1317                                                        SET_VOLTAGE_TYPE_ASIC_VDDCI);
1318                        if (rdev->pm.default_sclk)
1319                                radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1320                        if (rdev->pm.default_mclk)
1321                                radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1322                }
1323        }
1324
1325        /* set up the internal thermal sensor if applicable */
1326        ret = radeon_hwmon_init(rdev);
1327        if (ret)
1328                return ret;
1329
1330        INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
1331
1332        if (rdev->pm.num_power_states > 1) {
1333                if (radeon_debugfs_pm_init(rdev)) {
1334                        DRM_ERROR("Failed to register debugfs file for PM!\n");
1335                }
1336
1337                DRM_INFO("radeon: power management initialized\n");
1338        }
1339
1340        return 0;
1341}
1342
1343static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1344{
1345        int i;
1346
1347        for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1348                printk("== power state %d ==\n", i);
1349                radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1350        }
1351}
1352
1353static int radeon_pm_init_dpm(struct radeon_device *rdev)
1354{
1355        int ret;
1356
1357        /* default to balanced state */
1358        rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
1359        rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
1360        rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1361        rdev->pm.default_sclk = rdev->clock.default_sclk;
1362        rdev->pm.default_mclk = rdev->clock.default_mclk;
1363        rdev->pm.current_sclk = rdev->clock.default_sclk;
1364        rdev->pm.current_mclk = rdev->clock.default_mclk;
1365        rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1366
1367        if (rdev->bios && rdev->is_atom_bios)
1368                radeon_atombios_get_power_modes(rdev);
1369        else
1370                return -EINVAL;
1371
1372        /* set up the internal thermal sensor if applicable */
1373        ret = radeon_hwmon_init(rdev);
1374        if (ret)
1375                return ret;
1376
1377        INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
1378        mutex_lock(&rdev->pm.mutex);
1379        radeon_dpm_init(rdev);
1380        rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1381        if (radeon_dpm == 1)
1382                radeon_dpm_print_power_states(rdev);
1383        radeon_dpm_setup_asic(rdev);
1384        ret = radeon_dpm_enable(rdev);
1385        mutex_unlock(&rdev->pm.mutex);
1386        if (ret)
1387                goto dpm_failed;
1388        rdev->pm.dpm_enabled = true;
1389
1390        if (radeon_debugfs_pm_init(rdev)) {
1391                DRM_ERROR("Failed to register debugfs file for dpm!\n");
1392        }
1393
1394        DRM_INFO("radeon: dpm initialized\n");
1395
1396        return 0;
1397
1398dpm_failed:
1399        rdev->pm.dpm_enabled = false;
1400        if ((rdev->family >= CHIP_BARTS) &&
1401            (rdev->family <= CHIP_CAYMAN) &&
1402            rdev->mc_fw) {
1403                if (rdev->pm.default_vddc)
1404                        radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1405                                                SET_VOLTAGE_TYPE_ASIC_VDDC);
1406                if (rdev->pm.default_vddci)
1407                        radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1408                                                SET_VOLTAGE_TYPE_ASIC_VDDCI);
1409                if (rdev->pm.default_sclk)
1410                        radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1411                if (rdev->pm.default_mclk)
1412                        radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1413        }
1414        DRM_ERROR("radeon: dpm initialization failed\n");
1415        return ret;
1416}
1417
1418struct radeon_dpm_quirk {
1419        u32 chip_vendor;
1420        u32 chip_device;
1421        u32 subsys_vendor;
1422        u32 subsys_device;
1423};
1424
1425/* cards with dpm stability problems */
1426static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
1427        /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
1428        { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
1429        /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
1430        { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
1431        { 0, 0, 0, 0 },
1432};
1433
1434int radeon_pm_init(struct radeon_device *rdev)
1435{
1436        struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
1437        bool disable_dpm = false;
1438
1439        /* Apply dpm quirks */
1440        while (p && p->chip_device != 0) {
1441                if (rdev->pdev->vendor == p->chip_vendor &&
1442                    rdev->pdev->device == p->chip_device &&
1443                    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
1444                    rdev->pdev->subsystem_device == p->subsys_device) {
1445                        disable_dpm = true;
1446                        break;
1447                }
1448                ++p;
1449        }
1450
1451        /* enable dpm on rv6xx+ */
1452        switch (rdev->family) {
1453        case CHIP_RV610:
1454        case CHIP_RV630:
1455        case CHIP_RV620:
1456        case CHIP_RV635:
1457        case CHIP_RV670:
1458        case CHIP_RS780:
1459        case CHIP_RS880:
1460        case CHIP_RV770:
1461                /* DPM requires the RLC, RV770+ dGPU requires SMC */
1462                if (!rdev->rlc_fw)
1463                        rdev->pm.pm_method = PM_METHOD_PROFILE;
1464                else if ((rdev->family >= CHIP_RV770) &&
1465                         (!(rdev->flags & RADEON_IS_IGP)) &&
1466                         (!rdev->smc_fw))
1467                        rdev->pm.pm_method = PM_METHOD_PROFILE;
1468                else if (radeon_dpm == 1)
1469                        rdev->pm.pm_method = PM_METHOD_DPM;
1470                else
1471                        rdev->pm.pm_method = PM_METHOD_PROFILE;
1472                break;
1473        case CHIP_RV730:
1474        case CHIP_RV710:
1475        case CHIP_RV740:
1476        case CHIP_CEDAR:
1477        case CHIP_REDWOOD:
1478        case CHIP_JUNIPER:
1479        case CHIP_CYPRESS:
1480        case CHIP_HEMLOCK:
1481        case CHIP_PALM:
1482        case CHIP_SUMO:
1483        case CHIP_SUMO2:
1484        case CHIP_BARTS:
1485        case CHIP_TURKS:
1486        case CHIP_CAICOS:
1487        case CHIP_CAYMAN:
1488        case CHIP_ARUBA:
1489        case CHIP_TAHITI:
1490        case CHIP_PITCAIRN:
1491        case CHIP_VERDE:
1492        case CHIP_OLAND:
1493        case CHIP_HAINAN:
1494        case CHIP_BONAIRE:
1495        case CHIP_KABINI:
1496        case CHIP_KAVERI:
1497        case CHIP_HAWAII:
1498        case CHIP_MULLINS:
1499                /* DPM requires the RLC, RV770+ dGPU requires SMC */
1500                if (!rdev->rlc_fw)
1501                        rdev->pm.pm_method = PM_METHOD_PROFILE;
1502                else if ((rdev->family >= CHIP_RV770) &&
1503                         (!(rdev->flags & RADEON_IS_IGP)) &&
1504                         (!rdev->smc_fw))
1505                        rdev->pm.pm_method = PM_METHOD_PROFILE;
1506                else if (disable_dpm && (radeon_dpm == -1))
1507                        rdev->pm.pm_method = PM_METHOD_PROFILE;
1508                else if (radeon_dpm == 0)
1509                        rdev->pm.pm_method = PM_METHOD_PROFILE;
1510                else
1511                        rdev->pm.pm_method = PM_METHOD_DPM;
1512                break;
1513        default:
1514                /* default to profile method */
1515                rdev->pm.pm_method = PM_METHOD_PROFILE;
1516                break;
1517        }
1518
1519        if (rdev->pm.pm_method == PM_METHOD_DPM)
1520                return radeon_pm_init_dpm(rdev);
1521        else
1522                return radeon_pm_init_old(rdev);
1523}
1524
1525int radeon_pm_late_init(struct radeon_device *rdev)
1526{
1527        int ret = 0;
1528
1529        if (rdev->pm.pm_method == PM_METHOD_DPM) {
1530                if (rdev->pm.dpm_enabled) {
1531                        if (!rdev->pm.sysfs_initialized) {
1532                                ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
1533                                if (ret)
1534                                        DRM_ERROR("failed to create device file for dpm state\n");
1535                                ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1536                                if (ret)
1537                                        DRM_ERROR("failed to create device file for dpm state\n");
1538                                /* XXX: these are noops for dpm but are here for backwards compat */
1539                                ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1540                                if (ret)
1541                                        DRM_ERROR("failed to create device file for power profile\n");
1542                                ret = device_create_file(rdev->dev, &dev_attr_power_method);
1543                                if (ret)
1544                                        DRM_ERROR("failed to create device file for power method\n");
1545                                if (!ret)
1546                                        rdev->pm.sysfs_initialized = true;
1547                        }
1548
1549                        mutex_lock(&rdev->pm.mutex);
1550                        ret = radeon_dpm_late_enable(rdev);
1551                        mutex_unlock(&rdev->pm.mutex);
1552                        if (ret) {
1553                                rdev->pm.dpm_enabled = false;
1554                                DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1555                        } else {
1556                                /* set the dpm state for PX since there won't be
1557                                 * a modeset to call this.
1558                                 */
1559                                radeon_pm_compute_clocks(rdev);
1560                        }
1561                }
1562        } else {
1563                if ((rdev->pm.num_power_states > 1) &&
1564                    (!rdev->pm.sysfs_initialized)) {
1565                        /* where's the best place to put these? */
1566                        ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1567                        if (ret)
1568                                DRM_ERROR("failed to create device file for power profile\n");
1569                        ret = device_create_file(rdev->dev, &dev_attr_power_method);
1570                        if (ret)
1571                                DRM_ERROR("failed to create device file for power method\n");
1572                        if (!ret)
1573                                rdev->pm.sysfs_initialized = true;
1574                }
1575        }
1576        return ret;
1577}
1578
1579static void radeon_pm_fini_old(struct radeon_device *rdev)
1580{
1581        if (rdev->pm.num_power_states > 1) {
1582                mutex_lock(&rdev->pm.mutex);
1583                if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1584                        rdev->pm.profile = PM_PROFILE_DEFAULT;
1585                        radeon_pm_update_profile(rdev);
1586                        radeon_pm_set_clocks(rdev);
1587                } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1588                        /* reset default clocks */
1589                        rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1590                        rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1591                        radeon_pm_set_clocks(rdev);
1592                }
1593                mutex_unlock(&rdev->pm.mutex);
1594
1595                cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1596
1597                device_remove_file(rdev->dev, &dev_attr_power_profile);
1598                device_remove_file(rdev->dev, &dev_attr_power_method);
1599        }
1600
1601        radeon_hwmon_fini(rdev);
1602        kfree(rdev->pm.power_state);
1603}
1604
1605static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1606{
1607        if (rdev->pm.num_power_states > 1) {
1608                mutex_lock(&rdev->pm.mutex);
1609                radeon_dpm_disable(rdev);
1610                mutex_unlock(&rdev->pm.mutex);
1611
1612                device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
1613                device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1614                /* XXX backwards compat */
1615                device_remove_file(rdev->dev, &dev_attr_power_profile);
1616                device_remove_file(rdev->dev, &dev_attr_power_method);
1617        }
1618        radeon_dpm_fini(rdev);
1619
1620        radeon_hwmon_fini(rdev);
1621        kfree(rdev->pm.power_state);
1622}
1623
1624void radeon_pm_fini(struct radeon_device *rdev)
1625{
1626        if (rdev->pm.pm_method == PM_METHOD_DPM)
1627                radeon_pm_fini_dpm(rdev);
1628        else
1629                radeon_pm_fini_old(rdev);
1630}
1631
1632static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
1633{
1634        struct drm_device *ddev = rdev->ddev;
1635        struct drm_crtc *crtc;
1636        struct radeon_crtc *radeon_crtc;
1637
1638        if (rdev->pm.num_power_states < 2)
1639                return;
1640
1641        mutex_lock(&rdev->pm.mutex);
1642
1643        rdev->pm.active_crtcs = 0;
1644        rdev->pm.active_crtc_count = 0;
1645        if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1646                list_for_each_entry(crtc,
1647                                    &ddev->mode_config.crtc_list, head) {
1648                        radeon_crtc = to_radeon_crtc(crtc);
1649                        if (radeon_crtc->enabled) {
1650                                rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
1651                                rdev->pm.active_crtc_count++;
1652                        }
1653                }
1654        }
1655
1656        if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1657                radeon_pm_update_profile(rdev);
1658                radeon_pm_set_clocks(rdev);
1659        } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1660                if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
1661                        if (rdev->pm.active_crtc_count > 1) {
1662                                if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
1663                                        cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1664
1665                                        rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
1666                                        rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1667                                        radeon_pm_get_dynpm_state(rdev);
1668                                        radeon_pm_set_clocks(rdev);
1669
1670                                        DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
1671                                }
1672                        } else if (rdev->pm.active_crtc_count == 1) {
1673                                /* TODO: Increase clocks if needed for current mode */
1674
1675                                if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
1676                                        rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1677                                        rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
1678                                        radeon_pm_get_dynpm_state(rdev);
1679                                        radeon_pm_set_clocks(rdev);
1680
1681                                        schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1682                                                              msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1683                                } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
1684                                        rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1685                                        schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1686                                                              msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1687                                        DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
1688                                }
1689                        } else { /* count == 0 */
1690                                if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
1691                                        cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1692
1693                                        rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
1694                                        rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
1695                                        radeon_pm_get_dynpm_state(rdev);
1696                                        radeon_pm_set_clocks(rdev);
1697                                }
1698                        }
1699                }
1700        }
1701
1702        mutex_unlock(&rdev->pm.mutex);
1703}
1704
1705static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1706{
1707        struct drm_device *ddev = rdev->ddev;
1708        struct drm_crtc *crtc;
1709        struct radeon_crtc *radeon_crtc;
1710
1711        if (!rdev->pm.dpm_enabled)
1712                return;
1713
1714        mutex_lock(&rdev->pm.mutex);
1715
1716        /* update active crtc counts */
1717        rdev->pm.dpm.new_active_crtcs = 0;
1718        rdev->pm.dpm.new_active_crtc_count = 0;
1719        if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1720                list_for_each_entry(crtc,
1721                                    &ddev->mode_config.crtc_list, head) {
1722                        radeon_crtc = to_radeon_crtc(crtc);
1723                        if (crtc->enabled) {
1724                                rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1725                                rdev->pm.dpm.new_active_crtc_count++;
1726                        }
1727                }
1728        }
1729
1730        /* update battery/ac status */
1731        if (power_supply_is_system_supplied() > 0)
1732                rdev->pm.dpm.ac_power = true;
1733        else
1734                rdev->pm.dpm.ac_power = false;
1735
1736        radeon_dpm_change_power_state_locked(rdev);
1737
1738        mutex_unlock(&rdev->pm.mutex);
1739
1740}
1741
1742void radeon_pm_compute_clocks(struct radeon_device *rdev)
1743{
1744        if (rdev->pm.pm_method == PM_METHOD_DPM)
1745                radeon_pm_compute_clocks_dpm(rdev);
1746        else
1747                radeon_pm_compute_clocks_old(rdev);
1748}
1749
1750static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1751{
1752        int  crtc, vpos, hpos, vbl_status;
1753        bool in_vbl = true;
1754
1755        /* Iterate over all active crtc's. All crtc's must be in vblank,
1756         * otherwise return in_vbl == false.
1757         */
1758        for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1759                if (rdev->pm.active_crtcs & (1 << crtc)) {
1760                        vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
1761                        if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1762                            !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
1763                                in_vbl = false;
1764                }
1765        }
1766
1767        return in_vbl;
1768}
1769
1770static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
1771{
1772        u32 stat_crtc = 0;
1773        bool in_vbl = radeon_pm_in_vbl(rdev);
1774
1775        if (in_vbl == false)
1776                DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
1777                         finish ? "exit" : "entry");
1778        return in_vbl;
1779}
1780
1781static void radeon_dynpm_idle_work_handler(struct work_struct *work)
1782{
1783        struct radeon_device *rdev;
1784        int resched;
1785        rdev = container_of(work, struct radeon_device,
1786                                pm.dynpm_idle_work.work);
1787
1788        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1789        mutex_lock(&rdev->pm.mutex);
1790        if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
1791                int not_processed = 0;
1792                int i;
1793
1794                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1795                        struct radeon_ring *ring = &rdev->ring[i];
1796
1797                        if (ring->ready) {
1798                                not_processed += radeon_fence_count_emitted(rdev, i);
1799                                if (not_processed >= 3)
1800                                        break;
1801                        }
1802                }
1803
1804                if (not_processed >= 3) { /* should upclock */
1805                        if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
1806                                rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1807                        } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
1808                                   rdev->pm.dynpm_can_upclock) {
1809                                rdev->pm.dynpm_planned_action =
1810                                        DYNPM_ACTION_UPCLOCK;
1811                                rdev->pm.dynpm_action_timeout = jiffies +
1812                                msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
1813                        }
1814                } else if (not_processed == 0) { /* should downclock */
1815                        if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
1816                                rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1817                        } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
1818                                   rdev->pm.dynpm_can_downclock) {
1819                                rdev->pm.dynpm_planned_action =
1820                                        DYNPM_ACTION_DOWNCLOCK;
1821                                rdev->pm.dynpm_action_timeout = jiffies +
1822                                msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
1823                        }
1824                }
1825
1826                /* Note, radeon_pm_set_clocks is called with static_switch set
1827                 * to false since we want to wait for vbl to avoid flicker.
1828                 */
1829                if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
1830                    jiffies > rdev->pm.dynpm_action_timeout) {
1831                        radeon_pm_get_dynpm_state(rdev);
1832                        radeon_pm_set_clocks(rdev);
1833                }
1834
1835                schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1836                                      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1837        }
1838        mutex_unlock(&rdev->pm.mutex);
1839        ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1840}
1841
1842/*
1843 * Debugfs info
1844 */
1845#if defined(CONFIG_DEBUG_FS)
1846
1847static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
1848{
1849        struct drm_info_node *node = (struct drm_info_node *) m->private;
1850        struct drm_device *dev = node->minor->dev;
1851        struct radeon_device *rdev = dev->dev_private;
1852        struct drm_device *ddev = rdev->ddev;
1853
1854        if  ((rdev->flags & RADEON_IS_PX) &&
1855             (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
1856                seq_printf(m, "PX asic powered off\n");
1857        } else if (rdev->pm.dpm_enabled) {
1858                mutex_lock(&rdev->pm.mutex);
1859                if (rdev->asic->dpm.debugfs_print_current_performance_level)
1860                        radeon_dpm_debugfs_print_current_performance_level(rdev, m);
1861                else
1862                        seq_printf(m, "Debugfs support not implemented for this asic\n");
1863                mutex_unlock(&rdev->pm.mutex);
1864        } else {
1865                seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
1866                /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
1867                if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
1868                        seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
1869                else
1870                        seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
1871                seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
1872                if (rdev->asic->pm.get_memory_clock)
1873                        seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
1874                if (rdev->pm.current_vddc)
1875                        seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
1876                if (rdev->asic->pm.get_pcie_lanes)
1877                        seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
1878        }
1879
1880        return 0;
1881}
1882
1883static struct drm_info_list radeon_pm_info_list[] = {
1884        {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
1885};
1886#endif
1887
1888static int radeon_debugfs_pm_init(struct radeon_device *rdev)
1889{
1890#if defined(CONFIG_DEBUG_FS)
1891        return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
1892#else
1893        return 0;
1894#endif
1895}
1896