linux/drivers/gpu/drm/radeon/radeon_device.c
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <linux/console.h>
  29#include <linux/slab.h>
  30#include <drm/drmP.h>
  31#include <drm/drm_crtc_helper.h>
  32#include <drm/radeon_drm.h>
  33#include <linux/pm_runtime.h>
  34#include <linux/vgaarb.h>
  35#include <linux/vga_switcheroo.h>
  36#include <linux/efi.h>
  37#include "radeon_reg.h"
  38#include "radeon.h"
  39#include "atom.h"
  40
  41static const char radeon_family_name[][16] = {
  42        "R100",
  43        "RV100",
  44        "RS100",
  45        "RV200",
  46        "RS200",
  47        "R200",
  48        "RV250",
  49        "RS300",
  50        "RV280",
  51        "R300",
  52        "R350",
  53        "RV350",
  54        "RV380",
  55        "R420",
  56        "R423",
  57        "RV410",
  58        "RS400",
  59        "RS480",
  60        "RS600",
  61        "RS690",
  62        "RS740",
  63        "RV515",
  64        "R520",
  65        "RV530",
  66        "RV560",
  67        "RV570",
  68        "R580",
  69        "R600",
  70        "RV610",
  71        "RV630",
  72        "RV670",
  73        "RV620",
  74        "RV635",
  75        "RS780",
  76        "RS880",
  77        "RV770",
  78        "RV730",
  79        "RV710",
  80        "RV740",
  81        "CEDAR",
  82        "REDWOOD",
  83        "JUNIPER",
  84        "CYPRESS",
  85        "HEMLOCK",
  86        "PALM",
  87        "SUMO",
  88        "SUMO2",
  89        "BARTS",
  90        "TURKS",
  91        "CAICOS",
  92        "CAYMAN",
  93        "ARUBA",
  94        "TAHITI",
  95        "PITCAIRN",
  96        "VERDE",
  97        "OLAND",
  98        "HAINAN",
  99        "BONAIRE",
 100        "KAVERI",
 101        "KABINI",
 102        "HAWAII",
 103        "MULLINS",
 104        "LAST",
 105};
 106
 107#if defined(CONFIG_VGA_SWITCHEROO)
 108bool radeon_has_atpx_dgpu_power_cntl(void);
 109bool radeon_is_atpx_hybrid(void);
 110#else
 111static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
 112static inline bool radeon_is_atpx_hybrid(void) { return false; }
 113#endif
 114
 115#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
 116
 117struct radeon_px_quirk {
 118        u32 chip_vendor;
 119        u32 chip_device;
 120        u32 subsys_vendor;
 121        u32 subsys_device;
 122        u32 px_quirk_flags;
 123};
 124
 125static struct radeon_px_quirk radeon_px_quirk_list[] = {
 126        /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
 127         * https://bugzilla.kernel.org/show_bug.cgi?id=74551
 128         */
 129        { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
 130        /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
 131         * https://bugzilla.kernel.org/show_bug.cgi?id=51381
 132         */
 133        { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
 134        /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
 135         * https://bugzilla.kernel.org/show_bug.cgi?id=51381
 136         */
 137        { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
 138        /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
 139         * https://bugs.freedesktop.org/show_bug.cgi?id=101491
 140         */
 141        { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
 142        { 0, 0, 0, 0, 0 },
 143};
 144
 145bool radeon_is_px(struct drm_device *dev)
 146{
 147        struct radeon_device *rdev = dev->dev_private;
 148
 149        if (rdev->flags & RADEON_IS_PX)
 150                return true;
 151        return false;
 152}
 153
 154static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
 155{
 156        struct radeon_px_quirk *p = radeon_px_quirk_list;
 157
 158        /* Apply PX quirks */
 159        while (p && p->chip_device != 0) {
 160                if (rdev->pdev->vendor == p->chip_vendor &&
 161                    rdev->pdev->device == p->chip_device &&
 162                    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
 163                    rdev->pdev->subsystem_device == p->subsys_device) {
 164                        rdev->px_quirk_flags = p->px_quirk_flags;
 165                        break;
 166                }
 167                ++p;
 168        }
 169
 170        if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
 171                rdev->flags &= ~RADEON_IS_PX;
 172
  173        /* disable PX if the system doesn't support dGPU power control or hybrid gfx */
 174        if (!radeon_is_atpx_hybrid() &&
 175            !radeon_has_atpx_dgpu_power_cntl())
 176                rdev->flags &= ~RADEON_IS_PX;
 177}
 178
 179/**
 180 * radeon_program_register_sequence - program an array of registers.
 181 *
 182 * @rdev: radeon_device pointer
 183 * @registers: pointer to the register array
 184 * @array_size: size of the register array
 185 *
  186 * Programs an array of registers with AND and OR masks.
 187 * This is a helper for setting golden registers.
 188 */
 189void radeon_program_register_sequence(struct radeon_device *rdev,
 190                                      const u32 *registers,
 191                                      const u32 array_size)
 192{
 193        u32 tmp, reg, and_mask, or_mask;
 194        int i;
 195
 196        if (array_size % 3)
 197                return;
 198
  199        for (i = 0; i < array_size; i += 3) {
 200                reg = registers[i + 0];
 201                and_mask = registers[i + 1];
 202                or_mask = registers[i + 2];
 203
 204                if (and_mask == 0xffffffff) {
 205                        tmp = or_mask;
 206                } else {
 207                        tmp = RREG32(reg);
 208                        tmp &= ~and_mask;
 209                        tmp |= or_mask;
 210                }
 211                WREG32(reg, tmp);
 212        }
 213}
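
/*
 * Illustrative sketch (hypothetical offsets, not a real golden-register
 * list): radeon_program_register_sequence() consumes flat arrays of
 * {reg, and_mask, or_mask} triplets.  An and_mask of 0xffffffff writes
 * or_mask verbatim; any other mask is applied as a read-modify-write that
 * first clears the and_mask bits:
 *
 *	static const u32 example_golden_regs[] = {
 *		0x00009a10, 0xffffffff, 0x00018208,
 *		0x00003f90, 0xffff0000, 0x00000100,
 *	};
 *	radeon_program_register_sequence(rdev, example_golden_regs,
 *					 ARRAY_SIZE(example_golden_regs));
 */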
 214
 215void radeon_pci_config_reset(struct radeon_device *rdev)
 216{
 217        pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
 218}
 219
 220/**
 221 * radeon_surface_init - Clear GPU surface registers.
 222 *
 223 * @rdev: radeon_device pointer
 224 *
 225 * Clear GPU surface registers (r1xx-r5xx).
 226 */
 227void radeon_surface_init(struct radeon_device *rdev)
 228{
 229        /* FIXME: check this out */
 230        if (rdev->family < CHIP_R600) {
 231                int i;
 232
 233                for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
 234                        if (rdev->surface_regs[i].bo)
 235                                radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
 236                        else
 237                                radeon_clear_surface_reg(rdev, i);
 238                }
 239                /* enable surfaces */
 240                WREG32(RADEON_SURFACE_CNTL, 0);
 241        }
 242}
 243
 244/*
  245 * GPU scratch register helper functions.
 246 */
 247/**
 248 * radeon_scratch_init - Init scratch register driver information.
 249 *
 250 * @rdev: radeon_device pointer
 251 *
 252 * Init CP scratch register driver information (r1xx-r5xx)
 253 */
 254void radeon_scratch_init(struct radeon_device *rdev)
 255{
 256        int i;
 257
 258        /* FIXME: check this out */
 259        if (rdev->family < CHIP_R300) {
 260                rdev->scratch.num_reg = 5;
 261        } else {
 262                rdev->scratch.num_reg = 7;
 263        }
 264        rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
 265        for (i = 0; i < rdev->scratch.num_reg; i++) {
 266                rdev->scratch.free[i] = true;
 267                rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
 268        }
 269}
 270
 271/**
 272 * radeon_scratch_get - Allocate a scratch register
 273 *
 274 * @rdev: radeon_device pointer
 275 * @reg: scratch register mmio offset
 276 *
 277 * Allocate a CP scratch register for use by the driver (all asics).
 278 * Returns 0 on success or -EINVAL on failure.
 279 */
 280int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
 281{
 282        int i;
 283
 284        for (i = 0; i < rdev->scratch.num_reg; i++) {
 285                if (rdev->scratch.free[i]) {
 286                        rdev->scratch.free[i] = false;
 287                        *reg = rdev->scratch.reg[i];
 288                        return 0;
 289                }
 290        }
 291        return -EINVAL;
 292}
 293
 294/**
 295 * radeon_scratch_free - Free a scratch register
 296 *
 297 * @rdev: radeon_device pointer
 298 * @reg: scratch register mmio offset
 299 *
 300 * Free a CP scratch register allocated for use by the driver (all asics)
 301 */
 302void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
 303{
 304        int i;
 305
 306        for (i = 0; i < rdev->scratch.num_reg; i++) {
 307                if (rdev->scratch.reg[i] == reg) {
 308                        rdev->scratch.free[i] = true;
 309                        return;
 310                }
 311        }
 312}
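
/*
 * Typical usage sketch (this mirrors what the driver's ring tests do): grab
 * a scratch register, write a marker value, let the CP overwrite it, then
 * hand the register back:
 *
 *	uint32_t scratch;
 *
 *	if (radeon_scratch_get(rdev, &scratch))
 *		return;
 *	WREG32(scratch, 0xCAFEDEAD);
 *	... submit a CP packet that writes scratch, poll until it lands ...
 *	radeon_scratch_free(rdev, scratch);
 */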
 313
 314/*
  315 * GPU doorbell aperture helper functions.
 316 */
 317/**
 318 * radeon_doorbell_init - Init doorbell driver information.
 319 *
 320 * @rdev: radeon_device pointer
 321 *
 322 * Init doorbell driver information (CIK)
 323 * Returns 0 on success, error on failure.
 324 */
 325static int radeon_doorbell_init(struct radeon_device *rdev)
 326{
 327        /* doorbell bar mapping */
 328        rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
 329        rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
 330
 331        rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
 332        if (rdev->doorbell.num_doorbells == 0)
 333                return -EINVAL;
 334
 335        rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
 336        if (rdev->doorbell.ptr == NULL) {
 337                return -ENOMEM;
 338        }
 339        DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
 340        DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
 341
 342        memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
 343
 344        return 0;
 345}
 346
 347/**
 348 * radeon_doorbell_fini - Tear down doorbell driver information.
 349 *
 350 * @rdev: radeon_device pointer
 351 *
 352 * Tear down doorbell driver information (CIK)
 353 */
 354static void radeon_doorbell_fini(struct radeon_device *rdev)
 355{
 356        iounmap(rdev->doorbell.ptr);
 357        rdev->doorbell.ptr = NULL;
 358}
 359
 360/**
 361 * radeon_doorbell_get - Allocate a doorbell entry
 362 *
 363 * @rdev: radeon_device pointer
 364 * @doorbell: doorbell index
 365 *
 366 * Allocate a doorbell for use by the driver (all asics).
 367 * Returns 0 on success or -EINVAL on failure.
 368 */
 369int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
 370{
 371        unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
 372        if (offset < rdev->doorbell.num_doorbells) {
 373                __set_bit(offset, rdev->doorbell.used);
 374                *doorbell = offset;
 375                return 0;
 376        } else {
 377                return -EINVAL;
 378        }
 379}
 380
 381/**
 382 * radeon_doorbell_free - Free a doorbell entry
 383 *
 384 * @rdev: radeon_device pointer
 385 * @doorbell: doorbell index
 386 *
 387 * Free a doorbell allocated for use by the driver (all asics)
 388 */
 389void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
 390{
 391        if (doorbell < rdev->doorbell.num_doorbells)
 392                __clear_bit(doorbell, rdev->doorbell.used);
 393}
 394
 395/**
 396 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
  397 *                                set up KFD
 398 *
 399 * @rdev: radeon_device pointer
 400 * @aperture_base: output returning doorbell aperture base physical address
 401 * @aperture_size: output returning doorbell aperture size in bytes
 402 * @start_offset: output returning # of doorbell bytes reserved for radeon.
 403 *
 404 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
 405 * takes doorbells required for its own rings and reports the setup to KFD.
 406 * Radeon reserved doorbells are at the start of the doorbell aperture.
 407 */
 408void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
 409                                  phys_addr_t *aperture_base,
 410                                  size_t *aperture_size,
 411                                  size_t *start_offset)
 412{
 413        /* The first num_doorbells are used by radeon.
 414         * KFD takes whatever's left in the aperture. */
 415        if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
 416                *aperture_base = rdev->doorbell.base;
 417                *aperture_size = rdev->doorbell.size;
 418                *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
 419        } else {
 420                *aperture_base = 0;
 421                *aperture_size = 0;
 422                *start_offset = 0;
 423        }
 424}
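
/*
 * Worked example for the split above (sizes are illustrative): with a 2MB
 * doorbell BAR and RADEON_MAX_DOORBELLS of, say, 1024, radeon keeps the
 * first 1024 * 4 = 4KB of the aperture for its own rings, so KFD is told
 * aperture_base = doorbell.base, aperture_size = 2MB and start_offset = 4KB.
 */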
 425
 426/*
 427 * radeon_wb_*()
  428 * Writeback is the method by which the GPU updates special pages
 429 * in memory with the status of certain GPU events (fences, ring pointers,
 430 * etc.).
 431 */
 432
 433/**
 434 * radeon_wb_disable - Disable Writeback
 435 *
 436 * @rdev: radeon_device pointer
 437 *
 438 * Disables Writeback (all asics).  Used for suspend.
 439 */
 440void radeon_wb_disable(struct radeon_device *rdev)
 441{
 442        rdev->wb.enabled = false;
 443}
 444
 445/**
 446 * radeon_wb_fini - Disable Writeback and free memory
 447 *
 448 * @rdev: radeon_device pointer
 449 *
 450 * Disables Writeback and frees the Writeback memory (all asics).
 451 * Used at driver shutdown.
 452 */
 453void radeon_wb_fini(struct radeon_device *rdev)
 454{
 455        radeon_wb_disable(rdev);
 456        if (rdev->wb.wb_obj) {
 457                if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
 458                        radeon_bo_kunmap(rdev->wb.wb_obj);
 459                        radeon_bo_unpin(rdev->wb.wb_obj);
 460                        radeon_bo_unreserve(rdev->wb.wb_obj);
 461                }
 462                radeon_bo_unref(&rdev->wb.wb_obj);
 463                rdev->wb.wb = NULL;
 464                rdev->wb.wb_obj = NULL;
 465        }
 466}
 467
 468/**
  469 * radeon_wb_init - Init Writeback driver info and allocate memory
  470 *
  471 * @rdev: radeon_device pointer
  472 *
  473 * Initializes Writeback and allocates the Writeback memory (all asics).
  474 * Used at driver startup.
  475 * Returns 0 on success or a negative error code on failure.
 476 */
 477int radeon_wb_init(struct radeon_device *rdev)
 478{
 479        int r;
 480
 481        if (rdev->wb.wb_obj == NULL) {
 482                r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
 483                                     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
 484                                     &rdev->wb.wb_obj);
 485                if (r) {
 486                        dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
 487                        return r;
 488                }
 489                r = radeon_bo_reserve(rdev->wb.wb_obj, false);
 490                if (unlikely(r != 0)) {
 491                        radeon_wb_fini(rdev);
 492                        return r;
 493                }
 494                r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
 495                                &rdev->wb.gpu_addr);
 496                if (r) {
 497                        radeon_bo_unreserve(rdev->wb.wb_obj);
 498                        dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
 499                        radeon_wb_fini(rdev);
 500                        return r;
 501                }
 502                r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
 503                radeon_bo_unreserve(rdev->wb.wb_obj);
 504                if (r) {
 505                        dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
 506                        radeon_wb_fini(rdev);
 507                        return r;
 508                }
 509        }
 510
 511        /* clear wb memory */
 512        memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
 513        /* disable event_write fences */
 514        rdev->wb.use_event = false;
 515        /* disabled via module param */
 516        if (radeon_no_wb == 1) {
 517                rdev->wb.enabled = false;
 518        } else {
 519                if (rdev->flags & RADEON_IS_AGP) {
 520                        /* often unreliable on AGP */
 521                        rdev->wb.enabled = false;
 522                } else if (rdev->family < CHIP_R300) {
 523                        /* often unreliable on pre-r300 */
 524                        rdev->wb.enabled = false;
 525                } else {
 526                        rdev->wb.enabled = true;
 527                        /* event_write fences are only available on r600+ */
 528                        if (rdev->family >= CHIP_R600) {
 529                                rdev->wb.use_event = true;
 530                        }
 531                }
 532        }
 533        /* always use writeback/events on NI, APUs */
 534        if (rdev->family >= CHIP_PALM) {
 535                rdev->wb.enabled = true;
 536                rdev->wb.use_event = true;
 537        }
 538
 539        dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
 540
 541        return 0;
 542}
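
/*
 * Usage note (a sketch; the real offsets are defined in radeon.h): when
 * rdev->wb.enabled is true, ring code reads ring pointers and fence values
 * straight out of the shared page instead of doing an MMIO read, roughly:
 *
 *	rptr = rdev->wb.wb[ring->rptr_offs / 4];
 *
 * When writeback is disabled the same paths fall back to RREG32() on the
 * corresponding register.
 */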
 543
 544/**
 545 * radeon_vram_location - try to find VRAM location
  546 * @rdev: radeon device structure holding all necessary information
  547 * @mc: memory controller structure holding memory information
 548 * @base: base address at which to put VRAM
 549 *
  550 * Function will try to place VRAM at the base address provided
 551 * as parameter (which is so far either PCI aperture address or
 552 * for IGP TOM base address).
 553 *
  554 * If there is not enough space to fit the non-visible VRAM in the 32-bit
 555 * address space then we limit the VRAM size to the aperture.
 556 *
 557 * If we are using AGP and if the AGP aperture doesn't allow us to have
  558 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 559 * size and print a warning.
 560 *
  561 * This function never fails; the worst case is limiting VRAM.
 562 *
 563 * Note: GTT start, end, size should be initialized before calling this
 564 * function on AGP platform.
 565 *
 566 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 567 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 568 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 569 * not IGP.
 570 *
  571 * Note: we use mc_vram_size because on some boards we need to program the mc to
  572 * cover the whole aperture even if the VRAM size is smaller than the aperture size
  573 * (Novell bug 204882 along with lots of Ubuntu ones).
 574 *
  575 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
  576 * we are not in the case where real_vram_size is smaller than mc_vram_size (i.e.
  577 * not affected by the bogus hw of Novell bug 204882 along with lots of Ubuntu
  578 * ones).
 579 *
 580 * Note: IGP TOM addr should be the same as the aperture addr, we don't
  581 * explicitly check for that though.
 582 *
 583 * FIXME: when reducing VRAM size align new size on power of 2.
 584 */
 585void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
 586{
 587        uint64_t limit = (uint64_t)radeon_vram_limit << 20;
 588
 589        mc->vram_start = base;
 590        if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
 591                dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
 592                mc->real_vram_size = mc->aper_size;
 593                mc->mc_vram_size = mc->aper_size;
 594        }
 595        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 596        if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
 597                dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
 598                mc->real_vram_size = mc->aper_size;
 599                mc->mc_vram_size = mc->aper_size;
 600        }
 601        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 602        if (limit && limit < mc->real_vram_size)
 603                mc->real_vram_size = limit;
 604        dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
 605                        mc->mc_vram_size >> 20, mc->vram_start,
 606                        mc->vram_end, mc->real_vram_size >> 20);
 607}
 608
 609/**
 610 * radeon_gtt_location - try to find GTT location
  611 * @rdev: radeon device structure holding all necessary information
  612 * @mc: memory controller structure holding memory information
 613 *
  614 * Function will try to place GTT before or after VRAM.
 615 *
  616 * If GTT size is bigger than the space left then we adjust the GTT size.
  617 * Thus this function never fails.
 618 *
 619 * FIXME: when reducing GTT size align new size on power of 2.
 620 */
 621void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 622{
 623        u64 size_af, size_bf;
 624
 625        size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
 626        size_bf = mc->vram_start & ~mc->gtt_base_align;
 627        if (size_bf > size_af) {
 628                if (mc->gtt_size > size_bf) {
 629                        dev_warn(rdev->dev, "limiting GTT\n");
 630                        mc->gtt_size = size_bf;
 631                }
 632                mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
 633        } else {
 634                if (mc->gtt_size > size_af) {
 635                        dev_warn(rdev->dev, "limiting GTT\n");
 636                        mc->gtt_size = size_af;
 637                }
 638                mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
 639        }
 640        mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
 641        dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
 642                        mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
 643}
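
/*
 * Worked example for the placement above (assuming gtt_base_align == 0):
 * with 256MB of VRAM at offset 0 and a 32-bit mc_mask, size_bf = 0 and
 * size_af = 0xF0000000, so the GTT is placed right after VRAM at
 * gtt_start = 0x10000000 and trimmed to size_af if it would not fit.
 */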
 644
 645/*
  646 * GPU helper functions.
 647 */
 648
 649/**
  650 * radeon_device_is_virtual - check if we are running in a virtual environment
 651 *
 652 * Check if the asic has been passed through to a VM (all asics).
 653 * Used at driver startup.
 654 * Returns true if virtual or false if not.
 655 */
 656bool radeon_device_is_virtual(void)
 657{
 658#ifdef CONFIG_X86
 659        return boot_cpu_has(X86_FEATURE_HYPERVISOR);
 660#else
 661        return false;
 662#endif
 663}
 664
 665/**
 666 * radeon_card_posted - check if the hw has already been initialized
 667 *
 668 * @rdev: radeon_device pointer
 669 *
 670 * Check if the asic has been initialized (all asics).
 671 * Used at driver startup.
 672 * Returns true if initialized or false if not.
 673 */
 674bool radeon_card_posted(struct radeon_device *rdev)
 675{
 676        uint32_t reg;
 677
 678        /* for pass through, always force asic_init for CI */
 679        if (rdev->family >= CHIP_BONAIRE &&
 680            radeon_device_is_virtual())
 681                return false;
 682
 683        /* required for EFI mode on macbook2,1 which uses an r5xx asic */
 684        if (efi_enabled(EFI_BOOT) &&
 685            (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
 686            (rdev->family < CHIP_R600))
 687                return false;
 688
 689        if (ASIC_IS_NODCE(rdev))
 690                goto check_memsize;
 691
 692        /* first check CRTCs */
 693        if (ASIC_IS_DCE4(rdev)) {
 694                reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
 695                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
  696                if (rdev->num_crtc >= 4) {
  697                        reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
  698                                RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
  699                }
  700                if (rdev->num_crtc >= 6) {
  701                        reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
  702                                RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
  703                }
 704                if (reg & EVERGREEN_CRTC_MASTER_EN)
 705                        return true;
 706        } else if (ASIC_IS_AVIVO(rdev)) {
 707                reg = RREG32(AVIVO_D1CRTC_CONTROL) |
 708                      RREG32(AVIVO_D2CRTC_CONTROL);
 709                if (reg & AVIVO_CRTC_EN) {
 710                        return true;
 711                }
 712        } else {
 713                reg = RREG32(RADEON_CRTC_GEN_CNTL) |
 714                      RREG32(RADEON_CRTC2_GEN_CNTL);
 715                if (reg & RADEON_CRTC_EN) {
 716                        return true;
 717                }
 718        }
 719
 720check_memsize:
 721        /* then check MEM_SIZE, in case the crtcs are off */
 722        if (rdev->family >= CHIP_R600)
 723                reg = RREG32(R600_CONFIG_MEMSIZE);
 724        else
 725                reg = RREG32(RADEON_CONFIG_MEMSIZE);
 726
 727        if (reg)
 728                return true;
 729
 730        return false;
 731
 732}
 733
 734/**
 735 * radeon_update_bandwidth_info - update display bandwidth params
 736 *
 737 * @rdev: radeon_device pointer
 738 *
 739 * Used when sclk/mclk are switched or display modes are set.
  740 * The params are used to calculate display watermarks (all asics).
 741 */
 742void radeon_update_bandwidth_info(struct radeon_device *rdev)
 743{
 744        fixed20_12 a;
 745        u32 sclk = rdev->pm.current_sclk;
 746        u32 mclk = rdev->pm.current_mclk;
 747
 748        /* sclk/mclk in Mhz */
 749        a.full = dfixed_const(100);
 750        rdev->pm.sclk.full = dfixed_const(sclk);
 751        rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
 752        rdev->pm.mclk.full = dfixed_const(mclk);
 753        rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 754
 755        if (rdev->flags & RADEON_IS_IGP) {
 756                a.full = dfixed_const(16);
 757                /* core_bandwidth = sclk(Mhz) * 16 */
 758                rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
 759        }
 760}
 761
 762/**
 763 * radeon_boot_test_post_card - check and possibly initialize the hw
 764 *
 765 * @rdev: radeon_device pointer
 766 *
 767 * Check if the asic is initialized and if not, attempt to initialize
 768 * it (all asics).
 769 * Returns true if initialized or false if not.
 770 */
 771bool radeon_boot_test_post_card(struct radeon_device *rdev)
 772{
 773        if (radeon_card_posted(rdev))
 774                return true;
 775
 776        if (rdev->bios) {
 777                DRM_INFO("GPU not posted. posting now...\n");
 778                if (rdev->is_atom_bios)
 779                        atom_asic_init(rdev->mode_info.atom_context);
 780                else
 781                        radeon_combios_asic_init(rdev->ddev);
 782                return true;
 783        } else {
 784                dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 785                return false;
 786        }
 787}
 788
 789/**
 790 * radeon_dummy_page_init - init dummy page used by the driver
 791 *
 792 * @rdev: radeon_device pointer
 793 *
 794 * Allocate the dummy page used by the driver (all asics).
 795 * This dummy page is used by the driver as a filler for gart entries
  796 * when pages are taken out of the GART.
  797 * Returns 0 on success, -ENOMEM on failure.
 798 */
 799int radeon_dummy_page_init(struct radeon_device *rdev)
 800{
 801        if (rdev->dummy_page.page)
 802                return 0;
 803        rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
 804        if (rdev->dummy_page.page == NULL)
 805                return -ENOMEM;
 806        rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
 807                                        0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 808        if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
 809                dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
 810                __free_page(rdev->dummy_page.page);
 811                rdev->dummy_page.page = NULL;
 812                return -ENOMEM;
 813        }
 814        rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
 815                                                            RADEON_GART_PAGE_DUMMY);
 816        return 0;
 817}
 818
 819/**
 820 * radeon_dummy_page_fini - free dummy page used by the driver
 821 *
 822 * @rdev: radeon_device pointer
 823 *
 824 * Frees the dummy page used by the driver (all asics).
 825 */
 826void radeon_dummy_page_fini(struct radeon_device *rdev)
 827{
 828        if (rdev->dummy_page.page == NULL)
 829                return;
 830        pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
 831                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 832        __free_page(rdev->dummy_page.page);
 833        rdev->dummy_page.page = NULL;
 834}
 835
 836
 837/* ATOM accessor methods */
 838/*
 839 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 840 * driver registers callbacks to access registers and the interpreter
  841 * in the driver parses the tables and executes them to program specific
 842 * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
 843 * atombios.h, and atom.c
 844 */
 845
 846/**
 847 * cail_pll_read - read PLL register
 848 *
 849 * @info: atom card_info pointer
 850 * @reg: PLL register offset
 851 *
 852 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 853 * Returns the value of the PLL register.
 854 */
 855static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
 856{
 857        struct radeon_device *rdev = info->dev->dev_private;
 858        uint32_t r;
 859
 860        r = rdev->pll_rreg(rdev, reg);
 861        return r;
 862}
 863
 864/**
 865 * cail_pll_write - write PLL register
 866 *
 867 * @info: atom card_info pointer
 868 * @reg: PLL register offset
 869 * @val: value to write to the pll register
 870 *
 871 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 872 */
 873static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
 874{
 875        struct radeon_device *rdev = info->dev->dev_private;
 876
 877        rdev->pll_wreg(rdev, reg, val);
 878}
 879
 880/**
 881 * cail_mc_read - read MC (Memory Controller) register
 882 *
 883 * @info: atom card_info pointer
 884 * @reg: MC register offset
 885 *
 886 * Provides an MC register accessor for the atom interpreter (r4xx+).
 887 * Returns the value of the MC register.
 888 */
 889static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
 890{
 891        struct radeon_device *rdev = info->dev->dev_private;
 892        uint32_t r;
 893
 894        r = rdev->mc_rreg(rdev, reg);
 895        return r;
 896}
 897
 898/**
 899 * cail_mc_write - write MC (Memory Controller) register
 900 *
 901 * @info: atom card_info pointer
 902 * @reg: MC register offset
  903 * @val: value to write to the MC register
  904 *
  905 * Provides an MC register accessor for the atom interpreter (r4xx+).
 906 */
 907static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
 908{
 909        struct radeon_device *rdev = info->dev->dev_private;
 910
 911        rdev->mc_wreg(rdev, reg, val);
 912}
 913
 914/**
 915 * cail_reg_write - write MMIO register
 916 *
 917 * @info: atom card_info pointer
 918 * @reg: MMIO register offset
  919 * @val: value to write to the MMIO register
  920 *
  921 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 922 */
 923static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
 924{
 925        struct radeon_device *rdev = info->dev->dev_private;
 926
 927        WREG32(reg*4, val);
 928}
 929
 930/**
 931 * cail_reg_read - read MMIO register
 932 *
 933 * @info: atom card_info pointer
 934 * @reg: MMIO register offset
 935 *
 936 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 937 * Returns the value of the MMIO register.
 938 */
 939static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
 940{
 941        struct radeon_device *rdev = info->dev->dev_private;
 942        uint32_t r;
 943
 944        r = RREG32(reg*4);
 945        return r;
 946}
 947
 948/**
 949 * cail_ioreg_write - write IO register
 950 *
 951 * @info: atom card_info pointer
 952 * @reg: IO register offset
  953 * @val: value to write to the IO register
  954 *
  955 * Provides an IO register accessor for the atom interpreter (r4xx+).
 956 */
 957static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
 958{
 959        struct radeon_device *rdev = info->dev->dev_private;
 960
 961        WREG32_IO(reg*4, val);
 962}
 963
 964/**
 965 * cail_ioreg_read - read IO register
 966 *
 967 * @info: atom card_info pointer
 968 * @reg: IO register offset
 969 *
 970 * Provides an IO register accessor for the atom interpreter (r4xx+).
 971 * Returns the value of the IO register.
 972 */
 973static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
 974{
 975        struct radeon_device *rdev = info->dev->dev_private;
 976        uint32_t r;
 977
 978        r = RREG32_IO(reg*4);
 979        return r;
 980}
 981
 982/**
 983 * radeon_atombios_init - init the driver info and callbacks for atombios
 984 *
 985 * @rdev: radeon_device pointer
 986 *
 987 * Initializes the driver info and register access callbacks for the
 988 * ATOM interpreter (r4xx+).
  989 * Returns 0 on success, -ENOMEM on failure.
 990 * Called at driver startup.
 991 */
 992int radeon_atombios_init(struct radeon_device *rdev)
 993{
 994        struct card_info *atom_card_info =
 995            kzalloc(sizeof(struct card_info), GFP_KERNEL);
 996
 997        if (!atom_card_info)
 998                return -ENOMEM;
 999
1000        rdev->mode_info.atom_card_info = atom_card_info;
1001        atom_card_info->dev = rdev->ddev;
1002        atom_card_info->reg_read = cail_reg_read;
1003        atom_card_info->reg_write = cail_reg_write;
1004        /* needed for iio ops */
1005        if (rdev->rio_mem) {
1006                atom_card_info->ioreg_read = cail_ioreg_read;
1007                atom_card_info->ioreg_write = cail_ioreg_write;
1008        } else {
1009                DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
1010                atom_card_info->ioreg_read = cail_reg_read;
1011                atom_card_info->ioreg_write = cail_reg_write;
1012        }
1013        atom_card_info->mc_read = cail_mc_read;
1014        atom_card_info->mc_write = cail_mc_write;
1015        atom_card_info->pll_read = cail_pll_read;
1016        atom_card_info->pll_write = cail_pll_write;
1017
1018        rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
1019        if (!rdev->mode_info.atom_context) {
1020                radeon_atombios_fini(rdev);
1021                return -ENOMEM;
1022        }
1023
1024        mutex_init(&rdev->mode_info.atom_context->mutex);
1025        mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
1026        radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
1027        atom_allocate_fb_scratch(rdev->mode_info.atom_context);
1028        return 0;
1029}
1030
1031/**
1032 * radeon_atombios_fini - free the driver info and callbacks for atombios
1033 *
1034 * @rdev: radeon_device pointer
1035 *
1036 * Frees the driver info and register access callbacks for the ATOM
1037 * interpreter (r4xx+).
1038 * Called at driver shutdown.
1039 */
1040void radeon_atombios_fini(struct radeon_device *rdev)
1041{
1042        if (rdev->mode_info.atom_context) {
1043                kfree(rdev->mode_info.atom_context->scratch);
1044        }
1045        kfree(rdev->mode_info.atom_context);
1046        rdev->mode_info.atom_context = NULL;
1047        kfree(rdev->mode_info.atom_card_info);
1048        rdev->mode_info.atom_card_info = NULL;
1049}
1050
1051/* COMBIOS */
1052/*
1053 * COMBIOS is the bios format prior to ATOM. It provides
1054 * command tables similar to ATOM, but doesn't have a unified
1055 * parser.  See radeon_combios.c
1056 */
1057
1058/**
1059 * radeon_combios_init - init the driver info for combios
1060 *
1061 * @rdev: radeon_device pointer
1062 *
1063 * Initializes the driver info for combios (r1xx-r3xx).
 1064 * Returns 0 on success.
1065 * Called at driver startup.
1066 */
1067int radeon_combios_init(struct radeon_device *rdev)
1068{
1069        radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
1070        return 0;
1071}
1072
1073/**
1074 * radeon_combios_fini - free the driver info for combios
1075 *
1076 * @rdev: radeon_device pointer
1077 *
1078 * Frees the driver info for combios (r1xx-r3xx).
1079 * Called at driver shutdown.
1080 */
1081void radeon_combios_fini(struct radeon_device *rdev)
1082{
1083}
1084
1085/* if we get transitioned to only one device, take VGA back */
1086/**
1087 * radeon_vga_set_decode - enable/disable vga decode
1088 *
1089 * @cookie: radeon_device pointer
1090 * @state: enable/disable vga decode
1091 *
1092 * Enable/disable vga decode (all asics).
1093 * Returns VGA resource flags.
1094 */
1095static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1096{
1097        struct radeon_device *rdev = cookie;
1098        radeon_vga_set_state(rdev, state);
1099        if (state)
1100                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1101                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1102        else
1103                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1104}
1105
1106/**
1107 * radeon_check_pot_argument - check that argument is a power of two
1108 *
1109 * @arg: value to check
1110 *
1111 * Validates that a certain argument is a power of two (all asics).
1112 * Returns true if argument is valid.
1113 */
1114static bool radeon_check_pot_argument(int arg)
1115{
1116        return (arg & (arg - 1)) == 0;
1117}
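
/*
 * The bit trick above works because a power of two has exactly one bit set,
 * so subtracting 1 clears that bit and sets all lower ones: (64 & 63) == 0,
 * while (96 & 95) == 64 != 0.  Note that 0 also passes the check, which is
 * fine for the module parameters validated below.
 */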
1118
1119/**
 1120 * radeon_gart_size_auto - determine a sensible default GART size per ASIC family
 1121 *
 1122 * @family: ASIC family name
1123 */
1124static int radeon_gart_size_auto(enum radeon_family family)
1125{
1126        /* default to a larger gart size on newer asics */
1127        if (family >= CHIP_TAHITI)
1128                return 2048;
1129        else if (family >= CHIP_RV770)
1130                return 1024;
1131        else
1132                return 512;
1133}
1134
1135/**
1136 * radeon_check_arguments - validate module params
1137 *
1138 * @rdev: radeon_device pointer
1139 *
1140 * Validates certain module parameters and updates
1141 * the associated values used by the driver (all asics).
1142 */
1143static void radeon_check_arguments(struct radeon_device *rdev)
1144{
1145        /* vramlimit must be a power of two */
1146        if (!radeon_check_pot_argument(radeon_vram_limit)) {
1147                dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1148                                radeon_vram_limit);
1149                radeon_vram_limit = 0;
1150        }
1151
1152        if (radeon_gart_size == -1) {
1153                radeon_gart_size = radeon_gart_size_auto(rdev->family);
1154        }
 1155        /* gtt size must be a power of two and greater than or equal to 32M */
1156        if (radeon_gart_size < 32) {
1157                dev_warn(rdev->dev, "gart size (%d) too small\n",
1158                                radeon_gart_size);
1159                radeon_gart_size = radeon_gart_size_auto(rdev->family);
1160        } else if (!radeon_check_pot_argument(radeon_gart_size)) {
1161                dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1162                                radeon_gart_size);
1163                radeon_gart_size = radeon_gart_size_auto(rdev->family);
1164        }
1165        rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1166
 1167        /* AGP mode can only be -1, 0, 1, 2, 4, 8 */
1168        switch (radeon_agpmode) {
1169        case -1:
1170        case 0:
1171        case 1:
1172        case 2:
1173        case 4:
1174        case 8:
1175                break;
1176        default:
1177                dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1178                                "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1179                radeon_agpmode = 0;
1180                break;
1181        }
1182
1183        if (!radeon_check_pot_argument(radeon_vm_size)) {
1184                dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1185                         radeon_vm_size);
1186                radeon_vm_size = 4;
1187        }
1188
1189        if (radeon_vm_size < 1) {
1190                dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
1191                         radeon_vm_size);
1192                radeon_vm_size = 4;
1193        }
1194
1195        /*
 1196         * Max GPUVM size for Cayman, SI and CI is 40 bits.
1197         */
1198        if (radeon_vm_size > 1024) {
1199                dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1200                         radeon_vm_size);
1201                radeon_vm_size = 4;
1202        }
1203
 1204        /* defines the number of bits in the page table versus the page directory;
 1205         * a page is 4KB so we have a 12-bit offset, a minimum of 9 bits in the
 1206         * page table, and the remaining bits are in the page directory */
1207        if (radeon_vm_block_size == -1) {
1208
1209                /* Total bits covered by PD + PTs */
1210                unsigned bits = ilog2(radeon_vm_size) + 18;
1211
 1212                /* Make sure the PD is 4K in size up to 8GB address space.
 1213                   Above that, split equally between PD and PTs */
1214                if (radeon_vm_size <= 8)
1215                        radeon_vm_block_size = bits - 9;
1216                else
1217                        radeon_vm_block_size = (bits + 3) / 2;
1218
1219        } else if (radeon_vm_block_size < 9) {
1220                dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1221                         radeon_vm_block_size);
1222                radeon_vm_block_size = 9;
1223        }
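        /* Worked example: for radeon_vm_size = 4 (GB) the VM spans 32 bits;
         * minus the 12-bit page offset that leaves 20 bits, so block_size =
         * 20 - 9 = 11 bits per page table and 9 bits of page directory
         * (512 entries * 8 bytes = a 4K PD, as intended above).
         */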
1224
1225        if (radeon_vm_block_size > 24 ||
1226            (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1227                dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1228                         radeon_vm_block_size);
1229                radeon_vm_block_size = 9;
1230        }
1231}
1232
1233/**
1234 * radeon_switcheroo_set_state - set switcheroo state
1235 *
1236 * @pdev: pci dev pointer
1237 * @state: vga_switcheroo state
1238 *
1239 * Callback for the switcheroo driver.  Suspends or resumes the
 1240 * asic before or after it is powered up using ACPI methods.
1241 */
1242static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1243{
1244        struct drm_device *dev = pci_get_drvdata(pdev);
1245
1246        if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1247                return;
1248
1249        if (state == VGA_SWITCHEROO_ON) {
1250                pr_info("radeon: switched on\n");
1251                /* don't suspend or resume card normally */
1252                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1253
1254                radeon_resume_kms(dev, true, true);
1255
1256                dev->switch_power_state = DRM_SWITCH_POWER_ON;
1257                drm_kms_helper_poll_enable(dev);
1258        } else {
1259                pr_info("radeon: switched off\n");
1260                drm_kms_helper_poll_disable(dev);
1261                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1262                radeon_suspend_kms(dev, true, true, false);
1263                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1264        }
1265}
1266
1267/**
1268 * radeon_switcheroo_can_switch - see if switcheroo state can change
1269 *
1270 * @pdev: pci dev pointer
1271 *
 1272 * Callback for the switcheroo driver.  Check if the switcheroo
1273 * state can be changed.
1274 * Returns true if the state can be changed, false if not.
1275 */
1276static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1277{
1278        struct drm_device *dev = pci_get_drvdata(pdev);
1279
1280        /*
1281         * FIXME: open_count is protected by drm_global_mutex but that would lead to
1282         * locking inversion with the driver load path. And the access here is
1283         * completely racy anyway. So don't bother with locking for now.
1284         */
1285        return dev->open_count == 0;
1286}
1287
1288static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
1289        .set_gpu_state = radeon_switcheroo_set_state,
1290        .reprobe = NULL,
1291        .can_switch = radeon_switcheroo_can_switch,
1292};
1293
1294/**
1295 * radeon_device_init - initialize the driver
1296 *
1297 * @rdev: radeon_device pointer
 1298 * @ddev: drm dev pointer
1299 * @pdev: pci dev pointer
1300 * @flags: driver flags
1301 *
1302 * Initializes the driver info and hw (all asics).
1303 * Returns 0 for success or an error on failure.
1304 * Called at driver startup.
1305 */
1306int radeon_device_init(struct radeon_device *rdev,
1307                       struct drm_device *ddev,
1308                       struct pci_dev *pdev,
1309                       uint32_t flags)
1310{
1311        int r, i;
1312        int dma_bits;
1313        bool runtime = false;
1314
1315        rdev->shutdown = false;
1316        rdev->dev = &pdev->dev;
1317        rdev->ddev = ddev;
1318        rdev->pdev = pdev;
1319        rdev->flags = flags;
1320        rdev->family = flags & RADEON_FAMILY_MASK;
1321        rdev->is_atom_bios = false;
1322        rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1323        rdev->mc.gtt_size = 512 * 1024 * 1024;
1324        rdev->accel_working = false;
1325        /* set up ring ids */
1326        for (i = 0; i < RADEON_NUM_RINGS; i++) {
1327                rdev->ring[i].idx = i;
1328        }
1329        rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
1330
1331        DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1332                 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1333                 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1334
 1335        /* mutex initializations are all done here so we
 1336         * can recall functions without having locking issues */
1337        mutex_init(&rdev->ring_lock);
1338        mutex_init(&rdev->dc_hw_i2c_mutex);
1339        atomic_set(&rdev->ih.lock, 0);
1340        mutex_init(&rdev->gem.mutex);
1341        mutex_init(&rdev->pm.mutex);
1342        mutex_init(&rdev->gpu_clock_mutex);
1343        mutex_init(&rdev->srbm_mutex);
1344        mutex_init(&rdev->grbm_idx_mutex);
1345        init_rwsem(&rdev->pm.mclk_lock);
1346        init_rwsem(&rdev->exclusive_lock);
1347        init_waitqueue_head(&rdev->irq.vblank_queue);
1348        mutex_init(&rdev->mn_lock);
1349        hash_init(rdev->mn_hash);
1350        r = radeon_gem_init(rdev);
1351        if (r)
1352                return r;
1353
1354        radeon_check_arguments(rdev);
1355        /* Adjust VM size here.
1356         * Max GPUVM size for cayman+ is 40 bits.
1357         */
1358        rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1359
1360        /* Set asic functions */
1361        r = radeon_asic_init(rdev);
1362        if (r)
1363                return r;
1364
1365        /* all of the newer IGP chips have an internal gart
1366         * However some rs4xx report as AGP, so remove that here.
1367         */
1368        if ((rdev->family >= CHIP_RS400) &&
1369            (rdev->flags & RADEON_IS_IGP)) {
1370                rdev->flags &= ~RADEON_IS_AGP;
1371        }
1372
1373        if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1374                radeon_agp_disable(rdev);
1375        }
1376
1377        /* Set the internal MC address mask
1378         * This is the max address of the GPU's
1379         * internal address space.
1380         */
1381        if (rdev->family >= CHIP_CAYMAN)
1382                rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1383        else if (rdev->family >= CHIP_CEDAR)
1384                rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1385        else
1386                rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1387
1388        /* set DMA mask + need_dma32 flags.
1389         * PCIE - can handle 40-bits.
1390         * IGP - can handle 40-bits
1391         * AGP - generally dma32 is safest
1392         * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1393         */
1394        rdev->need_dma32 = false;
1395        if (rdev->flags & RADEON_IS_AGP)
1396                rdev->need_dma32 = true;
1397        if ((rdev->flags & RADEON_IS_PCI) &&
1398            (rdev->family <= CHIP_RS740))
1399                rdev->need_dma32 = true;
1400
1401        dma_bits = rdev->need_dma32 ? 32 : 40;
1402        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1403        if (r) {
1404                rdev->need_dma32 = true;
1405                dma_bits = 32;
1406                pr_warn("radeon: No suitable DMA available\n");
1407        }
1408        r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1409        if (r) {
1410                pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
1411                pr_warn("radeon: No coherent DMA available\n");
1412        }
1413
1414        /* Registers mapping */
1415        /* TODO: block userspace mapping of io register */
1416        spin_lock_init(&rdev->mmio_idx_lock);
1417        spin_lock_init(&rdev->smc_idx_lock);
1418        spin_lock_init(&rdev->pll_idx_lock);
1419        spin_lock_init(&rdev->mc_idx_lock);
1420        spin_lock_init(&rdev->pcie_idx_lock);
1421        spin_lock_init(&rdev->pciep_idx_lock);
1422        spin_lock_init(&rdev->pif_idx_lock);
1423        spin_lock_init(&rdev->cg_idx_lock);
1424        spin_lock_init(&rdev->uvd_idx_lock);
1425        spin_lock_init(&rdev->rcu_idx_lock);
1426        spin_lock_init(&rdev->didt_idx_lock);
1427        spin_lock_init(&rdev->end_idx_lock);
1428        if (rdev->family >= CHIP_BONAIRE) {
1429                rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1430                rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1431        } else {
1432                rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1433                rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1434        }
1435        rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1436        if (rdev->rmmio == NULL)
1437                return -ENOMEM;
1438
1439        /* doorbell bar mapping */
1440        if (rdev->family >= CHIP_BONAIRE)
1441                radeon_doorbell_init(rdev);
1442
1443        /* io port mapping */
1444        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1445                if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1446                        rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1447                        rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1448                        break;
1449                }
1450        }
1451        if (rdev->rio_mem == NULL)
1452                DRM_ERROR("Unable to find PCI I/O BAR\n");
1453
1454        if (rdev->flags & RADEON_IS_PX)
1455                radeon_device_handle_px_quirks(rdev);
1456
1457        /* if we have > 1 VGA cards, then disable the radeon VGA resources */
1458        /* this will fail for cards that aren't VGA class devices, just
1459         * ignore it */
1460        vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
1461
1462        if (rdev->flags & RADEON_IS_PX)
1463                runtime = true;
1464        if (!pci_is_thunderbolt_attached(rdev->pdev))
1465                vga_switcheroo_register_client(rdev->pdev,
1466                                               &radeon_switcheroo_ops, runtime);
1467        if (runtime)
1468                vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
1469
1470        r = radeon_init(rdev);
1471        if (r)
1472                goto failed;
1473
1474        r = radeon_gem_debugfs_init(rdev);
1475        if (r) {
1476                DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1477        }
1478
1479        r = radeon_mst_debugfs_init(rdev);
1480        if (r) {
1481                DRM_ERROR("registering mst debugfs failed (%d).\n", r);
1482        }
1483
1484        if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
 1485                /* Acceleration not working on AGP card, try again
1486                 * with fallback to PCI or PCIE GART
1487                 */
1488                radeon_asic_reset(rdev);
1489                radeon_fini(rdev);
1490                radeon_agp_disable(rdev);
1491                r = radeon_init(rdev);
1492                if (r)
1493                        goto failed;
1494        }
1495
1496        r = radeon_ib_ring_tests(rdev);
1497        if (r)
1498                DRM_ERROR("ib ring test failed (%d).\n", r);
1499
1500        /*
 1501         * Turks/Thames GPUs will freeze the whole laptop if DPM is not restarted
 1502         * after the CP ring has chewed on at least one packet. Hence we stop
 1503         * and restart DPM here, after radeon_ib_ring_tests().
1504         */
1505        if (rdev->pm.dpm_enabled &&
1506            (rdev->pm.pm_method == PM_METHOD_DPM) &&
1507            (rdev->family == CHIP_TURKS) &&
1508            (rdev->flags & RADEON_IS_MOBILITY)) {
1509                mutex_lock(&rdev->pm.mutex);
1510                radeon_dpm_disable(rdev);
1511                radeon_dpm_enable(rdev);
1512                mutex_unlock(&rdev->pm.mutex);
1513        }
1514
1515        if ((radeon_testing & 1)) {
1516                if (rdev->accel_working)
1517                        radeon_test_moves(rdev);
1518                else
1519                        DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1520        }
1521        if ((radeon_testing & 2)) {
1522                if (rdev->accel_working)
1523                        radeon_test_syncing(rdev);
1524                else
1525                        DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1526        }
1527        if (radeon_benchmarking) {
1528                if (rdev->accel_working)
1529                        radeon_benchmark(rdev, radeon_benchmarking);
1530                else
1531                        DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1532        }
1533        return 0;
1534
1535failed:
1536        /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
1537        if (radeon_is_px(ddev))
1538                pm_runtime_put_noidle(ddev->dev);
1539        if (runtime)
1540                vga_switcheroo_fini_domain_pm_ops(rdev->dev);
1541        return r;
1542}
1543
1544/**
1545 * radeon_device_fini - tear down the driver
1546 *
1547 * @rdev: radeon_device pointer
1548 *
1549 * Tear down the driver info (all asics).
1550 * Called at driver shutdown.
1551 */
1552void radeon_device_fini(struct radeon_device *rdev)
1553{
1554        DRM_INFO("radeon: finishing device.\n");
1555        rdev->shutdown = true;
1556        /* evict vram memory */
1557        radeon_bo_evict_vram(rdev);
1558        radeon_fini(rdev);
1559        if (!pci_is_thunderbolt_attached(rdev->pdev))
1560                vga_switcheroo_unregister_client(rdev->pdev);
1561        if (rdev->flags & RADEON_IS_PX)
1562                vga_switcheroo_fini_domain_pm_ops(rdev->dev);
1563        vga_client_register(rdev->pdev, NULL, NULL, NULL);
1564        if (rdev->rio_mem)
1565                pci_iounmap(rdev->pdev, rdev->rio_mem);
1566        rdev->rio_mem = NULL;
1567        iounmap(rdev->rmmio);
1568        rdev->rmmio = NULL;
1569        if (rdev->family >= CHIP_BONAIRE)
1570                radeon_doorbell_fini(rdev);
1571}
1572
1573
1574/*
1575 * Suspend & resume.
1576 */
1577/**
1578 * radeon_suspend_kms - initiate device suspend
1579 *
1580 * @dev: drm dev pointer
1581 * @suspend: true to disable and power down the PCI device
     * @fbcon: suspend the fbdev console as well
     * @freeze: the suspend is the freeze phase of hibernation
1582 *
1583 * Puts the hw in the suspend state (all asics).
1584 * Returns 0 for success or an error on failure.
1585 * Called at driver suspend.
1586 */
1587int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1588                       bool fbcon, bool freeze)
1589{
1590        struct radeon_device *rdev;
1591        struct drm_crtc *crtc;
1592        struct drm_connector *connector;
1593        int i, r;
1594
1595        if (dev == NULL || dev->dev_private == NULL) {
1596                return -ENODEV;
1597        }
1598
1599        rdev = dev->dev_private;
1600
1601        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1602                return 0;
1603
1604        drm_kms_helper_poll_disable(dev);
1605
1606        drm_modeset_lock_all(dev);
1607        /* turn off display hw */
1608        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1609                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1610        }
1611        drm_modeset_unlock_all(dev);
1612
1613        /* unpin the front buffers and cursors */
1614        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1615                struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1616                struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
1617                struct radeon_bo *robj;
1618
1619                if (radeon_crtc->cursor_bo) {
1620                        struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1621                        r = radeon_bo_reserve(robj, false);
1622                        if (r == 0) {
1623                                radeon_bo_unpin(robj);
1624                                radeon_bo_unreserve(robj);
1625                        }
1626                }
1627
1628                if (rfb == NULL || rfb->obj == NULL) {
1629                        continue;
1630                }
1631                robj = gem_to_radeon_bo(rfb->obj);
1632                /* don't unpin kernel fb objects */
1633                if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
1634                        r = radeon_bo_reserve(robj, false);
1635                        if (r == 0) {
1636                                radeon_bo_unpin(robj);
1637                                radeon_bo_unreserve(robj);
1638                        }
1639                }
1640        }
1641        /* evict vram memory */
1642        radeon_bo_evict_vram(rdev);
1643
1644        /* wait for gpu to finish processing current batch */
1645        for (i = 0; i < RADEON_NUM_RINGS; i++) {
1646                r = radeon_fence_wait_empty(rdev, i);
1647                if (r) {
1648                        /* delay the GPU reset until resume */
1649                        radeon_fence_driver_force_completion(rdev, i);
1650                }
1651        }
1652
1653        radeon_save_bios_scratch_regs(rdev);
1654
1655        radeon_suspend(rdev);
1656        radeon_hpd_fini(rdev);
1657        /* evict remaining vram memory.
1658         * This second eviction pushes the GART page table out of VRAM
1659         * using the CPU.
1660         */
1661        radeon_bo_evict_vram(rdev);
1662
1663        radeon_agp_suspend(rdev);
1664
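            /*
             * For the hibernation freeze phase on CEDAR and newer asics,
             * perform a full ASIC reset and restore the PCI state instead of
             * powering the device down; a regular suspend disables the PCI
             * device and puts it into D3hot.
             */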
1665        pci_save_state(dev->pdev);
1666        if (freeze && rdev->family >= CHIP_CEDAR) {
1667                rdev->asic->asic_reset(rdev, true);
1668                pci_restore_state(dev->pdev);
1669        } else if (suspend) {
1670                /* Shut down the device */
1671                pci_disable_device(dev->pdev);
1672                pci_set_power_state(dev->pdev, PCI_D3hot);
1673        }
1674
1675        if (fbcon) {
1676                console_lock();
1677                radeon_fbdev_set_suspend(rdev, 1);
1678                console_unlock();
1679        }
1680        return 0;
1681}
1682
1683/**
1684 * radeon_resume_kms - initiate device resume
1685 *
1686 * @dev: drm dev pointer
     * @resume: re-enable and wake up the PCI device
     * @fbcon: resume the fbdev console as well
1687 *
1688 * Bring the hw back to operating state (all asics).
1689 * Returns 0 for success or an error on failure.
1690 * Called at driver resume.
1691 */
1692int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1693{
1694        struct drm_connector *connector;
1695        struct radeon_device *rdev = dev->dev_private;
1696        struct drm_crtc *crtc;
1697        int r;
1698
1699        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1700                return 0;
1701
1702        if (fbcon) {
1703                console_lock();
1704        }
1705        if (resume) {
1706                pci_set_power_state(dev->pdev, PCI_D0);
1707                pci_restore_state(dev->pdev);
1708                if (pci_enable_device(dev->pdev)) {
1709                        if (fbcon)
1710                                console_unlock();
1711                        return -1;
1712                }
1713        }
1714        /* resume AGP if in use */
1715        radeon_agp_resume(rdev);
1716        radeon_resume(rdev);
1717
1718        r = radeon_ib_ring_tests(rdev);
1719        if (r)
1720                DRM_ERROR("ib ring test failed (%d).\n", r);
1721
1722        if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
1723                /* do dpm late init */
1724                r = radeon_pm_late_init(rdev);
1725                if (r) {
1726                        rdev->pm.dpm_enabled = false;
1727                        DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1728                }
1729        } else {
1730                /* resume old pm late */
1731                radeon_pm_resume(rdev);
1732        }
1733
1734        radeon_restore_bios_scratch_regs(rdev);
1735
1736        /* pin cursors */
1737        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1738                struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1739
1740                if (radeon_crtc->cursor_bo) {
1741                        struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1742                        r = radeon_bo_reserve(robj, false);
1743                        if (r == 0) {
1744                                /* Only 27 bit offset for legacy cursor */
1745                                r = radeon_bo_pin_restricted(robj,
1746                                                             RADEON_GEM_DOMAIN_VRAM,
1747                                                             ASIC_IS_AVIVO(rdev) ?
1748                                                             0 : 1 << 27,
1749                                                             &radeon_crtc->cursor_addr);
1750                                if (r != 0)
1751                                        DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
1752                                radeon_bo_unreserve(robj);
1753                        }
1754                }
1755        }
1756
1757        /* init dig PHYs, disp eng pll */
1758        if (rdev->is_atom_bios) {
1759                radeon_atom_encoder_init(rdev);
1760                radeon_atom_disp_eng_pll_init(rdev);
1761                /* turn on the BL */
1762                if (rdev->mode_info.bl_encoder) {
1763                        u8 bl_level = radeon_get_backlight_level(rdev,
1764                                                                 rdev->mode_info.bl_encoder);
1765                        radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1766                                                   bl_level);
1767                }
1768        }
1769        /* reset hpd state */
1770        radeon_hpd_init(rdev);
1771        /* blat the mode back in */
1772        if (fbcon) {
1773                drm_helper_resume_force_mode(dev);
1774                /* turn on display hw */
1775                drm_modeset_lock_all(dev);
1776                list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1777                        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1778                }
1779                drm_modeset_unlock_all(dev);
1780        }
1781
1782        drm_kms_helper_poll_enable(dev);
1783
1784        /* set the power state here in case we are a PX system or headless */
1785        if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
1786                radeon_pm_compute_clocks(rdev);
1787
1788        if (fbcon) {
1789                radeon_fbdev_set_suspend(rdev, 0);
1790                console_unlock();
1791        }
1792
1793        return 0;
1794}
1795
1796/**
1797 * radeon_gpu_reset - reset the asic
1798 *
1799 * @rdev: radeon device pointer
1800 *
1801 * Attempt to reset the GPU if it has hung (all asics).
1802 * Returns 0 for success or an error on failure.
1803 */
1804int radeon_gpu_reset(struct radeon_device *rdev)
1805{
1806        unsigned ring_sizes[RADEON_NUM_RINGS];
1807        uint32_t *ring_data[RADEON_NUM_RINGS];
1808
1809        bool saved = false;
1810
1811        int i, r;
1812        int resched;
1813
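            /*
             * Hold the exclusive lock for writing so no new command
             * submissions or concurrent resets run while the GPU is reset;
             * it is downgraded to a read lock once the hardware is back up.
             */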
1814        down_write(&rdev->exclusive_lock);
1815
1816        if (!rdev->needs_reset) {
1817                up_write(&rdev->exclusive_lock);
1818                return 0;
1819        }
1820
1821        atomic_inc(&rdev->gpu_reset_counter);
1822
1823        radeon_save_bios_scratch_regs(rdev);
1824        /* block TTM */
1825        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1826        radeon_suspend(rdev);
1827        radeon_hpd_fini(rdev);
1828
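            /*
             * Save the unprocessed command dwords from each ring so they can
             * be replayed once the GPU has been reset and resumed.
             */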
1829        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1830                ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1831                                                   &ring_data[i]);
1832                if (ring_sizes[i]) {
1833                        saved = true;
1834                        dev_info(rdev->dev, "Saved %d dwords of commands "
1835                                 "on ring %d.\n", ring_sizes[i], i);
1836                }
1837        }
1838
1839        r = radeon_asic_reset(rdev);
1840        if (!r) {
1841                dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1842                radeon_resume(rdev);
1843        }
1844
1845        radeon_restore_bios_scratch_regs(rdev);
1846
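            /*
             * If the reset succeeded, replay the saved ring contents;
             * otherwise force fence completion so waiters are released, and
             * free the saved data.
             */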
1847        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1848                if (!r && ring_data[i]) {
1849                        radeon_ring_restore(rdev, &rdev->ring[i],
1850                                            ring_sizes[i], ring_data[i]);
1851                } else {
1852                        radeon_fence_driver_force_completion(rdev, i);
1853                        kfree(ring_data[i]);
1854                }
1855        }
1856
1857        if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
1858                /* do dpm late init */
1859                r = radeon_pm_late_init(rdev);
1860                if (r) {
1861                        rdev->pm.dpm_enabled = false;
1862                        DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1863                }
1864        } else {
1865                /* resume old pm late */
1866                radeon_pm_resume(rdev);
1867        }
1868
1869        /* init dig PHYs, disp eng pll */
1870        if (rdev->is_atom_bios) {
1871                radeon_atom_encoder_init(rdev);
1872                radeon_atom_disp_eng_pll_init(rdev);
1873                /* turn on the BL */
1874                if (rdev->mode_info.bl_encoder) {
1875                        u8 bl_level = radeon_get_backlight_level(rdev,
1876                                                                 rdev->mode_info.bl_encoder);
1877                        radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1878                                                   bl_level);
1879                }
1880        }
1881        /* reset hpd state */
1882        radeon_hpd_init(rdev);
1883
1884        ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1885
1886        rdev->in_reset = true;
1887        rdev->needs_reset = false;
1888
1889        downgrade_write(&rdev->exclusive_lock);
1890
1891        drm_helper_resume_force_mode(rdev->ddev);
1892
1893        /* set the power state here in case we are a PX system or headless */
1894        if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
1895                radeon_pm_compute_clocks(rdev);
1896
1897        if (!r) {
1898                r = radeon_ib_ring_tests(rdev);
1899                if (r && saved)
1900                        r = -EAGAIN;
1901        } else {
1902                /* bad news, how do we tell userspace? */
1903                dev_info(rdev->dev, "GPU reset failed\n");
1904        }
1905
1906        rdev->needs_reset = r == -EAGAIN;
1907        rdev->in_reset = false;
1908
1909        up_read(&rdev->exclusive_lock);
1910        return r;
1911}
1912
1913
1914/*
1915 * Debugfs
1916 */
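    /**
     * radeon_debugfs_add_files - register a set of debugfs entries
     *
     * @rdev: radeon_device pointer
     * @files: array of drm_info_list entries to register
     * @nfiles: number of entries in @files
     *
     * Records the entries in the per-device table, skipping sets that are
     * already registered, and creates the corresponding debugfs files when
     * CONFIG_DEBUG_FS is enabled.
     * Returns 0 on success or -EINVAL if the component table is full.
     */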
1917int radeon_debugfs_add_files(struct radeon_device *rdev,
1918                             struct drm_info_list *files,
1919                             unsigned nfiles)
1920{
1921        unsigned i;
1922
1923        for (i = 0; i < rdev->debugfs_count; i++) {
1924                if (rdev->debugfs[i].files == files) {
1925                        /* Already registered */
1926                        return 0;
1927                }
1928        }
1929
1930        i = rdev->debugfs_count + 1;
1931        if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1932                DRM_ERROR("Reached maximum number of debugfs components.\n");
1933                DRM_ERROR("Report this so we can increase "
1934                          "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
1935                return -EINVAL;
1936        }
1937        rdev->debugfs[rdev->debugfs_count].files = files;
1938        rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1939        rdev->debugfs_count = i;
1940#if defined(CONFIG_DEBUG_FS)
1941        drm_debugfs_create_files(files, nfiles,
1942                                 rdev->ddev->primary->debugfs_root,
1943                                 rdev->ddev->primary);
1944#endif
1945        return 0;
1946}
1947