linux/drivers/gpu/drm/msm/adreno/adreno_gpu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 */

#include <linux/ascii85.h>
#include <linux/interconnect.h>
#include <linux/qcom_scm.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <soc/qcom/ocmem.h>
#include "adreno_gpu.h"
#include "a6xx_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

static bool zap_available = true;

static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
		u32 pasid)
{
	struct device *dev = &gpu->pdev->dev;
	const struct firmware *fw;
	const char *signed_fwname = NULL;
	struct device_node *np, *mem_np;
	struct resource r;
	phys_addr_t mem_phys;
	ssize_t mem_size;
	void *mem_region = NULL;
	int ret;

	if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
		zap_available = false;
		return -EINVAL;
	}

	np = of_get_child_by_name(dev->of_node, "zap-shader");
	if (!np) {
		zap_available = false;
		return -ENODEV;
	}

	/*
	 * Check for a firmware-name property.  This is the new scheme
	 * to handle firmware that may be signed with device specific
	 * keys, allowing us to have a different zap fw path for different
	 * devices.
	 *
	 * If the firmware-name property is found, we bypass the
	 * adreno_request_fw() mechanism, because we don't need to handle
	 * the /lib/firmware/qcom/... vs /lib/firmware/... case.
	 *
	 * If the firmware-name property is not found, for backwards
	 * compatibility we fall back to the fwname from the gpulist
	 * table.
	 *
	 * Note that this must be read before the of_node_put() below
	 * drops our reference to the zap-shader node.
	 */
	of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);

	mem_np = of_parse_phandle(np, "memory-region", 0);
	of_node_put(np);
	if (!mem_np) {
		zap_available = false;
		return -EINVAL;
	}

	ret = of_address_to_resource(mem_np, 0, &r);
	of_node_put(mem_np);
	if (ret)
		return ret;

	mem_phys = r.start;

	if (signed_fwname) {
		fwname = signed_fwname;
		ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
		if (ret)
			fw = ERR_PTR(ret);
	} else if (fwname) {
		/* Request the MDT file from the default location: */
		fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
	} else {
		/*
		 * For new targets, we require the firmware-name property,
		 * if a zap-shader is required, rather than falling back
		 * to a firmware name specified in gpulist.
		 *
		 * Because the firmware is signed with a (potentially)
		 * device specific key, having the name come from gpulist
		 * was a bad idea, and is only provided for backwards
		 * compatibility for older targets.
		 */
		return -ENODEV;
	}

	if (IS_ERR(fw)) {
		DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
		return PTR_ERR(fw);
	}

	/* Figure out how much memory we need */
	mem_size = qcom_mdt_get_size(fw);
	if (mem_size < 0) {
		ret = mem_size;
		goto out;
	}

	if (mem_size > resource_size(&r)) {
		DRM_DEV_ERROR(dev,
			"memory region is too small to load the MDT\n");
		ret = -E2BIG;
		goto out;
	}

	/* Map the reserved memory region that will hold the firmware image */
	mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
	if (!mem_region) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Load the rest of the MDT
	 *
	 * Note that we could be dealing with two different paths, since
	 * with upstream linux-firmware it would be in a qcom/ subdir..
	 * adreno_request_fw() handles this, but qcom_mdt_load() does
	 * not.  But since we've already gotten through adreno_request_fw()
	 * we know which of the two cases it is:
	 */
	if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
		ret = qcom_mdt_load(dev, fw, fwname, pasid,
				mem_region, mem_phys, mem_size, NULL);
	} else {
		char *newname;

		newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
		if (!newname) {
			ret = -ENOMEM;
			goto out;
		}

		ret = qcom_mdt_load(dev, fw, newname, pasid,
				mem_region, mem_phys, mem_size, NULL);
		kfree(newname);
	}
	if (ret)
		goto out;

	/* Send the image to the secure world */
	ret = qcom_scm_pas_auth_and_reset(pasid);

	/*
	 * If the scm call returns -EOPNOTSUPP we assume that this target
	 * doesn't need/support the zap shader so quietly fail
	 */
	if (ret == -EOPNOTSUPP)
		zap_available = false;
	else if (ret)
		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");

out:
	if (mem_region)
		memunmap(mem_region);

	release_firmware(fw);

	return ret;
}

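/*
 * For reference, an illustrative (not authoritative) devicetree shape
 * that satisfies the lookups above -- the node names, addresses and
 * firmware path here are hypothetical:
 *
 *	reserved-memory {
 *		zap_shader_region: zap@8f700000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x0 0x8f700000 0x0 0x60000>;
 *			no-map;
 *		};
 *	};
 *
 *	gpu {
 *		...
 *		zap-shader {
 *			memory-region = <&zap_shader_region>;
 *			firmware-name = "qcom/myboard/a630_zap.mbn";
 *		};
 *	};
 */
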
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct platform_device *pdev = gpu->pdev;

	/* Short cut if we determine the zap shader isn't available/needed */
	if (!zap_available)
		return -ENODEV;

	/* We need SCM to be able to load the firmware */
	if (!qcom_scm_is_available()) {
		DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
		return -EPROBE_DEFER;
	}

	return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}

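/*
 * Illustrative sketch of typical use from a target's ->hw_init() hook.
 * The exact fallback register write is per-target; the a5xx/a6xx code
 * passes a GPU_PAS_ID constant as the pasid:
 *
 *	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
 *	if (ret == -ENODEV) {
 *		// No zap shader available/needed: the target instead
 *		// drops out of secure mode by writing its SECVID trust
 *		// control register directly.
 *	} else if (ret) {
 *		return ret;
 *	}
 */
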
void adreno_set_llc_attributes(struct iommu_domain *iommu)
{
	iommu_set_pgtable_quirks(iommu, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA);
}

struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
		struct platform_device *pdev)
{
	struct iommu_domain *iommu;
	struct msm_mmu *mmu;
	struct msm_gem_address_space *aspace;
	u64 start, size;

	iommu = iommu_domain_alloc(&platform_bus_type);
	if (!iommu)
		return NULL;

	mmu = msm_iommu_new(&pdev->dev, iommu);
	if (IS_ERR(mmu)) {
		iommu_domain_free(iommu);
		return ERR_CAST(mmu);
	}

	/*
	 * Use the aperture start or SZ_16M, whichever is greater. This will
	 * ensure that we align with the allocated pagetable range while still
	 * allowing room in the lower 32 bits for GMEM and whatnot
	 */
	start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
	size = iommu->geometry.aperture_end - start + 1;

	aspace = msm_gem_address_space_create(mmu, "gpu",
		start & GENMASK_ULL(48, 0), size);

	if (IS_ERR(aspace) && !IS_ERR(mmu))
		mmu->funcs->destroy(mmu);

	return aspace;
}

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_GMEM_BASE:
		*value = !adreno_is_a650_family(adreno_gpu) ? 0x100000 : 0;
		return 0;
	case MSM_PARAM_CHIP_ID:
		*value = adreno_gpu->rev.patchid |
				(adreno_gpu->rev.minor << 8) |
				(adreno_gpu->rev.major << 16) |
				(adreno_gpu->rev.core << 24);
		return 0;
	case MSM_PARAM_MAX_FREQ:
		*value = adreno_gpu->base.fast_rate;
		return 0;
	case MSM_PARAM_TIMESTAMP:
		if (adreno_gpu->funcs->get_timestamp) {
			int ret;

			pm_runtime_get_sync(&gpu->pdev->dev);
			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
			pm_runtime_put_autosuspend(&gpu->pdev->dev);

			return ret;
		}
		return -EINVAL;
	case MSM_PARAM_PRIORITIES:
		*value = gpu->nr_rings * NR_SCHED_PRIORITIES;
		return 0;
	case MSM_PARAM_PP_PGTABLE:
		*value = 0;
		return 0;
	case MSM_PARAM_FAULTS:
		*value = gpu->global_faults;
		return 0;
	case MSM_PARAM_SUSPENDS:
		*value = gpu->suspend_count;
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}

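/*
 * These params back the DRM_IOCTL_MSM_GET_PARAM ioctl. A minimal sketch
 * of the userspace side (roughly what mesa's freedreno winsys does via
 * libdrm; error handling elided):
 *
 *	struct drm_msm_param req = {
 *		.pipe = MSM_PIPE_3D0,
 *		.param = MSM_PARAM_GMEM_SIZE,
 *	};
 *
 *	if (!drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req)))
 *		gmem_size = req.value;
 */
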
const struct firmware *
adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
	struct drm_device *drm = adreno_gpu->base.dev;
	const struct firmware *fw = NULL;
	char *newname;
	int ret;

	newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
	if (!newname)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try first to load from qcom/$fwfile using a direct load (to avoid
	 * a potential timeout waiting for usermode helper)
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_NEW)) {

		ret = request_firmware_direct(&fw, newname, drm->dev);
		if (!ret) {
			DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_NEW;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
				newname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	/*
	 * Then try the legacy location without qcom/ prefix
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {

		ret = request_firmware_direct(&fw, fwname, drm->dev);
		if (!ret) {
			DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
				fwname);
			adreno_gpu->fwloc = FW_LOCATION_LEGACY;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
				fwname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	/*
	 * Finally fall back to request_firmware() for cases where the
	 * usermode helper is needed (I think mainly android)
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {

		ret = request_firmware(&fw, newname, drm->dev);
		if (!ret) {
			DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_HELPER;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
				newname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
	fw = ERR_PTR(-ENOENT);
out:
	kfree(newname);
	return fw;
}

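/*
 * Concretely, with the default firmware search path, a request for
 * "a630_gmu.bin" probes in this order:
 *
 *	/lib/firmware/qcom/a630_gmu.bin            (FW_LOCATION_NEW, direct)
 *	/lib/firmware/a630_gmu.bin                 (FW_LOCATION_LEGACY, direct)
 *	qcom/a630_gmu.bin via the usermode helper  (FW_LOCATION_HELPER)
 *
 * Whichever location succeeds first is cached in adreno_gpu->fwloc, so
 * all later firmware files for this GPU are fetched from the same place.
 */
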
int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
		const struct firmware *fw;

		if (!adreno_gpu->info->fw[i])
			continue;

		/* Skip if the firmware has already been loaded */
		if (adreno_gpu->fw[i])
			continue;

		fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
		if (IS_ERR(fw))
			return PTR_ERR(fw);

		adreno_gpu->fw[i] = fw;
	}

	return 0;
}

struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova)
{
	struct drm_gem_object *bo;
	void *ptr;

	ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
		MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);

	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	/* Skip the first dword of the image, which is not uploaded: */
	memcpy(ptr, &fw->data[4], fw->size - 4);

	msm_gem_put_vaddr(bo);

	return bo;
}

int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret, i;

	VERB("%s", gpu->name);

	ret = adreno_load_fw(adreno_gpu);
	if (ret)
		return ret;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (!ring)
			continue;

		ring->cur = ring->start;
		ring->next = ring->start;

		/* reset completed fence seqno: */
		ring->memptrs->fence = ring->fctx->completed_fence;
		ring->memptrs->rptr = 0;
	}

	return 0;
}

/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
		struct msm_ringbuffer *ring)
{
	struct msm_gpu *gpu = &adreno_gpu->base;

	return gpu->funcs->get_rptr(gpu, ring);
}

struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
	return gpu->rb[0];
}

void adreno_recover(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	// XXX pm-runtime??  we *need* the device to be off after this
	// so maybe continuing to call ->pm_suspend/resume() is better?

	gpu->funcs->pm_suspend(gpu);
	gpu->funcs->pm_resume(gpu);

	ret = msm_gpu_hw_init(gpu);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
		/* hmm, oh well? */
	}
}

void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg)
{
	uint32_t wptr;

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/*
	 * Mask wptr value that we calculate to fit in the HW range. This is
	 * to account for the possibility that the last command fit exactly into
	 * the ringbuffer and rb->next hasn't wrapped to zero yet
	 */
	wptr = get_wptr(ring);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	gpu_write(gpu, reg, wptr);
}

bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(ring);

	/* wait for CP to drain ringbuffer: */
	if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
		return true;

	/* TODO maybe we need to reset GPU here to recover from hang? */
	DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
		gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);

	return false;
}

int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i, count = 0;

	kref_init(&state->ref);

	ktime_get_real_ts64(&state->time);

	for (i = 0; i < gpu->nr_rings; i++) {
		int size = 0, j;

		state->ring[i].fence = gpu->rb[i]->memptrs->fence;
		state->ring[i].iova = gpu->rb[i]->iova;
		state->ring[i].seqno = gpu->rb[i]->seqno;
		state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
		state->ring[i].wptr = get_wptr(gpu->rb[i]);

		/* Copy at least 'wptr' dwords of the data */
		size = state->ring[i].wptr;

		/* After wptr find the last non zero dword to save space */
		for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
			if (gpu->rb[i]->start[j])
				size = j + 1;

		if (size) {
			state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
			if (state->ring[i].data) {
				memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
				state->ring[i].data_size = size << 2;
			}
		}
	}

	/* Some targets prefer to collect their own registers */
	if (!adreno_gpu->registers)
		return 0;

	/* Count the number of registers */
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
		count += adreno_gpu->registers[i + 1] -
			adreno_gpu->registers[i] + 1;

	state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
	if (state->registers) {
		int pos = 0;

		for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
			u32 start = adreno_gpu->registers[i];
			u32 end   = adreno_gpu->registers[i + 1];
			u32 addr;

			for (addr = start; addr <= end; addr++) {
				state->registers[pos++] = addr;
				state->registers[pos++] = gpu_read(gpu, addr);
			}
		}

		state->nr_registers = count;
	}

	return 0;
}

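/*
 * The adreno_gpu->registers table walked above is a flat list of
 * { start, end } register offset pairs terminated by ~0. An illustrative
 * table (hypothetical offsets) would look like:
 *
 *	static const unsigned int axxx_registers[] = {
 *		0x0000, 0x0002,		// dump 0x0000..0x0002 inclusive
 *		0x0010, 0x0012,
 *		~0,			// sentinel
 *	};
 */
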
void adreno_gpu_state_destroy(struct msm_gpu_state *state)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(state->ring); i++)
		kvfree(state->ring[i].data);

	for (i = 0; state->bos && i < state->nr_bos; i++)
		kvfree(state->bos[i].data);

	kfree(state->bos);
	kfree(state->comm);
	kfree(state->cmd);
	kfree(state->registers);
}

static void adreno_gpu_state_kref_destroy(struct kref *kref)
{
	struct msm_gpu_state *state = container_of(kref,
		struct msm_gpu_state, ref);

	adreno_gpu_state_destroy(state);
	kfree(state);
}

int adreno_gpu_state_put(struct msm_gpu_state *state)
{
	if (IS_ERR_OR_NULL(state))
		return 1;

	return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
}

#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)

static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
{
	void *buf;
	size_t buf_itr = 0, buffer_size;
	char out[ASCII85_BUFSZ];
	long l;
	int i;

	if (!src || !len)
		return NULL;

	l = ascii85_encode_len(len);

	/*
	 * Ascii85 outputs either a 5 byte string or a 1 byte string. So we
	 * account for the worst case of 5 bytes per dword plus the 1 for '\0'
	 */
	buffer_size = (l * 5) + 1;

	buf = kvmalloc(buffer_size, GFP_KERNEL);
	if (!buf)
		return NULL;

	for (i = 0; i < l; i++)
		buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
				ascii85_encode(src[i], out));

	return buf;
}

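/*
 * Worked example of the sizing above: for len = 16 bytes,
 * ascii85_encode_len() returns l = 4 dword groups, so buffer_size =
 * 4 * 5 + 1 = 21, enough for four 5-char groups (or the shorter 1-char
 * "z" encoding that all-zero dwords produce) plus the trailing '\0'.
 */
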
/* len is expected to be in bytes */
static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
		bool *encoded)
{
	if (!*ptr || !len)
		return;

	if (!*encoded) {
		long datalen, i;
		u32 *buf = *ptr;

		/*
		 * Only dump the non-zero part of the buffer - rarely will
		 * any data completely fill the entire allocated size of
		 * the buffer.
		 */
		for (datalen = 0, i = 0; i < len >> 2; i++)
			if (buf[i])
				datalen = ((i + 1) << 2);

		/*
		 * If we reach here, then the originally captured binary buffer
		 * will be replaced with the ascii85 encoded string
		 */
		*ptr = adreno_gpu_ascii85_encode(buf, datalen);

		kvfree(buf);

		*encoded = true;
	}

	if (!*ptr)
		return;

	drm_puts(p, "    data: !!ascii85 |\n");
	drm_puts(p, "     ");

	drm_puts(p, *ptr);

	drm_puts(p, "\n");
}

void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	if (IS_ERR_OR_NULL(state))
		return;

	drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);
	/*
	 * If this state was collected due to an iova fault then TTBR0 will
	 * be non-zero, which is a good way to distinguish that case, so
	 * print the fault-related info:
	 */
	if (state->fault_info.ttbr0) {
		const struct msm_gpu_fault_info *info = &state->fault_info;

		drm_puts(p, "fault-info:\n");
		drm_printf(p, "  - ttbr0=%.16llx\n", info->ttbr0);
		drm_printf(p, "  - iova=%.16lx\n", info->iova);
		drm_printf(p, "  - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
		drm_printf(p, "  - type=%s\n", info->type);
		drm_printf(p, "  - source=%s\n", info->block);
	}

	drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);

	drm_puts(p, "ringbuffer:\n");

	for (i = 0; i < gpu->nr_rings; i++) {
		drm_printf(p, "  - id: %d\n", i);
		drm_printf(p, "    iova: 0x%016llx\n", state->ring[i].iova);
		drm_printf(p, "    last-fence: %d\n", state->ring[i].seqno);
		drm_printf(p, "    retired-fence: %d\n", state->ring[i].fence);
		drm_printf(p, "    rptr: %d\n", state->ring[i].rptr);
		drm_printf(p, "    wptr: %d\n", state->ring[i].wptr);
		drm_printf(p, "    size: %d\n", MSM_GPU_RINGBUFFER_SZ);

		adreno_show_object(p, &state->ring[i].data,
			state->ring[i].data_size, &state->ring[i].encoded);
	}

	if (state->bos) {
		drm_puts(p, "bos:\n");

		for (i = 0; i < state->nr_bos; i++) {
			drm_printf(p, "  - iova: 0x%016llx\n",
				state->bos[i].iova);
			drm_printf(p, "    size: %zd\n", state->bos[i].size);

			adreno_show_object(p, &state->bos[i].data,
				state->bos[i].size, &state->bos[i].encoded);
		}
	}

	if (state->nr_registers) {
		drm_puts(p, "registers:\n");

		for (i = 0; i < state->nr_registers; i++) {
			drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
				state->registers[i * 2] << 2,
				state->registers[(i * 2) + 1]);
		}
	}
}
#endif

/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful.  The scratch registers seem always
 * safe to read when GPU has hung (unlike some other regs, depending
 * on how the GPU hung), and they are useful to match up to cmdstream
 * dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	printk("revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		printk("rb %d: fence:    %d/%d\n", i,
			ring->memptrs->fence,
			ring->seqno);

		printk("rptr:     %d\n", get_rptr(adreno_gpu, ring));
		printk("rb wptr:  %d\n", get_wptr(ring));
	}
}

/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	if (!adreno_gpu->registers)
		return;

	/* dump these out in a form that can be parsed by demsm: */
	printk("IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end   = adreno_gpu->registers[i + 1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			printk("IO:R %08x %08x\n", addr << 2, val);
		}
	}
}

static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
	uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
	/* Use ring->next to calculate free size */
	uint32_t wptr = ring->next - ring->start;
	uint32_t rptr = get_rptr(adreno_gpu, ring);
	return (rptr + (size - 1) - wptr) % size;
}

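/*
 * Worked example with hypothetical numbers: for size = 8192 dwords,
 * rptr = 100 and wptr = 4000, free = (100 + 8191 - 4000) % 8192 = 4291
 * dwords. The "size - 1" keeps one slot permanently empty so that
 * rptr == wptr always means "ring empty" rather than "ring full".
 */
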
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
	if (spin_until(ring_freewords(ring) >= ndwords))
		DRM_DEV_ERROR(ring->gpu->dev->dev,
			"timeout waiting for space in ringbuffer %d\n",
			ring->id);
}

/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
static int adreno_get_legacy_pwrlevels(struct device *dev)
{
	struct device_node *child, *node;
	int ret;

	node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
	if (!node) {
		DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n");
		return -ENXIO;
	}

	for_each_child_of_node(node, child) {
		unsigned int val;

		ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
		if (ret)
			continue;

		/*
		 * Skip the intentionally bogus clock value found at the bottom
		 * of most legacy frequency tables
		 */
		if (val != 27000000)
			dev_pm_opp_add(dev, val, 0);
	}

	of_node_put(node);

	return 0;
}

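/*
 * For reference, a minimal legacy powerlevel node of the shape parsed
 * above (frequencies here are just examples; note the bogus 27 MHz entry
 * that the loop filters out):
 *
 *	qcom,gpu-pwrlevels {
 *		compatible = "qcom,gpu-pwrlevels";
 *		qcom,gpu-pwrlevel@0 {
 *			qcom,gpu-freq = <450000000>;
 *		};
 *		qcom,gpu-pwrlevel@1 {
 *			qcom,gpu-freq = <27000000>;
 *		};
 *	};
 */
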
static void adreno_get_pwrlevels(struct device *dev,
		struct msm_gpu *gpu)
{
	unsigned long freq = ULONG_MAX;
	struct dev_pm_opp *opp;
	int ret;

	gpu->fast_rate = 0;

	/* You down with OPP? */
	if (!of_find_property(dev->of_node, "operating-points-v2", NULL)) {
		ret = adreno_get_legacy_pwrlevels(dev);
	} else {
		ret = devm_pm_opp_of_add_table(dev);
		if (ret)
			DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
	}

	if (!ret) {
		/* Find the fastest defined rate */
		opp = dev_pm_opp_find_freq_floor(dev, &freq);
		if (!IS_ERR(opp)) {
			gpu->fast_rate = freq;
			dev_pm_opp_put(opp);
		}
	}

	if (!gpu->fast_rate) {
		dev_warn(dev,
			"Could not find a clock rate. Using a reasonable default\n");
		/* Pick a suitably safe clock speed for any target */
		gpu->fast_rate = 200000000;
	}

	DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
}

int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
			  struct adreno_ocmem *adreno_ocmem)
{
	struct ocmem_buf *ocmem_hdl;
	struct ocmem *ocmem;

	ocmem = of_get_ocmem(dev);
	if (IS_ERR(ocmem)) {
		if (PTR_ERR(ocmem) == -ENODEV) {
			/*
			 * Return success since either the ocmem property was
			 * not specified in device tree, or ocmem support is
			 * not compiled into the kernel.
			 */
			return 0;
		}

		return PTR_ERR(ocmem);
	}

	ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem);
	if (IS_ERR(ocmem_hdl))
		return PTR_ERR(ocmem_hdl);

	adreno_ocmem->ocmem = ocmem;
	adreno_ocmem->base = ocmem_hdl->addr;
	adreno_ocmem->hdl = ocmem_hdl;
	adreno_gpu->gmem = ocmem_hdl->len;

	return 0;
}

void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
{
	if (adreno_ocmem && adreno_ocmem->base)
		ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
			   adreno_ocmem->hdl);
}

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu,
		const struct adreno_gpu_funcs *funcs, int nr_rings)
{
	struct device *dev = &pdev->dev;
	struct adreno_platform_config *config = dev->platform_data;
	struct msm_gpu_config adreno_gpu_config = { 0 };
	struct msm_gpu *gpu = &adreno_gpu->base;

	adreno_gpu->funcs = funcs;
	adreno_gpu->info = adreno_info(config->rev);
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;

	adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";

	adreno_gpu_config.nr_rings = nr_rings;

	adreno_get_pwrlevels(dev, gpu);

	pm_runtime_set_autosuspend_delay(dev,
		adreno_gpu->info->inactive_period);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
			adreno_gpu->info->name, &adreno_gpu_config);
}

void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct msm_drm_private *priv = gpu->dev->dev_private;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
		release_firmware(adreno_gpu->fw[i]);

	pm_runtime_disable(&priv->gpu_pdev->dev);

	msm_gpu_cleanup(&adreno_gpu->base);
}
 965