linux/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

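/*
 * Bring the GPU out of power gating: enable the VDD regulator (when the
 * platform provides one) and the core, reference and power clocks, then
 * cycle the reset line.  When no PM domain manages the device, the 3D
 * power-gate clamp is removed by hand while the engine is held in reset.
 */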
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
        int ret;

        if (tdev->vdd) {
                ret = regulator_enable(tdev->vdd);
                if (ret)
                        goto err_power;
        }

        ret = clk_prepare_enable(tdev->clk);
        if (ret)
                goto err_clk;
        if (tdev->clk_ref) {
                ret = clk_prepare_enable(tdev->clk_ref);
                if (ret)
                        goto err_clk_ref;
        }
        ret = clk_prepare_enable(tdev->clk_pwr);
        if (ret)
                goto err_clk_pwr;
        clk_set_rate(tdev->clk_pwr, 204000000);
        udelay(10);

        reset_control_assert(tdev->rst);
        udelay(10);

        if (!tdev->pdev->dev.pm_domain) {
                ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
                if (ret)
                        goto err_clamp;
                udelay(10);
        }

        reset_control_deassert(tdev->rst);
        udelay(10);

        return 0;

err_clamp:
        clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
        if (tdev->clk_ref)
                clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
        clk_disable_unprepare(tdev->clk);
err_clk:
        if (tdev->vdd)
                regulator_disable(tdev->vdd);
err_power:
        return ret;
}

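/*
 * Mirror of nvkm_device_tegra_power_up(): clocks are disabled first, the
 * VDD regulator last.
 */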
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
        int ret;

        clk_disable_unprepare(tdev->clk_pwr);
        if (tdev->clk_ref)
                clk_disable_unprepare(tdev->clk_ref);
        clk_disable_unprepare(tdev->clk);
        udelay(10);

        if (tdev->vdd) {
                ret = regulator_disable(tdev->vdd);
                if (ret)
                        return ret;
        }

        return 0;
}

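/*
 * Try to attach the GPU to an IOMMU domain on the platform bus.  On any
 * failure the domain is torn down again and the device falls back to
 * physical addressing.
 */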
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        struct device *dev = &tdev->pdev->dev;
        unsigned long pgsize_bitmap;
        int ret;

        if (!tdev->func->iommu_bit)
                return;

        mutex_init(&tdev->iommu.mutex);

        if (iommu_present(&platform_bus_type)) {
                tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
                if (!tdev->iommu.domain)
                        goto error;

                /*
                 * An IOMMU is only usable if it supports page sizes smaller
                 * than or equal to the system's PAGE_SIZE, with a preference
                 * if both are equal.
                 */
                pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
                if (pgsize_bitmap & PAGE_SIZE) {
                        tdev->iommu.pgshift = PAGE_SHIFT;
                } else {
                        /* largest supported page size below PAGE_SIZE */
                        tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
                        if (tdev->iommu.pgshift == 0) {
                                dev_warn(dev, "unsupported IOMMU page size\n");
                                goto free_domain;
                        }
                        tdev->iommu.pgshift -= 1;
                }

                ret = iommu_attach_device(tdev->iommu.domain, dev);
                if (ret)
                        goto free_domain;

                ret = nvkm_mm_init(&tdev->iommu.mm, 0,
                                   (1ULL << tdev->func->iommu_bit) >>
                                   tdev->iommu.pgshift, 1);
                if (ret)
                        goto detach_device;
        }

        return;

detach_device:
        iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
        iommu_domain_free(tdev->iommu.domain);

error:
        tdev->iommu.domain = NULL;
        tdev->iommu.pgshift = 0;
        dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}

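/* Undo nvkm_device_tegra_probe_iommu(), if a domain was set up. */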
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        if (tdev->iommu.domain) {
                nvkm_mm_fini(&tdev->iommu.mm);
                iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
                iommu_domain_free(tdev->iommu.domain);
        }
#endif
}

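/* Recover the Tegra wrapper from the embedded nvkm_device. */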
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
        return container_of(device, struct nvkm_device_tegra, device);
}

static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? resource_size(res) : 0;
}

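/*
 * Top-half interrupt handler: disarm MC interrupts, dispatch them, then
 * rearm.  IRQ_NONE is returned on a spurious (shared) interrupt.
 */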
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
        struct nvkm_device_tegra *tdev = arg;
        struct nvkm_device *device = &tdev->device;
        bool handled = false;
        nvkm_mc_intr_unarm(device);
        nvkm_mc_intr(device, &handled);
        nvkm_mc_intr_rearm(device);
        return handled ? IRQ_HANDLED : IRQ_NONE;
}

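/* Release the stall interrupt on suspend/teardown. */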
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        if (tdev->irq) {
                free_irq(tdev->irq, tdev);
                tdev->irq = 0;
        }
}

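/* Hook up the "stall" interrupt advertised by the platform device. */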
static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        int irq, ret;

        irq = platform_get_irq_byname(tdev->pdev, "stall");
        if (irq < 0)
                return irq;

        ret = request_irq(irq, nvkm_device_tegra_intr,
                          IRQF_SHARED, "nvkm", tdev);
        if (ret)
                return ret;

        tdev->irq = irq;
        return 0;
}

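/*
 * Destructor: power the GPU down and detach it from the IOMMU before the
 * wrapper is freed by the caller.
 */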
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        nvkm_device_tegra_power_down(tdev);
        nvkm_device_tegra_remove_iommu(tdev);
        return tdev;
}

static const struct nvkm_device_func
nvkm_device_tegra_func = {
        .tegra = nvkm_device_tegra,
        .dtor = nvkm_device_tegra_dtor,
        .init = nvkm_device_tegra_init,
        .fini = nvkm_device_tegra_fini,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
        .cpu_coherent = false,
};

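/*
 * Probe entry point for the platform driver: gather the regulator, reset
 * and clock handles, restrict the DMA mask to the GPU-addressable range,
 * attach the IOMMU, power the GPU up and finally construct the common
 * nvkm_device.  Resources acquired with devm_* are released by the driver
 * core; everything else is unwound on the error paths below.
 */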
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                      struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        struct nvkm_device_tegra *tdev;
        int ret;

        if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
                return -ENOMEM;

        tdev->func = func;
        tdev->pdev = pdev;

        if (func->require_vdd) {
                tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
                if (IS_ERR(tdev->vdd)) {
                        ret = PTR_ERR(tdev->vdd);
                        goto free;
                }
        }

        tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->rst)) {
                ret = PTR_ERR(tdev->rst);
                goto free;
        }

        tdev->clk = devm_clk_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->clk)) {
                ret = PTR_ERR(tdev->clk);
                goto free;
        }

        if (func->require_ref_clk) {
                tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
                if (IS_ERR(tdev->clk_ref)) {
                        ret = PTR_ERR(tdev->clk_ref);
                        goto free;
                }
        }

        tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
        if (IS_ERR(tdev->clk_pwr)) {
                ret = PTR_ERR(tdev->clk_pwr);
                goto free;
        }

        /*
         * The IOMMU bit defines the upper limit of the GPU-addressable
         * space.  This will be refined in nouveau_ttm_init(), but we need
         * to do it early for instmem to behave properly.
         */
        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
        if (ret)
                goto free;

        nvkm_device_tegra_probe_iommu(tdev);

        ret = nvkm_device_tegra_power_up(tdev);
        if (ret)
                goto remove;

        tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
        tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
        ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
                               NVKM_DEVICE_TEGRA, pdev->id, NULL,
                               cfg, dbg, detect, mmio, subdev_mask,
                               &tdev->device);
        if (ret)
                goto powerdown;

        *pdevice = &tdev->device;

        return 0;

powerdown:
        nvkm_device_tegra_power_down(tdev);
remove:
        nvkm_device_tegra_remove_iommu(tdev);
free:
        kfree(tdev);
        return ret;
}
#else
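/* Stub used when CONFIG_NOUVEAU_PLATFORM_DRIVER is not enabled. */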
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                      struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        return -ENOSYS;
}
#endif