linux/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <core/memory.h>
#include <subdev/timer.h>

void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u16 tag, u8 port, bool secure)
{
	u8 rem = size % 4;
	u32 reg;
	int i;

	size -= rem;

	reg = start | BIT(24) | (secure ? BIT(28) : 0);
	nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
	for (i = 0; i < size / 4; i++) {
		/* write new tag every 256B */
		if ((i & 0x3f) == 0)
			nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
		nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
	}

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		/* write new tag every 256B */
		if ((i & 0x3f) == 0)
			nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
		nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
				 extra & (BIT(rem * 8) - 1));
		++i;
	}

	/* code must be padded to 0x40 words */
	for (; i & 0x3f; i++)
		nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
}
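
/*
 * Example (illustrative, not part of the original file): loading a ucode
 * image into IMEM at offset 0 through port 0, non-secure, with tags
 * starting at 0. `img` and `img_size` are hypothetical; the helper bumps
 * the tag register itself every 0x40 words (256 bytes).
 *
 *	nvkm_falcon_v1_load_imem(falcon, img, 0x0000, img_size, 0, 0, false);
 */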

static void
nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u8 port)
{
	u8 rem = size % 4;
	int i;

	size -= rem;

	nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24));
	for (i = 0; i < size / 4; i++)
		nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), ((u32 *)data)[i]);

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		nvkm_falcon_wr32(falcon, 0xac4 + (port * 8),
				 extra & (BIT(rem * 8) - 1));
	}
}

void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u8 port)
{
	const struct nvkm_falcon_func *func = falcon->func;
	u8 rem = size % 4;
	int i;

	if (func->emem_addr && start >= func->emem_addr)
		return nvkm_falcon_v1_load_emem(falcon, data,
						start - func->emem_addr, size,
						port);

	size -= rem;

	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
	for (i = 0; i < size / 4; i++)
		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
				 extra & (BIT(rem * 8) - 1));
	}
}
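
/*
 * Example (illustrative, not part of the original file): writing a command
 * structure to DMEM offset 0 through port 0. `cmd` is a hypothetical packed
 * structure; writes targeting offsets at or above func->emem_addr are
 * redirected to EMEM by the check above.
 *
 *	nvkm_falcon_v1_load_dmem(falcon, &cmd, 0x0000, sizeof(cmd), 0);
 */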

static void
nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
			 u8 port, void *data)
{
	u8 rem = size % 4;
	int i;

	size -= rem;

	nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25));
	for (i = 0; i < size / 4; i++)
		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

	/*
	 * If size is not a multiple of 4, copy only the valid bytes of the
	 * last word so garbage does not get read
	 */
	if (rem) {
		u32 extra = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

		for (i = size; i < size + rem; i++) {
			((u8 *)data)[i] = (u8)(extra & 0xff);
			extra >>= 8;
		}
	}
}

void
nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
			 u8 port, void *data)
{
	const struct nvkm_falcon_func *func = falcon->func;
	u8 rem = size % 4;
	int i;

	if (func->emem_addr && start >= func->emem_addr)
		return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr,
						size, port, data);

	size -= rem;

	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25));
	for (i = 0; i < size / 4; i++)
		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

	/*
	 * If size is not a multiple of 4, copy only the valid bytes of the
	 * last word so garbage does not get read
	 */
	if (rem) {
		u32 extra = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

		for (i = size; i < size + rem; i++) {
			((u8 *)data)[i] = (u8)(extra & 0xff);
			extra >>= 8;
		}
	}
}
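
/*
 * Example (illustrative, not part of the original file): reading a message
 * back out of DMEM into a local buffer. `msg_offset` is hypothetical; a
 * size that is not a multiple of 4 is handled by the byte-wise tail copy
 * above.
 *
 *	u8 msg[64];
 *
 *	nvkm_falcon_v1_read_dmem(falcon, msg_offset, sizeof(msg), 0, msg);
 */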

void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
	const u32 fbif = falcon->func->fbif;
	u32 inst_loc;

	/* disable instance block binding */
	if (ctx == NULL) {
		nvkm_falcon_wr32(falcon, 0x10c, 0x0);
		return;
	}

	nvkm_falcon_wr32(falcon, 0x10c, 0x1);

	/* setup apertures - virtual */
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_UCODE, 0x4);
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_VIRT, 0x0);
	/* setup apertures - physical */
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);

	/* Set context */
	switch (nvkm_memory_target(ctx)) {
	case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
	case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
	case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	/* Enable context */
	nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
	nvkm_falcon_wr32(falcon, 0x054,
			 ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
			 (inst_loc << 28) | (1 << 30));

	nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
	nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
}
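
/*
 * Example (illustrative, not part of the original file): binding an
 * instance block so the falcon's DMA engine can reach memory, and later
 * unbinding it. `inst` is a hypothetical struct nvkm_memory pointer.
 *
 *	nvkm_falcon_v1_bind_context(falcon, inst);
 *	...
 *	nvkm_falcon_v1_bind_context(falcon, NULL);
 */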

void
nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
	nvkm_falcon_wr32(falcon, 0x104, start_addr);
}

void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
	u32 reg = nvkm_falcon_rd32(falcon, 0x100);

	if (reg & BIT(6))
		nvkm_falcon_wr32(falcon, 0x130, 0x2);
	else
		nvkm_falcon_wr32(falcon, 0x100, 0x2);
}
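
/*
 * Example (illustrative, not part of the original file): pointing the
 * falcon at its boot vector and kicking it off. `boot_vector` is
 * hypothetical; when bit 6 of CPUCTL (0x100) is set, the start request
 * goes through the alias register (0x130) instead.
 *
 *	nvkm_falcon_v1_set_start_addr(falcon, boot_vector);
 *	nvkm_falcon_v1_start(falcon);
 */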

int
nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10);
	if (ret < 0)
		return ret;

	return 0;
}
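
/*
 * Example (illustrative, not part of the original file): a run-to-halt
 * sequence, assuming the loaded ucode halts when it is done. The 100ms
 * budget is hypothetical.
 *
 *	ret = nvkm_falcon_v1_enable(falcon);
 *	if (ret)
 *		return ret;
 *	nvkm_falcon_v1_start(falcon);
 *	ret = nvkm_falcon_v1_wait_for_halt(falcon, 100);
 */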

int
nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	/* clear interrupt(s) */
	nvkm_falcon_mask(falcon, 0x004, mask, mask);
	/* wait until interrupts are cleared */
	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0);
	if (ret < 0)
		return ret;

	return 0;
}
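
/*
 * Example (illustrative, not part of the original file): acknowledging the
 * HALT interrupt (assumed here to be bit 4, i.e. mask 0x10) once the ucode
 * has stopped.
 *
 *	ret = nvkm_falcon_v1_clear_interrupt(falcon, 0x10);
 */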

static int
falcon_v1_wait_idle(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0);
	if (ret < 0)
		return ret;

	return 0;
}

int
nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0);
	if (ret < 0) {
		nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n");
		return ret;
	}

	ret = falcon_v1_wait_idle(falcon);
	if (ret)
		return ret;

	/* enable IRQs */
	nvkm_falcon_wr32(falcon, 0x010, 0xff);

	return 0;
}

void
nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
{
	/* disable IRQs and wait for any previous code to complete */
	nvkm_falcon_wr32(falcon, 0x014, 0xff);
	falcon_v1_wait_idle(falcon);
}
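
/*
 * Example (illustrative, not part of the original file): bracketing a load
 * with enable/disable. Enable waits for the on-reset memory scrubber to
 * finish and for the falcon to go idle before unmasking IRQs.
 *
 *	ret = nvkm_falcon_v1_enable(falcon);
 *	if (ret)
 *		return ret;
 *	... load IMEM/DMEM, bind context, start ...
 *	nvkm_falcon_v1_disable(falcon);
 */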

static const struct nvkm_falcon_func
nvkm_falcon_v1 = {
	.load_imem = nvkm_falcon_v1_load_imem,
	.load_dmem = nvkm_falcon_v1_load_dmem,
	.read_dmem = nvkm_falcon_v1_read_dmem,
	.bind_context = nvkm_falcon_v1_bind_context,
	.start = nvkm_falcon_v1_start,
	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
	.enable = nvkm_falcon_v1_enable,
	.disable = nvkm_falcon_v1_disable,
	.set_start_addr = nvkm_falcon_v1_set_start_addr,
};

int
nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
		   struct nvkm_falcon **pfalcon)
{
	struct nvkm_falcon *falcon;
	if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon);
	return 0;
}
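
/*
 * Example (illustrative, not part of the original file): constructing a v1
 * falcon for an owning subdev. The name, base address, and `sb` structure
 * are hypothetical.
 *
 *	ret = nvkm_falcon_v1_new(&sb->subdev, "sec2-falcon", 0x087000,
 *				 &sb->falcon);
 *	if (ret)
 *		return ret;
 */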