linux/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except the mailbox are blocked if blocking
	 * is enabled in the hypervisor, so use SCRATCH_REG0 as the test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
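
/*
 * Illustrative sketch, not part of the driver: a VF register read helper
 * could use amdgpu_virt_mmio_blocked() to fall back to the KIQ path when
 * the hypervisor has blocked direct MMIO. The helper name is hypothetical;
 * amdgpu_virt_kiq_rreg() is declared via amdgpu.h and defined below.
 */
static uint32_t __maybe_unused amdgpu_virt_rreg_example(struct amdgpu_device *adev,
							uint32_t reg)
{
	/* prefer the KIQ round trip only when direct MMIO is blocked */
	if (amdgpu_virt_mmio_blocked(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	return RREG32_NO_KIQ(reg);
}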

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't keep waiting in the GPU reset case: that can block the
	 * gpu_recover() routine forever. For example, this read may be
	 * triggered from TTM, and ttm_bo_lock_delayed_workqueue() would
	 * never return while we keep waiting here, hanging gpu_recover().
	 *
	 * Likewise, don't keep waiting when called from IRQ context.
	 */
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	return adev->wb.wb[adev->virt.reg_val_offs];

failed_kiq_read:
	pr_err("failed to read reg:%x\n", reg);
	return ~0;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't keep waiting in the GPU reset case: that can block the
	 * gpu_recover() routine forever. For example, this write may be
	 * triggered from TTM, and ttm_bo_lock_delayed_workqueue() would
	 * never return while we keep waiting here, hanging gpu_recover().
	 *
	 * Likewise, don't keep waiting when called from IRQ context.
	 */
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_kiq_write:
	pr_err("failed to write reg:%x\n", reg);
}
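
/*
 * Illustrative sketch, not part of the driver: a read-modify-write of a
 * register through the KIQ, combining the two helpers above. The function
 * name is hypothetical.
 */
static void __maybe_unused amdgpu_virt_kiq_rmw_example(struct amdgpu_device *adev,
						       uint32_t reg, uint32_t clr,
						       uint32_t set)
{
	uint32_t v = amdgpu_virt_kiq_rreg(adev, reg);

	/* clear the @clr bits, then set the @set bits, and write back */
	amdgpu_virt_kiq_wreg(adev, reg, (v & ~clr) | set);
}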

void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't keep waiting when called from IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_kiq:
	pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
}
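
/*
 * Illustrative sketch, not part of the driver: reg0/reg1 are typically a
 * request/acknowledge register pair, e.g. a VM invalidation request and
 * its ack. The req_reg/ack_reg parameters and the per-VMID bit layout are
 * hypothetical placeholders.
 */
static void __maybe_unused amdgpu_virt_inv_req_example(struct amdgpu_device *adev,
						       uint32_t req_reg,
						       uint32_t ack_reg,
						       unsigned int vmid)
{
	/* write 1 << vmid to req_reg, then poll ack_reg until that bit is set */
	amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg,
					   1 << vmid, 1 << vmid);
}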

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	true if called at driver init/fini time.
 * Full GPU access must be requested before starting driver init or fini.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	true if called at driver init/fini time.
 * Full GPU access must be released when driver init or fini finishes.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
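
/*
 * Illustrative sketch, not part of the driver: the typical pairing of the
 * two helpers above during driver init. Exclusive access is requested
 * before touching the hardware and released once initialization is done.
 * The function name is hypothetical.
 */
static int __maybe_unused amdgpu_virt_init_access_example(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_virt_request_full_gpu(adev, true);
	if (r)
		return r;

	/* ... hardware init under full GPU access ... */

	return amdgpu_virt_release_full_gpu(adev, true);
}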

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}
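
/*
 * Illustrative sketch, not part of the driver: a VF-initiated reset would
 * typically issue the reset request and then wait for the hypervisor to
 * signal completion. The function name is hypothetical.
 */
static int __maybe_unused amdgpu_virt_reset_example(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_virt_reset_gpu(adev);
	if (r)
		return r;

	return amdgpu_virt_wait_reset(adev);
}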

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}
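
/*
 * Illustrative sketch, not part of the driver: the MM table lives for the
 * duration of VF operation. It is allocated once, consumed by UVD/VCE init
 * via adev->virt.mm_table.gpu_addr, and freed on teardown. The function
 * name is hypothetical.
 */
static int __maybe_unused amdgpu_virt_mm_table_example(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	/* ... UVD/VCE init consumes adev->virt.mm_table.gpu_addr ... */

	amdgpu_virt_free_mm_table(adev);
	return 0;
}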

/**
 * amdgpu_virt_fw_reserve_get_checksum() - compute the pf2vf/vf2pf checksum
 * @obj:	buffer to sum, including its embedded checksum field.
 * @obj_size:	size of @obj in bytes.
 * @key:	checksum seed value.
 * @chksum:	checksum value stored in the buffer.
 * Sum all bytes of @obj seeded with @key, then subtract the bytes of the
 * stored checksum so the embedded field does not affect the result.
 * Return: the computed checksum.
 */
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* subtract the bytes of the checksum field itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}
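
/*
 * Illustrative sketch, not part of the driver: validating a buffer that
 * embeds its own checksum, mirroring the check done below in
 * amdgpu_virt_init_data_exchange(). Because the byte sum includes the
 * stored checksum field, recomputing with the stored value passed as
 * @chksum must reproduce that value. The function name is hypothetical.
 */
static bool __maybe_unused amdgpu_virt_checksum_ok_example(void *obj,
							   unsigned long obj_size,
							   unsigned int key,
							   unsigned int chksum)
{
	unsigned int val;

	val = amdgpu_virt_fw_reserve_get_checksum(obj, obj_size, key, chksum);
	return val == chksum;
}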

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

		/* the pf2vf message must fit within 4KB */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				/* the vf2pf message follows the pf2vf one */
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}