linux/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"

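/* VMIDs 8-15 (the set bits of this mask) are reserved for KFD compute
 * contexts; amdgpu graphics keeps VMIDs 0-7.
 */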
static const unsigned int compute_vmid_bitmap = 0xFF00;

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;

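/**
 * amdgpu_amdkfd_init - Module-level initialization of the amdgpu-amdkfd
 *                      interface
 *
 * Records the total system memory size (lowmem only) used for the
 * worst-case page-table reservation estimate, then initializes the KFD
 * module. Returns -ENOENT when the kernel is built without
 * CONFIG_HSA_AMD.
 */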
int amdgpu_amdkfd_init(void)
{
        struct sysinfo si;
        int ret;

        si_meminfo(&si);
        amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
        amdgpu_amdkfd_total_mem_size *= si.mem_unit;

#ifdef CONFIG_HSA_AMD
        ret = kgd2kfd_init();
        amdgpu_amdkfd_gpuvm_init_mem_limits();
#else
        ret = -ENOENT;
#endif

        return ret;
}

void amdgpu_amdkfd_fini(void)
{
        kgd2kfd_exit();
}

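/**
 * amdgpu_amdkfd_device_probe - Probe KFD support for one amdgpu device
 *
 * @adev: amdgpu_device pointer
 *
 * Picks the ASIC-specific kfd2kgd function table and probes the KFD
 * device. On success, the device's VRAM is added to the total memory
 * size used for page-table reservation estimates.
 */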
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
        const struct kfd2kgd_calls *kfd2kgd;

        switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_KAVERI:
        case CHIP_HAWAII:
                kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
                break;
#endif
        case CHIP_CARRIZO:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
                kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_RAVEN:
                kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
                break;
        case CHIP_ARCTURUS:
                kfd2kgd = amdgpu_amdkfd_arcturus_get_functions();
                break;
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions();
                break;
        default:
                dev_info(adev->dev, "kfd not supported on this ASIC\n");
                return;
        }

        adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
                                      adev->pdev, kfd2kgd);

        if (adev->kfd.dev)
                amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
                                         phys_addr_t *aperture_base,
                                         size_t *aperture_size,
                                         size_t *start_offset)
{
        /*
         * The first num_doorbells are used by amdgpu.
         * amdkfd takes whatever's left in the aperture.
         */
        if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
                *aperture_base = adev->doorbell.base;
                *aperture_size = adev->doorbell.size;
                *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
        } else {
                *aperture_base = 0;
                *aperture_size = 0;
                *start_offset = 0;
        }
}

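/**
 * amdgpu_amdkfd_device_init - Share GPU resources with amdkfd
 *
 * @adev: amdgpu_device pointer
 *
 * Fills in kgd2kfd_shared_resources: the compute VMIDs, the MEC queues
 * left over after amdgpu takes its own (the KIQ and everything beyond
 * the first MEC is masked out), the doorbell aperture split and the
 * GPUVM aperture size, then completes KFD device initialization.
 */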
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
        int i;
        int last_valid_bit;

        if (adev->kfd.dev) {
                struct kgd2kfd_shared_resources gpu_resources = {
                        .compute_vmid_bitmap = compute_vmid_bitmap,
                        .num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
                        .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
                        .gpuvm_size = min(adev->vm_manager.max_pfn
                                          << AMDGPU_GPU_PAGE_SHIFT,
                                          AMDGPU_GMC_HOLE_START),
                        .drm_render_minor = adev->ddev->render->index,
                        .sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
                };

                /* Taking the complement also sets bits beyond the last
                 * valid queue, which we clear below
                 */
                bitmap_complement(gpu_resources.queue_bitmap,
                                  adev->gfx.mec.queue_bitmap,
                                  KGD_MAX_QUEUES);

                /* remove the KIQ bit as well */
                if (adev->gfx.kiq.ring.sched.ready)
                        clear_bit(amdgpu_gfx_mec_queue_to_bit(adev,
                                                          adev->gfx.kiq.ring.me - 1,
                                                          adev->gfx.kiq.ring.pipe,
                                                          adev->gfx.kiq.ring.queue),
                                  gpu_resources.queue_bitmap);

                /* According to linux/bitmap.h we shouldn't use bitmap_clear if
                 * nbits is not compile time constant
                 */
                last_valid_bit = 1 /* only first MEC can have compute queues */
                                * adev->gfx.mec.num_pipe_per_mec
                                * adev->gfx.mec.num_queue_per_pipe;
                for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
                        clear_bit(i, gpu_resources.queue_bitmap);

                amdgpu_doorbell_get_kfd_info(adev,
                                &gpu_resources.doorbell_physical_address,
                                &gpu_resources.doorbell_aperture_size,
                                &gpu_resources.doorbell_start_offset);

                /* Since SOC15, the BIF statically uses the lower 12
                 * bits of a doorbell address for routing, based on
                 * settings in registers like SDMA0_DOORBELL_RANGE.
                 * To route a doorbell to the CP engine, the lower 12
                 * bits of its address have to be outside the ranges
                 * set for the SDMA, VCN, and IH blocks.
                 */
                if (adev->asic_type >= CHIP_VEGA10) {
                        gpu_resources.non_cp_doorbells_start =
                                        adev->doorbell_index.first_non_cp;
                        gpu_resources.non_cp_doorbells_end =
                                        adev->doorbell_index.last_non_cp;
                }

                kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
        }
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
        if (adev->kfd.dev) {
                kgd2kfd_device_exit(adev->kfd.dev);
                adev->kfd.dev = NULL;
        }
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
                const void *ih_ring_entry)
{
        if (adev->kfd.dev)
                kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
        if (adev->kfd.dev)
                kgd2kfd_suspend(adev->kfd.dev);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
        int r = 0;

        if (adev->kfd.dev)
                r = kgd2kfd_resume(adev->kfd.dev);

        return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
        int r = 0;

        if (adev->kfd.dev)
                r = kgd2kfd_pre_reset(adev->kfd.dev);

        return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
        int r = 0;

        if (adev->kfd.dev)
                r = kgd2kfd_post_reset(adev->kfd.dev);

        return r;
}

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        if (amdgpu_device_should_recover_gpu(adev))
                amdgpu_device_gpu_recover(adev, NULL);
}

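/**
 * amdgpu_amdkfd_alloc_gtt_mem - Allocate a pinned, kernel-mapped GTT BO
 *
 * @kgd: kgd device handle (really an amdgpu_device pointer)
 * @size: size of the allocation in bytes
 * @mem_obj: output handle of the new buffer object
 * @gpu_addr: output GPU address of the pinned, GART-bound buffer
 * @cpu_ptr: output CPU address of the kernel mapping
 * @mqd_gfx9: apply the GFX9 MQD placement restriction to the BO
 *
 * Used by amdkfd for small kernel-mode allocations such as MQDs.
 * Failures unwind in reverse order of the setup steps.
 */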
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                                void **mem_obj, uint64_t *gpu_addr,
                                void **cpu_ptr, bool mqd_gfx9)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_bo *bo = NULL;
        struct amdgpu_bo_param bp;
        int r;
        void *cpu_ptr_tmp = NULL;

        memset(&bp, 0, sizeof(bp));
        bp.size = size;
        bp.byte_align = PAGE_SIZE;
        bp.domain = AMDGPU_GEM_DOMAIN_GTT;
        bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;

        if (mqd_gfx9)
                bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;

        r = amdgpu_bo_create(adev, &bp, &bo);
        if (r) {
                dev_err(adev->dev,
                        "failed to allocate BO for amdkfd (%d)\n", r);
                return r;
        }

        /* map the buffer */
        r = amdgpu_bo_reserve(bo, true);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
                goto allocate_mem_reserve_bo_failed;
        }

        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
        if (r) {
                dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
                goto allocate_mem_pin_bo_failed;
        }

        r = amdgpu_ttm_alloc_gart(&bo->tbo);
        if (r) {
                dev_err(adev->dev, "(%d) failed to bind bo to GART for amdkfd\n", r);
                goto allocate_mem_kmap_bo_failed;
        }

        r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
        if (r) {
                dev_err(adev->dev,
                        "(%d) failed to map bo to kernel for amdkfd\n", r);
                goto allocate_mem_kmap_bo_failed;
        }

        *mem_obj = bo;
        *gpu_addr = amdgpu_bo_gpu_offset(bo);
        *cpu_ptr = cpu_ptr_tmp;

        amdgpu_bo_unreserve(bo);

        return 0;

allocate_mem_kmap_bo_failed:
        amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
        amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
        amdgpu_bo_unref(&bo);

        return r;
}

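/**
 * amdgpu_amdkfd_free_gtt_mem - Free a BO from amdgpu_amdkfd_alloc_gtt_mem()
 *
 * @kgd: kgd device handle
 * @mem_obj: buffer object handle returned by the allocation
 */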
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
        struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

        amdgpu_bo_reserve(bo, true);
        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unpin(bo);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
}

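/**
 * amdgpu_amdkfd_alloc_gws - Allocate a GWS (global wave sync) BO
 *
 * @kgd: kgd device handle
 * @size: size of the GWS allocation
 * @mem_obj: output handle of the new buffer object
 *
 * GWS is not CPU-accessible, so the BO only reserves a range in the
 * GWS domain; nothing is pinned or mapped.
 */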
int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
                                void **mem_obj)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_bo *bo = NULL;
        struct amdgpu_bo_param bp;
        int r;

        memset(&bp, 0, sizeof(bp));
        bp.size = size;
        bp.byte_align = 1;
        bp.domain = AMDGPU_GEM_DOMAIN_GWS;
        bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        bp.type = ttm_bo_type_device;
        bp.resv = NULL;

        r = amdgpu_bo_create(adev, &bp, &bo);
        if (r) {
                dev_err(adev->dev,
                        "failed to allocate gws BO for amdkfd (%d)\n", r);
                return r;
        }

        *mem_obj = bo;
        return 0;
}

void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
{
        struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

        amdgpu_bo_unref(&bo);
}

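/**
 * amdgpu_amdkfd_get_fw_version - Report the loaded firmware version
 *
 * @kgd: kgd device handle
 * @type: engine whose firmware version is requested
 *
 * Returns 0 for unknown engine types.
 */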
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
                                      enum kgd_engine_type type)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        switch (type) {
        case KGD_ENGINE_PFP:
                return adev->gfx.pfp_fw_version;

        case KGD_ENGINE_ME:
                return adev->gfx.me_fw_version;

        case KGD_ENGINE_CE:
                return adev->gfx.ce_fw_version;

        case KGD_ENGINE_MEC1:
                return adev->gfx.mec_fw_version;

        case KGD_ENGINE_MEC2:
                return adev->gfx.mec2_fw_version;

        case KGD_ENGINE_RLC:
                return adev->gfx.rlc_fw_version;

        case KGD_ENGINE_SDMA1:
                return adev->sdma.instance[0].fw_version;

        case KGD_ENGINE_SDMA2:
                return adev->sdma.instance[1].fw_version;

        default:
                return 0;
        }
}

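/**
 * amdgpu_amdkfd_get_local_mem_info - Report VRAM configuration to amdkfd
 *
 * @kgd: kgd device handle
 * @mem_info: output description of local memory
 *
 * VRAM is only reported as public (CPU-visible) when the whole BAR
 * aperture lies within the device's DMA mask; otherwise all of it is
 * reported as private. mem_clk_max is reported in MHz.
 */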
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
                                      struct kfd_local_mem_info *mem_info)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
                                             ~((1ULL << 32) - 1);
        resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

        memset(mem_info, 0, sizeof(*mem_info));
        if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
                mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
                mem_info->local_mem_size_private = adev->gmc.real_vram_size -
                                adev->gmc.visible_vram_size;
        } else {
                mem_info->local_mem_size_public = 0;
                mem_info->local_mem_size_private = adev->gmc.real_vram_size;
        }
        mem_info->vram_width = adev->gmc.vram_width;

        pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
                        &adev->gmc.aper_base, &aper_limit,
                        mem_info->local_mem_size_public,
                        mem_info->local_mem_size_private);

        if (amdgpu_sriov_vf(adev)) {
                mem_info->mem_clk_max = adev->clock.default_mclk / 100;
        } else if (adev->powerplay.pp_funcs) {
                if (amdgpu_emu_mode == 1)
                        mem_info->mem_clk_max = 0;
                else
                        mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
        } else {
                mem_info->mem_clk_max = 100;
        }
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        if (adev->gfx.funcs->get_gpu_clock_counter)
                return adev->gfx.funcs->get_gpu_clock_counter(adev);
        return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        /* the sclk is in quanta of 10kHz */
        if (amdgpu_sriov_vf(adev))
                return adev->clock.default_sclk / 100;
        else if (adev->powerplay.pp_funcs)
                return amdgpu_dpm_get_sclk(adev, false) / 100;
        else
                return 100;
}

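/**
 * amdgpu_amdkfd_get_cu_info - Report compute unit topology to amdkfd
 *
 * @kgd: kgd device handle
 * @cu_info: output CU topology and capability information
 *
 * Copies the active-CU count and bitmap plus the per-CU limits
 * (SIMDs, waves per SIMD, wavefront size, scratch slots, LDS size)
 * out of the gfx configuration.
 */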
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

        memset(cu_info, 0, sizeof(*cu_info));
        if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
                return;

        cu_info->cu_active_number = acu_info.number;
        cu_info->cu_ao_mask = acu_info.ao_cu_mask;
        memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
               sizeof(acu_info.bitmap));
        cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
        cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
        cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
        cu_info->simd_per_cu = acu_info.simd_per_cu;
        cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
        cu_info->wave_front_size = acu_info.wave_front_size;
        cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
        cu_info->lds_size = acu_info.lds_size;
}

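/**
 * amdgpu_amdkfd_get_dmabuf_info - Query a DMA-buf for import into KFD
 *
 * @kgd: kgd device handle of the importing device
 * @dma_buf_fd: file descriptor of the DMA-buf
 * @dma_buf_kgd: optional output, kgd handle of the exporting device
 * @bo_size: optional output, size of the underlying BO
 * @metadata_buffer: optional output buffer for the BO metadata
 * @buffer_size: size of @metadata_buffer in bytes
 * @metadata_size: optional output, size of the BO metadata
 * @flags: optional output, KFD allocation flags matching the BO
 *
 * Only buffers exported by the same amdgpu driver with VRAM or GTT
 * placement can be imported; anything else returns -EINVAL.
 */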
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
                                  struct kgd_dev **dma_buf_kgd,
                                  uint64_t *bo_size, void *metadata_buffer,
                                  size_t buffer_size, uint32_t *metadata_size,
                                  uint32_t *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
        struct amdgpu_bo *bo;
        uint64_t metadata_flags;
        int r = -EINVAL;

        dma_buf = dma_buf_get(dma_buf_fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        if (dma_buf->ops != &amdgpu_dmabuf_ops)
                /* Can't handle non-graphics buffers */
                goto out_put;

        obj = dma_buf->priv;
        if (obj->dev->driver != adev->ddev->driver)
                /* Can't handle buffers from different drivers */
                goto out_put;

        adev = obj->dev->dev_private;
        bo = gem_to_amdgpu_bo(obj);
        if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
                                       AMDGPU_GEM_DOMAIN_GTT)))
                /* Only VRAM and GTT BOs are supported */
                goto out_put;

        r = 0;
        if (dma_buf_kgd)
                *dma_buf_kgd = (struct kgd_dev *)adev;
        if (bo_size)
                *bo_size = amdgpu_bo_size(bo);
        if (metadata_size)
                *metadata_size = bo->metadata_size;
        if (metadata_buffer)
                r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
                                           metadata_size, &metadata_flags);
        if (flags) {
                *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
                        ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;

                if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        *flags |= ALLOC_MEM_FLAGS_PUBLIC;
        }

out_put:
        dma_buf_put(dma_buf);
        return r;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        return adev->gmc.xgmi.hive_id;
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{
        struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
        struct amdgpu_device *adev = (struct amdgpu_device *)dst;
        int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

        if (ret < 0) {
                DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
                        adev->gmc.xgmi.physical_node_id,
                        peer_adev->gmc.xgmi.physical_node_id, ret);
                ret = 0;
        }
        return (uint8_t)ret;
}

uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        return adev->rmmio_remap.bus_addr;
}

uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        return adev->gds.gws_size;
}

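/**
 * amdgpu_amdkfd_submit_ib - Synchronously execute one IB for amdkfd
 *
 * @kgd: kgd device handle
 * @engine: target engine, the first compute ring or an SDMA instance
 * @vmid: VMID the IB executes under
 * @gpu_addr: GPU virtual address of the IB
 * @ib_cmd: CPU pointer to the IB contents
 * @ib_len: IB length in dwords
 *
 * Schedules the IB on the chosen ring with an explicit VMID and blocks
 * until its fence signals.
 */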
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
                                uint32_t vmid, uint64_t gpu_addr,
                                uint32_t *ib_cmd, uint32_t ib_len)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct amdgpu_ring *ring;
        struct dma_fence *f = NULL;
        int ret;

        switch (engine) {
        case KGD_ENGINE_MEC1:
                ring = &adev->gfx.compute_ring[0];
                break;
        case KGD_ENGINE_SDMA1:
                ring = &adev->sdma.instance[0].ring;
                break;
        case KGD_ENGINE_SDMA2:
                ring = &adev->sdma.instance[1].ring;
                break;
        default:
                pr_err("Invalid engine in IB submission: %d\n", engine);
                ret = -EINVAL;
                goto err;
        }

        ret = amdgpu_job_alloc(adev, 1, &job, NULL);
        if (ret)
                goto err;

        ib = &job->ibs[0];
        memset(ib, 0, sizeof(struct amdgpu_ib));

        ib->gpu_addr = gpu_addr;
        ib->ptr = ib_cmd;
        ib->length_dw = ib_len;
        /* This works for NO_HWS. TODO: need to handle without knowing VMID */
        job->vmid = vmid;

        ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
        if (ret) {
                DRM_ERROR("amdgpu: failed to schedule IB.\n");
                goto err_ib_sched;
        }

        ret = dma_fence_wait(f, false);

err_ib_sched:
        dma_fence_put(f);
        amdgpu_job_free(job);
err:
        return ret;
}

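/**
 * amdgpu_amdkfd_set_compute_idle - Toggle the compute power profile
 *
 * @kgd: kgd device handle
 * @idle: true when compute goes idle, false when it becomes busy
 *
 * Enables the COMPUTE power profile while compute queues are active,
 * through the SW SMU when available, otherwise through powerplay.
 */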
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        if (is_support_sw_smu(adev))
                smu_switch_power_profile(&adev->smu,
                                         PP_SMC_POWER_PROFILE_COMPUTE,
                                         !idle);
        else if (adev->powerplay.pp_funcs &&
                 adev->powerplay.pp_funcs->switch_power_profile)
                amdgpu_dpm_switch_power_profile(adev,
                                                PP_SMC_POWER_PROFILE_COMPUTE,
                                                !idle);
}

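/**
 * amdgpu_amdkfd_is_kfd_vmid - Check whether a VMID belongs to KFD
 *
 * @adev: amdgpu_device pointer
 * @vmid: VMID to check
 *
 * True only when a KFD device exists and @vmid falls within the
 * compute VMID range reserved by compute_vmid_bitmap.
 */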
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
        if (adev->kfd.dev) {
                if ((1 << vmid) & compute_vmid_bitmap)
                        return true;
        }

        return false;
}

bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        return adev->have_atomics_support;
}

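/* Stub implementations of the kgd2kfd interface for kernels built
 * without CONFIG_HSA_AMD, so amdgpu links and runs without amdkfd.
 */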
#ifndef CONFIG_HSA_AMD
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
        return false;
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                                        struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
        return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
        return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
        return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
        return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
        return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
{
        return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
{
        return NULL;
}

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
                              const struct kfd2kgd_calls *f2g)
{
        return NULL;
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         const struct kgd2kfd_shared_resources *gpu_resources)
{
        return false;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

void kgd2kfd_exit(void)
{
}

void kgd2kfd_suspend(struct kfd_dev *kfd)
{
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
        return 0;
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
        return 0;
}

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
        return 0;
}

void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}
#endif