linux/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This file defines the private interface between the
 * AMD kernel graphics drivers and the AMD KFD.
 */

#ifndef KGD_KFD_INTERFACE_H_INCLUDED
#define KGD_KFD_INTERFACE_H_INCLUDED

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>

struct pci_dev;

#define KFD_INTERFACE_VERSION 2
#define KGD_MAX_QUEUES 128

struct kfd_dev;
struct kgd_dev;

struct kgd_mem;

enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};

struct kfd_cu_info {
	uint32_t num_shader_engines;
	uint32_t num_shader_arrays_per_engine;
	uint32_t num_cu_per_sh;
	uint32_t cu_active_number;
	uint32_t cu_ao_mask;
	uint32_t simd_per_cu;
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;
	uint32_t lds_size;
	uint32_t cu_bitmap[4][4];
};

/* For getting GPU local memory information from KGD */
struct kfd_local_mem_info {
	uint64_t local_mem_size_private;
	uint64_t local_mem_size_public;
	uint32_t vram_width;
	uint32_t mem_clk_max;
};

enum kgd_memory_pool {
	KGD_POOL_SYSTEM_CACHEABLE = 1,
	KGD_POOL_SYSTEM_WRITECOMBINE = 2,
	KGD_POOL_FRAMEBUFFER = 3,
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};

struct kgd2kfd_shared_resources {
	/* Bit n == 1 means VMID n is available for KFD. */
	unsigned int compute_vmid_bitmap;

	/* number of pipes per mec */
	uint32_t num_pipe_per_mec;

	/* number of queues per pipe */
	uint32_t num_queue_per_pipe;

	/* Bit n == 1 means Queue n is available for KFD */
	DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
	/* Doorbell assignments (SOC15 and later chips only). Only
	 * specific doorbells are routed to each SDMA engine. Others
	 * are routed to IH and VCN. They are not usable by the CP.
	 *
	 * Any doorbell number D that satisfies the following condition
	 * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val
	 *
	 * KFD currently uses 1024 doorbells per process (indices 0x000
	 * through 0x3ff). If doorbells 0x0f0-0x0f7 and 0x2f0-0x2f7 are
	 * reserved, the mask would be set to 0x1f8 and the value to 0x0f0.
	 * (An illustrative helper follows this structure.)
	 */
	unsigned int sdma_doorbell[2][2];
	unsigned int reserved_doorbell_mask;
	unsigned int reserved_doorbell_val;

	/* Base address of doorbell aperture. */
	phys_addr_t doorbell_physical_address;

	/* Size in bytes of doorbell aperture. */
	size_t doorbell_aperture_size;

	/* Number of bytes at start of aperture reserved for KGD. */
	size_t doorbell_start_offset;

	/* GPUVM address space size in bytes */
	uint64_t gpuvm_size;

	/* Minor device number of the render node */
	int drm_render_minor;
};
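
/*
 * Illustrative sketch, not part of the interface itself: how a consumer
 * of kgd2kfd_shared_resources might apply the queue bitmap and the
 * doorbell reservation rule described above. The helper names are
 * hypothetical.
 */
static inline bool kgd2kfd_example_queue_is_kfd(
		const struct kgd2kfd_shared_resources *res, unsigned int q)
{
	/* Bit q set in queue_bitmap means queue q is available for KFD. */
	return test_bit(q, res->queue_bitmap);
}

static inline bool kgd2kfd_example_doorbell_is_reserved(
		const struct kgd2kfd_shared_resources *res, unsigned int db)
{
	/* Doorbell db is reserved when (db & mask) == val. With
	 * mask == 0x1f8 and val == 0x0f0 this matches doorbells
	 * 0x0f0-0x0f7 and 0x2f0-0x2f7 of a process's 1024 doorbells.
	 */
	return (db & res->reserved_doorbell_mask) ==
	       res->reserved_doorbell_val;
}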

struct tile_config {
	uint32_t *tile_config_ptr;
	uint32_t *macro_tile_config_ptr;
	uint32_t num_tile_configs;
	uint32_t num_macro_tile_configs;

	uint32_t gb_addr_config;
	uint32_t num_banks;
	uint32_t num_ranks;
};


/*
 * Allocation flag domains
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_VRAM		(1 << 0)
#define ALLOC_MEM_FLAGS_GTT		(1 << 1)
#define ALLOC_MEM_FLAGS_USERPTR		(1 << 2) /* TODO */
#define ALLOC_MEM_FLAGS_DOORBELL	(1 << 3) /* TODO */

/*
 * Allocation flags attributes/access options.
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
#define ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
#define ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28) /* TODO */
#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
#define ALLOC_MEM_FLAGS_COHERENT	(1 << 26) /* For GFXv9 or later */

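/*
 * Illustrative sketch, not part of the interface itself: a typical flag
 * word for a writable, executable VRAM allocation. One domain flag is
 * usually combined with attribute flags; the helper name is
 * hypothetical.
 */
static inline uint32_t kgd_example_vram_alloc_flags(void)
{
	return ALLOC_MEM_FLAGS_VRAM | ALLOC_MEM_FLAGS_WRITABLE |
	       ALLOC_MEM_FLAGS_EXECUTABLE;
}
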
/**
 * struct kfd2kgd_calls
 *
 * @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
 * The buffer can be used for mqds, hpds, kernel queue, fence and runlists
 *
 * @free_gtt_mem: Frees a buffer that was allocated on the gart aperture
 *
 * @get_local_mem_info: Retrieves information about GPU local memory
 *
 * @get_gpu_clock_counter: Retrieves GPU clock counter
 *
 * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
 *
 * @alloc_pasid: Allocate a PASID
 * @free_pasid: Free a PASID
 *
 * @program_sh_mem_settings: Initializes memory properties such as the
 * main aperture memory type (cached / non-cached) and the secondary
 * aperture base address, size and memory type.
 * Only used in no cp scheduling mode.
 *
 * @set_pasid_vmid_mapping: Exposes a pasid/vmid pair to the H/W.
 * Only used in no cp scheduling mode.
 *
 * @hqd_load: Loads the mqd structure to a H/W hqd slot. Only used in no
 * cp scheduling mode.
 *
 * @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
 * Only used in no HWS mode.
 *
 * @hqd_dump: Dumps CPC HQD registers to an array of address-value pairs.
 * Array is allocated with kmalloc, needs to be freed with kfree by caller.
 *
 * @hqd_sdma_dump: Dumps SDMA HQD registers to an array of address-value pairs.
 * Array is allocated with kmalloc, needs to be freed with kfree by caller.
 *
 * @hqd_is_occupied: Checks if a hqd slot is occupied.
 *
 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
 *
 * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
 *
 * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
 * SDMA hqd slot.
 *
 * @get_fw_version: Returns the FW version read from the firmware header
 * of the given engine
 *
 * @set_scratch_backing_va: Sets the VA of the scratch backing memory of
 * a VMID. Only used in no cp scheduling mode.
 *
 * @get_tile_config: Returns GPU-specific tiling mode information
 *
 * @get_cu_info: Retrieves information about the active compute units (CUs)
 *
 * @get_vram_usage: Returns current VRAM usage
 *
 * @create_process_vm: Create a VM address space for a given process and GPU
 *
 * @acquire_process_vm: Acquire an existing VM address space, created via
 * a DRM render node, for use by a given process and GPU
 *
 * @destroy_process_vm: Destroy a VM
 *
 * @get_process_page_dir: Get physical address of a VM page directory
 *
 * @set_vm_context_page_table_base: Program page table base for a VMID
 *
 * @alloc_memory_of_gpu: Allocate GPUVM memory
 *
 * @free_memory_of_gpu: Free GPUVM memory
 *
 * @map_memory_to_gpu: Map GPUVM memory into a specific VM address
 * space. Allocates and updates page tables and page directories as
 * needed. This function may return before all page table updates have
 * completed. This allows multiple map operations (on multiple GPUs)
 * to happen concurrently. Use sync_memory to synchronize with all
 * pending updates. (An illustrative sketch follows this structure.)
 *
 * @unmap_memory_to_gpu: Unmap GPUVM memory from a specific VM address space
 *
 * @sync_memory: Wait for pending page table updates to complete
 *
 * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access
 * Pins the BO, maps it to kernel address space. Such BOs are never evicted.
 * The kernel virtual address remains valid until the BO is freed.
 *
 * @restore_process_bos: Restore all BOs that belong to the
 * process. This is intended for restoring memory mappings after a TTM
 * eviction.
 *
 * @invalidate_tlbs: Invalidate TLBs for a specific PASID
 *
 * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID
 *
 * @submit_ib: Submits an IB to the specified engine by inserting the IB
 * into the corresponding ring (by ring type). The IB is executed with
 * the specified VMID in a user-mode context.
 *
 * This structure contains function pointers to services that the kgd
 * driver provides to the amdkfd driver.
 *
 */
struct kfd2kgd_calls {
	int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
					void **mem_obj, uint64_t *gpu_addr,
					void **cpu_ptr);

	void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);

	void (*get_local_mem_info)(struct kgd_dev *kgd,
			struct kfd_local_mem_info *mem_info);
	uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);

	uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);

	int (*alloc_pasid)(unsigned int bits);
	void (*free_pasid)(unsigned int pasid);

	/* Register access functions */
	void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
			uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
			uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

	int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid);

	int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);

	int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);

	int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);

	int (*hqd_dump)(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs);

	int (*hqd_sdma_dump)(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);

	bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);

	int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id);

	bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd);

	int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd,
				unsigned int timeout);

	int (*address_watch_disable)(struct kgd_dev *kgd);
	int (*address_watch_execute)(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
	int (*wave_control_execute)(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
	uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);
	bool (*get_atc_vmid_pasid_mapping_valid)(
					struct kgd_dev *kgd,
					uint8_t vmid);
	uint16_t (*get_atc_vmid_pasid_mapping_pasid)(
					struct kgd_dev *kgd,
					uint8_t vmid);

	uint16_t (*get_fw_version)(struct kgd_dev *kgd,
				enum kgd_engine_type type);
	void (*set_scratch_backing_va)(struct kgd_dev *kgd,
				uint64_t va, uint32_t vmid);
	int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);

	void (*get_cu_info)(struct kgd_dev *kgd,
			struct kfd_cu_info *cu_info);
	uint64_t (*get_vram_usage)(struct kgd_dev *kgd);

	int (*create_process_vm)(struct kgd_dev *kgd, void **vm,
			void **process_info, struct dma_fence **ef);
	int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp,
			void **vm, void **process_info, struct dma_fence **ef);
	void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
	uint32_t (*get_process_page_dir)(void *vm);
	void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
			uint32_t vmid, uint32_t page_table_base);
	int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
			uint64_t size, void *vm,
			struct kgd_mem **mem, uint64_t *offset,
			uint32_t flags);
	int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
	int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
			void *vm);
	int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
			void *vm);
	int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
	int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem,
			void **kptr, uint64_t *size);
	int (*restore_process_bos)(void *process_info, struct dma_fence **ef);

	int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
	int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);

	int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
			uint32_t vmid, uint64_t gpu_addr,
			uint32_t *ib_cmd, uint32_t ib_len);
};
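
/*
 * Illustrative sketch, not part of the interface itself: the map/sync
 * pattern described for @map_memory_to_gpu above. map_memory_to_gpu may
 * return before all page table updates have completed; sync_memory
 * waits for the pending updates. The helper name is hypothetical.
 */
static inline int kgd_example_map_and_sync(const struct kfd2kgd_calls *f2g,
					   struct kgd_dev *kgd,
					   struct kgd_mem *mem, void *vm)
{
	int ret;

	/* Start the mapping; page table updates may still be pending. */
	ret = f2g->map_memory_to_gpu(kgd, mem, vm);
	if (ret)
		return ret;

	/* Wait interruptibly for the pending page table updates. */
	return f2g->sync_memory(kgd, mem, true);
}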

/**
 * struct kgd2kfd_calls
 *
 * @exit: Notifies amdkfd that kgd module is unloaded
 *
 * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
 *
 * @device_init: Initialize the newly probed device (if it is a device that
 * amdkfd supports)
 *
 * @device_exit: Notifies amdkfd about a removal of a kgd device
 *
 * @interrupt: Forwards an interrupt from the kgd device (an IH ring
 * entry) to amdkfd for processing
 *
 * @suspend: Notifies amdkfd about a suspend action done to a kgd device
 *
 * @resume: Notifies amdkfd about a resume action done to a kgd device
 *
 * @quiesce_mm: Quiesce all user queue access to specified MM address space
 *
 * @resume_mm: Resume user queue access to specified MM address space
 *
 * @schedule_evict_and_restore_process: Schedules work queue that will prepare
 * for safe eviction of KFD BOs that belong to the specified process.
 *
 * This structure contains function callback pointers that the kgd driver
 * uses to notify amdkfd about certain status changes. (An illustrative
 * sketch of the bring-up sequence follows at the end of this header.)
 *
 */
struct kgd2kfd_calls {
	void (*exit)(void);
	struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev,
		const struct kfd2kgd_calls *f2g);
	bool (*device_init)(struct kfd_dev *kfd,
			const struct kgd2kfd_shared_resources *gpu_resources);
	void (*device_exit)(struct kfd_dev *kfd);
	void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
	void (*suspend)(struct kfd_dev *kfd);
	int (*resume)(struct kfd_dev *kfd);
	int (*quiesce_mm)(struct mm_struct *mm);
	int (*resume_mm)(struct mm_struct *mm);
	int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
			struct dma_fence *fence);
};

int kgd2kfd_init(unsigned interface_version,
		const struct kgd2kfd_calls **g2f);
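
/*
 * Illustrative sketch, not part of the interface itself: how a kgd
 * driver might obtain the kgd2kfd_calls table and bring up one device,
 * as described for struct kgd2kfd_calls above. The helper and its
 * parameter names are hypothetical.
 */
static inline struct kfd_dev *kgd_example_bind_device(struct kgd_dev *kgd,
		struct pci_dev *pdev,
		const struct kfd2kgd_calls *f2g,
		const struct kgd2kfd_shared_resources *gpu_resources)
{
	const struct kgd2kfd_calls *kgd2kfd;
	struct kfd_dev *kfd;

	/* Negotiate the interface version and fetch the callback table. */
	if (kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd))
		return NULL;

	/* Announce the device, then hand it the shared resources. */
	kfd = kgd2kfd->probe(kgd, pdev, f2g);
	if (kfd && !kgd2kfd->device_init(kfd, gpu_resources))
		kfd = NULL;

	return kfd;
}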

#endif	/* KGD_KFD_INTERFACE_H_INCLUDED */