linux/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
/*
 * Copyright 2015-2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include "kfd_crat.h"
#include "kfd_priv.h"
#include "kfd_topology.h"
#include "kfd_iommu.h"

/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
 * GPU processor IDs are expressed with Bit[31] = 1.
 * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
 * used in the CRAT.
 */
static uint32_t gpu_processor_id_low = 0x80001000;

/* Return the next available gpu_processor_id and increment it for the
 * next GPU
 *      @total_cu_count - Total CUs present in the GPU including ones
 *                        masked off
 */
static inline unsigned int get_and_inc_gpu_processor_id(
                                unsigned int total_cu_count)
{
        int current_id = gpu_processor_id_low;

        gpu_processor_id_low += total_cu_count;
        return current_id;
}
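
/* Example (illustrative): starting from the 0x80001000 base, a first dGPU
 * with 64 total CUs is assigned processor ID 0x80001000 and the next dGPU's
 * IDs start at 0x80001040, i.e. each GPU reserves one processor ID per CU.
 */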

/* Static table to describe GPU Cache information */
struct kfd_gpu_cache_info {
        uint32_t        cache_size;
        uint32_t        cache_level;
        uint32_t        flags;
        /* Indicates how many Compute Units share this cache
         * Value = 1 indicates the cache is not shared
         */
        uint32_t        num_cu_shared;
};

static struct kfd_gpu_cache_info kaveri_cache_info[] = {
        {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
                .num_cu_shared = 1,
        },
        {
                /* Scalar L1 Instruction Cache (in SQC module) per bank */
                .cache_size = 16,
                .cache_level = 1,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
                .num_cu_shared = 2,
        },
        {
                /* Scalar L1 Data Cache (in SQC module) per bank */
                .cache_size = 8,
                .cache_level = 1,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
                .num_cu_shared = 2,
        },

        /* TODO: Add L2 Cache information */
};

static struct kfd_gpu_cache_info carrizo_cache_info[] = {
        {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
                .num_cu_shared = 1,
        },
        {
                /* Scalar L1 Instruction Cache (in SQC module) per bank */
                .cache_size = 8,
                .cache_level = 1,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
                .num_cu_shared = 4,
        },
        {
                /* Scalar L1 Data Cache (in SQC module) per bank */
                .cache_size = 4,
                .cache_level = 1,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
                .num_cu_shared = 4,
        },

        /* TODO: Add L2 Cache information */
};

/* NOTE: In future if more information is added to struct kfd_gpu_cache_info
 * the following ASICs may need a separate table.
 */
#define hawaii_cache_info kaveri_cache_info
#define tonga_cache_info carrizo_cache_info
#define fiji_cache_info  carrizo_cache_info
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info

static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
                struct crat_subtype_computeunit *cu)
{
        dev->node_props.cpu_cores_count = cu->num_cpu_cores;
        dev->node_props.cpu_core_id_base = cu->processor_id_low;
        if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
                dev->node_props.capability |= HSA_CAP_ATS_PRESENT;

        pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
                        cu->processor_id_low);
}

static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
                struct crat_subtype_computeunit *cu)
{
        dev->node_props.simd_id_base = cu->processor_id_low;
        dev->node_props.simd_count = cu->num_simd_cores;
        dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
        dev->node_props.max_waves_per_simd = cu->max_waves_simd;
        dev->node_props.wave_front_size = cu->wave_front_size;
        dev->node_props.array_count = cu->array_count;
        dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
        dev->node_props.simd_per_cu = cu->num_simd_per_cu;
        dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
        if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
                dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
        pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
}

/* kfd_parse_subtype_cu - parse compute unit subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
                                struct list_head *device_list)
{
        struct kfd_topology_device *dev;

        pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
                        cu->proximity_domain, cu->hsa_capability);
        list_for_each_entry(dev, device_list, list) {
                if (cu->proximity_domain == dev->proximity_domain) {
                        if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
                                kfd_populated_cu_info_cpu(dev, cu);

                        if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
                                kfd_populated_cu_info_gpu(dev, cu);
                        break;
                }
        }

        return 0;
}

static struct kfd_mem_properties *
find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
                struct kfd_topology_device *dev)
{
        struct kfd_mem_properties *props;

        list_for_each_entry(props, &dev->mem_props, list) {
                if (props->heap_type == heap_type
                                && props->flags == flags
                                && props->width == width)
                        return props;
        }

        return NULL;
}

/* kfd_parse_subtype_mem - parse memory subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
                                struct list_head *device_list)
{
        struct kfd_mem_properties *props;
        struct kfd_topology_device *dev;
        uint32_t heap_type;
        uint64_t size_in_bytes;
        uint32_t flags = 0;
        uint32_t width;

        pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
                        mem->proximity_domain);
        list_for_each_entry(dev, device_list, list) {
                if (mem->proximity_domain == dev->proximity_domain) {
                        /* We're on a GPU node */
                        if (dev->node_props.cpu_cores_count == 0) {
                                /* APU */
                                if (mem->visibility_type == 0)
                                        heap_type =
                                                HSA_MEM_HEAP_TYPE_FB_PRIVATE;
                                /* dGPU */
                                else
                                        heap_type = mem->visibility_type;
                        } else
                                heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;

                        if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
                                flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
                        if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
                                flags |= HSA_MEM_FLAGS_NON_VOLATILE;

                        size_in_bytes =
                                ((uint64_t)mem->length_high << 32) +
                                                        mem->length_low;
                        width = mem->width;

                        /* Multiple banks of the same type are aggregated into
                         * one. User mode doesn't care about multiple physical
                         * memory segments. It's managed as a single virtual
                         * heap for user mode.
                         */
                        props = find_subtype_mem(heap_type, flags, width, dev);
                        if (props) {
                                props->size_in_bytes += size_in_bytes;
                                break;
                        }

                        props = kfd_alloc_struct(props);
                        if (!props)
                                return -ENOMEM;

                        props->heap_type = heap_type;
                        props->flags = flags;
                        props->size_in_bytes = size_in_bytes;
                        props->width = width;

                        dev->node_props.mem_banks_count++;
                        list_add_tail(&props->list, &dev->mem_props);

                        break;
                }
        }

        return 0;
}

/* kfd_parse_subtype_cache - parse cache subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
                        struct list_head *device_list)
{
        struct kfd_cache_properties *props;
        struct kfd_topology_device *dev;
        uint32_t id;
        uint32_t total_num_of_cu;

        id = cache->processor_id_low;

        pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
        list_for_each_entry(dev, device_list, list) {
                total_num_of_cu = (dev->node_props.array_count *
                                        dev->node_props.cu_per_simd_array);

                /* Cache information in CRAT doesn't have proximity_domain
                 * information as it is associated with a CPU core or GPU
                 * Compute Unit. So map the cache using CPU core ID or SIMD
                 * (GPU) ID.
                 * TODO: This works because currently we can safely assume that
                 *  Compute Units are parsed before caches are parsed. In
                 *  future, remove this dependency
                 */
                if ((id >= dev->node_props.cpu_core_id_base &&
                        id <= dev->node_props.cpu_core_id_base +
                                dev->node_props.cpu_cores_count) ||
                        (id >= dev->node_props.simd_id_base &&
                        id < dev->node_props.simd_id_base +
                                total_num_of_cu)) {
                        props = kfd_alloc_struct(props);
                        if (!props)
                                return -ENOMEM;

                        props->processor_id_low = id;
                        props->cache_level = cache->cache_level;
                        props->cache_size = cache->cache_size;
                        props->cacheline_size = cache->cache_line_size;
                        props->cachelines_per_tag = cache->lines_per_tag;
                        props->cache_assoc = cache->associativity;
                        props->cache_latency = cache->cache_latency;
                        memcpy(props->sibling_map, cache->sibling_map,
                                        sizeof(props->sibling_map));

                        if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
                                props->cache_type |= HSA_CACHE_TYPE_DATA;
                        if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
                                props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
                        if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
                                props->cache_type |= HSA_CACHE_TYPE_CPU;
                        if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
                                props->cache_type |= HSA_CACHE_TYPE_HSACU;

                        dev->cache_count++;
                        dev->node_props.caches_count++;
                        list_add_tail(&props->list, &dev->cache_props);

                        break;
                }
        }

        return 0;
}

/* kfd_parse_subtype_iolink - parse iolink subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
                                        struct list_head *device_list)
{
        struct kfd_iolink_properties *props = NULL, *props2;
        struct kfd_topology_device *dev, *cpu_dev;
        uint32_t id_from;
        uint32_t id_to;

        id_from = iolink->proximity_domain_from;
        id_to = iolink->proximity_domain_to;

        pr_debug("Found IO link entry in CRAT table with id_from=%d\n",
                        id_from);
        list_for_each_entry(dev, device_list, list) {
                if (id_from == dev->proximity_domain) {
                        props = kfd_alloc_struct(props);
                        if (!props)
                                return -ENOMEM;

                        props->node_from = id_from;
                        props->node_to = id_to;
                        props->ver_maj = iolink->version_major;
                        props->ver_min = iolink->version_minor;
                        props->iolink_type = iolink->io_interface_type;

                        if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
                                props->weight = 20;
                        else
                                props->weight = node_distance(id_from, id_to);

                        props->min_latency = iolink->minimum_latency;
                        props->max_latency = iolink->maximum_latency;
                        props->min_bandwidth = iolink->minimum_bandwidth_mbs;
                        props->max_bandwidth = iolink->maximum_bandwidth_mbs;
                        props->rec_transfer_size =
                                        iolink->recommended_transfer_size;

                        dev->io_link_count++;
                        dev->node_props.io_links_count++;
                        list_add_tail(&props->list, &dev->io_link_props);
                        break;
                }
        }

        /* CPU topology is created before GPUs are detected, so CPU->GPU
         * links are not built at that time. If a PCIe type is discovered, it
         * means a GPU is detected and we are adding GPU->CPU to the topology.
         * At this time, also add the corresponding CPU->GPU link.
         */
        if (props && props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS) {
                cpu_dev = kfd_topology_device_by_proximity_domain(id_to);
                if (!cpu_dev)
                        return -ENODEV;
                /* same everything but the other direction */
                props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
                if (!props2)
                        return -ENOMEM;
                props2->node_from = id_to;
                props2->node_to = id_from;
                props2->kobj = NULL;
                cpu_dev->io_link_count++;
                cpu_dev->node_props.io_links_count++;
                list_add_tail(&props2->list, &cpu_dev->io_link_props);
        }

        return 0;
}

/* kfd_parse_subtype - parse subtypes and attach them to the correct topology
 * device present in the device_list
 *      @sub_type_hdr - subtype section of crat_image
 *      @device_list - list of topology devices present in this crat_image
 */
static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
                                struct list_head *device_list)
{
        struct crat_subtype_computeunit *cu;
        struct crat_subtype_memory *mem;
        struct crat_subtype_cache *cache;
        struct crat_subtype_iolink *iolink;
        int ret = 0;

        switch (sub_type_hdr->type) {
        case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
                cu = (struct crat_subtype_computeunit *)sub_type_hdr;
                ret = kfd_parse_subtype_cu(cu, device_list);
                break;
        case CRAT_SUBTYPE_MEMORY_AFFINITY:
                mem = (struct crat_subtype_memory *)sub_type_hdr;
                ret = kfd_parse_subtype_mem(mem, device_list);
                break;
        case CRAT_SUBTYPE_CACHE_AFFINITY:
                cache = (struct crat_subtype_cache *)sub_type_hdr;
                ret = kfd_parse_subtype_cache(cache, device_list);
                break;
        case CRAT_SUBTYPE_TLB_AFFINITY:
                /*
                 * For now, nothing to do here
                 */
                pr_debug("Found TLB entry in CRAT table (not processing)\n");
                break;
        case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
                /*
                 * For now, nothing to do here
                 */
                pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
                break;
        case CRAT_SUBTYPE_IOLINK_AFFINITY:
                iolink = (struct crat_subtype_iolink *)sub_type_hdr;
                ret = kfd_parse_subtype_iolink(iolink, device_list);
                break;
        default:
                pr_warn("Unknown subtype %d in CRAT\n",
                                sub_type_hdr->type);
        }

        return ret;
}

/* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT
 * create a kfd_topology_device and add it to device_list. Also parse
 * CRAT subtypes and attach them to the appropriate kfd_topology_device
 *      @crat_image - input image containing CRAT
 *      @device_list - [OUT] list of kfd_topology_device generated after
 *                     parsing crat_image
 *      @proximity_domain - Proximity domain of the first device in the table
 *
 *      Return - 0 if successful else -ve value
 */
int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
                         uint32_t proximity_domain)
{
        struct kfd_topology_device *top_dev = NULL;
        struct crat_subtype_generic *sub_type_hdr;
        uint16_t node_id;
        int ret = 0;
        struct crat_header *crat_table = (struct crat_header *)crat_image;
        uint16_t num_nodes;
        uint32_t image_len;

        if (!crat_image)
                return -EINVAL;

        if (!list_empty(device_list)) {
                pr_warn("Error: device list should be empty\n");
                return -EINVAL;
        }

        num_nodes = crat_table->num_domains;
        image_len = crat_table->length;

        pr_info("Parsing CRAT table with %d nodes\n", num_nodes);

        for (node_id = 0; node_id < num_nodes; node_id++) {
                top_dev = kfd_create_topology_device(device_list);
                if (!top_dev)
                        break;
                top_dev->proximity_domain = proximity_domain++;
        }

        if (!top_dev) {
                ret = -ENOMEM;
                goto err;
        }

        memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
        memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
                        CRAT_OEMTABLEID_LENGTH);
        top_dev->oem_revision = crat_table->oem_revision;

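        /* Subtype entries are packed back to back immediately after the CRAT
         * header; advance by each entry's self-reported length until the end
         * of the image.
         */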
        sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
        while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
                        ((char *)crat_image) + image_len) {
                if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
                        ret = kfd_parse_subtype(sub_type_hdr, device_list);
                        if (ret)
                                break;
                }

                sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                                sub_type_hdr->length);
        }

err:
        if (ret)
                kfd_release_topology_device_list(device_list);

        return ret;
}

/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_pcache(struct crat_subtype_cache *pcache,
                                struct kfd_gpu_cache_info *pcache_info,
                                struct kfd_cu_info *cu_info,
                                int mem_available,
                                int cu_bitmask,
                                int cache_type, unsigned int cu_processor_id,
                                int cu_block)
{
        unsigned int cu_sibling_map_mask;
        int first_active_cu;

        /* First check if enough memory is available */
        if (sizeof(struct crat_subtype_cache) > mem_available)
                return -ENOMEM;

        cu_sibling_map_mask = cu_bitmask;
        cu_sibling_map_mask >>= cu_block;
        cu_sibling_map_mask &=
                ((1 << pcache_info[cache_type].num_cu_shared) - 1);
        first_active_cu = ffs(cu_sibling_map_mask);

        /* A CU can be inactive. For a shared cache, find the first active CU
         * in the block; for a non-shared cache, check whether the single CU
         * is inactive and, if so, skip it.
         */
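        /* Worked example (illustrative): with num_cu_shared = 4, cu_block = 4
         * and cu_bitmask = 0xE0, the mask becomes (0xE0 >> 4) & 0xF = 0xE and
         * ffs() returns 2: the first CU of the block is inactive, so
         * processor_id_low is advanced by one and the sibling map is shifted
         * down by one bit.
         */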
        if (first_active_cu) {
                memset(pcache, 0, sizeof(struct crat_subtype_cache));
                pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
                pcache->length = sizeof(struct crat_subtype_cache);
                pcache->flags = pcache_info[cache_type].flags;
                pcache->processor_id_low = cu_processor_id
                                         + (first_active_cu - 1);
                pcache->cache_level = pcache_info[cache_type].cache_level;
                pcache->cache_size = pcache_info[cache_type].cache_size;

                /* Sibling map is w.r.t processor_id_low, so shift out
                 * inactive CU
                 */
                cu_sibling_map_mask =
                        cu_sibling_map_mask >> (first_active_cu - 1);

                pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
                pcache->sibling_map[1] =
                                (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
                pcache->sibling_map[2] =
                                (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
                pcache->sibling_map[3] =
                                (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
                return 0;
        }
        return 1;
}

/* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
 * tables
 *
 *      @kdev - [IN] GPU device
 *      @gpu_processor_id - [IN] GPU processor ID to which these caches
 *                          associate
 *      @available_size - [IN] Amount of memory available in pcache
 *      @cu_info - [IN] Compute Unit info obtained from KGD
 *      @pcache - [OUT] memory into which cache data is to be filled in.
 *      @size_filled - [OUT] amount of data used up in pcache.
 *      @num_of_entries - [OUT] number of caches added
 */
static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
                        int gpu_processor_id,
                        int available_size,
                        struct kfd_cu_info *cu_info,
                        struct crat_subtype_cache *pcache,
                        int *size_filled,
                        int *num_of_entries)
{
        struct kfd_gpu_cache_info *pcache_info;
        int num_of_cache_types = 0;
        int i, j, k;
        int ct = 0;
        int mem_available = available_size;
        unsigned int cu_processor_id;
        int ret;

        switch (kdev->device_info->asic_family) {
        case CHIP_KAVERI:
                pcache_info = kaveri_cache_info;
                num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
                break;
        case CHIP_HAWAII:
                pcache_info = hawaii_cache_info;
                num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
                break;
        case CHIP_CARRIZO:
                pcache_info = carrizo_cache_info;
                num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
                break;
        case CHIP_TONGA:
                pcache_info = tonga_cache_info;
                num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
                break;
        case CHIP_FIJI:
                pcache_info = fiji_cache_info;
                num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
                break;
        case CHIP_POLARIS10:
                pcache_info = polaris10_cache_info;
                num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
                break;
        case CHIP_POLARIS11:
                pcache_info = polaris11_cache_info;
                num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
                break;
        case CHIP_VEGA10:
                pcache_info = vega10_cache_info;
                num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
                break;
        case CHIP_RAVEN:
                pcache_info = raven_cache_info;
                num_of_cache_types = ARRAY_SIZE(raven_cache_info);
                break;
        default:
                return -EINVAL;
        }

        *size_filled = 0;
        *num_of_entries = 0;

        /* For each type of cache listed in the kfd_gpu_cache_info table,
         * go through all available Compute Units.
         * The [i,j,k] loop walks every CU when
         * kfd_gpu_cache_info.num_cu_shared == 1; otherwise it visits only
         * the first CU of each group that shares the cache.
         */

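        /* Example (illustrative): for Carrizo's SQC instruction cache,
         * num_cu_shared = 4, so k visits CUs 0, 4, 8, ... of each shader
         * array and each visit emits one cache entry shared by up to
         * four CUs.
         */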
        for (ct = 0; ct < num_of_cache_types; ct++) {
                cu_processor_id = gpu_processor_id;
                for (i = 0; i < cu_info->num_shader_engines; i++) {
                        for (j = 0; j < cu_info->num_shader_arrays_per_engine;
                                j++) {
                                for (k = 0; k < cu_info->num_cu_per_sh;
                                        k += pcache_info[ct].num_cu_shared) {

                                        ret = fill_in_pcache(pcache,
                                                pcache_info,
                                                cu_info,
                                                mem_available,
                                                cu_info->cu_bitmap[i][j],
                                                ct,
                                                cu_processor_id,
                                                k);

                                        if (ret < 0)
                                                break;

                                        if (!ret) {
                                                pcache++;
                                                (*num_of_entries)++;
                                                mem_available -=
                                                        sizeof(*pcache);
                                                (*size_filled) +=
                                                        sizeof(*pcache);
                                        }

                                        /* Move to next CU block */
                                        cu_processor_id +=
                                                pcache_info[ct].num_cu_shared;
                                }
                        }
                }
        }

        pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);

        return 0;
}

/*
 * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
 * copies CRAT from ACPI (if available).
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 *      @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
 *                   crat_image will be NULL
 *      @size: [OUT] size of crat_image
 *
 *      Return 0 if successful else return error code
 */
int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
{
        struct acpi_table_header *crat_table;
        acpi_status status;
        void *pcrat_image;

        if (!crat_image)
                return -EINVAL;

        *crat_image = NULL;

        /* Fetch the CRAT table from ACPI */
        status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
        if (status == AE_NOT_FOUND) {
                pr_warn("CRAT table not found\n");
                return -ENODATA;
        } else if (ACPI_FAILURE(status)) {
                const char *err = acpi_format_exception(status);

                pr_err("CRAT table error: %s\n", err);
                return -EINVAL;
        }

        if (ignore_crat) {
                pr_info("CRAT table disabled by module option\n");
                return -ENODATA;
        }

        pcrat_image = kmalloc(crat_table->length, GFP_KERNEL);
        if (!pcrat_image)
                return -ENOMEM;

        memcpy(pcrat_image, crat_table, crat_table->length);

        *crat_image = pcrat_image;
        *size = crat_table->length;

        return 0;
}
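
/* Typical usage (illustrative sketch; device_list and proximity_domain are
 * caller-provided and hypothetical here, not a call site in this file):
 *
 *      void *crat_image;
 *      size_t image_size;
 *
 *      if (!kfd_create_crat_image_acpi(&crat_image, &image_size)) {
 *              ret = kfd_parse_crat_table(crat_image, &device_list,
 *                                         proximity_domain);
 *              kfd_destroy_crat_image(crat_image);
 *      }
 */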

/* Memory required to create Virtual CRAT.
 * Since there is no easy way to predict the amount of memory required, the
 * following amounts are allocated for the CPU and GPU Virtual CRAT. This is
 * expected to cover all known conditions. But to be safe, additional checks
 * are put in the code to ensure we don't overrun the allocation.
 */
#define VCRAT_SIZE_FOR_CPU      (2 * PAGE_SIZE)
#define VCRAT_SIZE_FOR_GPU      (3 * PAGE_SIZE)

/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
 *
 *      @numa_node_id: CPU NUMA node id
 *      @avail_size: Available size in the memory
 *      @sub_type_hdr: Memory into which compute info will be filled in
 *
 *      Return 0 if successful else return -ve value
 */
static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
                                int proximity_domain,
                                struct crat_subtype_computeunit *sub_type_hdr)
{
        const struct cpumask *cpumask;

        *avail_size -= sizeof(struct crat_subtype_computeunit);
        if (*avail_size < 0)
                return -ENOMEM;

        memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

        /* Fill in subtype header data */
        sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
        sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
        sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

        cpumask = cpumask_of_node(numa_node_id);

        /* Fill in CU data */
        sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
        sub_type_hdr->proximity_domain = proximity_domain;
        sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
        if (sub_type_hdr->processor_id_low == -1)
                return -EINVAL;

        sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);

        return 0;
}

/* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
 *
 *      @numa_node_id: CPU NUMA node id
 *      @avail_size: Available size in the memory
 *      @sub_type_hdr: Memory into which memory info will be filled in
 *
 *      Return 0 if successful else return -ve value
 */
static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
                        int proximity_domain,
                        struct crat_subtype_memory *sub_type_hdr)
{
        uint64_t mem_in_bytes = 0;
        pg_data_t *pgdat;
        int zone_type;

        *avail_size -= sizeof(struct crat_subtype_memory);
        if (*avail_size < 0)
                return -ENOMEM;

        memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));

        /* Fill in subtype header data */
        sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
        sub_type_hdr->length = sizeof(struct crat_subtype_memory);
        sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

        /* Fill in Memory Subunit data */

        /* Unlike si_meminfo, si_meminfo_node is not exported. So
         * the following lines are duplicated from the si_meminfo_node
         * function
         */
        pgdat = NODE_DATA(numa_node_id);
        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
                mem_in_bytes += pgdat->node_zones[zone_type].managed_pages;
        mem_in_bytes <<= PAGE_SHIFT;

        sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
        sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
        sub_type_hdr->proximity_domain = proximity_domain;

        return 0;
}

static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
                                uint32_t *num_entries,
                                struct crat_subtype_iolink *sub_type_hdr)
{
        int nid;
        struct cpuinfo_x86 *c = &cpu_data(0);
        uint8_t link_type;

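        /* Assume the inter-socket interconnect from the boot CPU's vendor:
         * HyperTransport on AMD, QPI otherwise.
         */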
        if (c->x86_vendor == X86_VENDOR_AMD)
                link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
        else
                link_type = CRAT_IOLINK_TYPE_QPI_1_1;

        *num_entries = 0;

        /* Create IO links from this node to other CPU nodes */
        for_each_online_node(nid) {
                if (nid == numa_node_id) /* node itself */
                        continue;

                *avail_size -= sizeof(struct crat_subtype_iolink);
                if (*avail_size < 0)
                        return -ENOMEM;

                memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

                /* Fill in subtype header data */
                sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
                sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
                sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

                /* Fill in IO link data */
                sub_type_hdr->proximity_domain_from = numa_node_id;
                sub_type_hdr->proximity_domain_to = nid;
                sub_type_hdr->io_interface_type = link_type;

                (*num_entries)++;
                sub_type_hdr++;
        }

        return 0;
}

/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
 *
 *      @pcrat_image: Fill in VCRAT for CPU
 *      @size:  [IN] allocated size of crat_image.
 *              [OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
{
        struct crat_header *crat_table = (struct crat_header *)pcrat_image;
        struct acpi_table_header *acpi_table;
        acpi_status status;
        struct crat_subtype_generic *sub_type_hdr;
        int avail_size = *size;
        int numa_node_id;
        uint32_t entries = 0;
        int ret = 0;

        if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
                return -EINVAL;

        /* Fill in CRAT Header.
         * Modify length and total_entries as subunits are added.
         */
        avail_size -= sizeof(struct crat_header);
        if (avail_size < 0)
                return -ENOMEM;

        memset(crat_table, 0, sizeof(struct crat_header));
        memcpy(&crat_table->signature, CRAT_SIGNATURE,
                        sizeof(crat_table->signature));
        crat_table->length = sizeof(struct crat_header);

        status = acpi_get_table("DSDT", 0, &acpi_table);
        if (status != AE_OK)
                pr_warn("DSDT table not found for OEM information\n");
        else {
                crat_table->oem_revision = acpi_table->revision;
                memcpy(crat_table->oem_id, acpi_table->oem_id,
                                CRAT_OEMID_LENGTH);
                memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
                                CRAT_OEMTABLEID_LENGTH);
        }
        crat_table->total_entries = 0;
        crat_table->num_domains = 0;

        sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);

        for_each_online_node(numa_node_id) {
                if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
                        continue;

                /* Fill in Subtype: Compute Unit */
                ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
                        crat_table->num_domains,
                        (struct crat_subtype_computeunit *)sub_type_hdr);
                if (ret < 0)
                        return ret;
                crat_table->length += sub_type_hdr->length;
                crat_table->total_entries++;

                sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                        sub_type_hdr->length);

                /* Fill in Subtype: Memory */
                ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
                        crat_table->num_domains,
                        (struct crat_subtype_memory *)sub_type_hdr);
                if (ret < 0)
                        return ret;
                crat_table->length += sub_type_hdr->length;
                crat_table->total_entries++;

                sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                        sub_type_hdr->length);

                /* Fill in Subtype: IO Link */
                ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
                                &entries,
                                (struct crat_subtype_iolink *)sub_type_hdr);
                if (ret < 0)
                        return ret;
                crat_table->length += (sub_type_hdr->length * entries);
                crat_table->total_entries += entries;

                sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                                sub_type_hdr->length * entries);

                crat_table->num_domains++;
        }

        /* TODO: Add cache Subtype for CPU.
         * Currently, CPU cache information is available in function
         * detect_cache_attributes(cpu) defined in the file
         * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
         * exported and to get the same information the code needs to be
         * duplicated.
         */

        *size = crat_table->length;
        pr_info("Virtual CRAT table created for CPU\n");

        return 0;
}

static int kfd_fill_gpu_memory_affinity(int *avail_size,
                struct kfd_dev *kdev, uint8_t type, uint64_t size,
                struct crat_subtype_memory *sub_type_hdr,
                uint32_t proximity_domain,
                const struct kfd_local_mem_info *local_mem_info)
{
        *avail_size -= sizeof(struct crat_subtype_memory);
        if (*avail_size < 0)
                return -ENOMEM;

        memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
        sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
        sub_type_hdr->length = sizeof(struct crat_subtype_memory);
        sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;

        sub_type_hdr->proximity_domain = proximity_domain;

        pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
                        type, size);

        sub_type_hdr->length_low = lower_32_bits(size);
        sub_type_hdr->length_high = upper_32_bits(size);

        sub_type_hdr->width = local_mem_info->vram_width;
        sub_type_hdr->visibility_type = type;

        return 0;
}

/* kfd_fill_gpu_direct_io_link - Fill in direct io link from GPU
 * to its NUMA node
 *      @avail_size: Available size in the memory
 *      @kdev: [IN] GPU device
 *      @sub_type_hdr: Memory into which io link info will be filled in
 *      @proximity_domain: proximity domain of the GPU node
 *
 *      Return 0 if successful else return -ve value
 */
static int kfd_fill_gpu_direct_io_link(int *avail_size,
                        struct kfd_dev *kdev,
                        struct crat_subtype_iolink *sub_type_hdr,
                        uint32_t proximity_domain)
{
        *avail_size -= sizeof(struct crat_subtype_iolink);
        if (*avail_size < 0)
                return -ENOMEM;

        memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

        /* Fill in subtype header data */
        sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
        sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
        sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;

        /* Fill in IOLINK subtype.
         * TODO: Fill-in other fields of iolink subtype
         */
        sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
        sub_type_hdr->proximity_domain_from = proximity_domain;
#ifdef CONFIG_NUMA
        if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
                sub_type_hdr->proximity_domain_to = 0;
        else
                sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
#else
        sub_type_hdr->proximity_domain_to = 0;
#endif
        return 0;
}

/* kfd_create_vcrat_image_gpu - Create Virtual CRAT for GPU
 *
 *      @pcrat_image: Fill in VCRAT for GPU
 *      @size:  [IN] allocated size of crat_image.
 *              [OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_gpu(void *pcrat_image,
                                      size_t *size, struct kfd_dev *kdev,
                                      uint32_t proximity_domain)
{
        struct crat_header *crat_table = (struct crat_header *)pcrat_image;
        struct crat_subtype_generic *sub_type_hdr;
        struct crat_subtype_computeunit *cu;
        struct kfd_cu_info cu_info;
        int avail_size = *size;
        uint32_t total_num_of_cu;
        int num_of_cache_entries = 0;
        int cache_mem_filled = 0;
        int ret = 0;
        struct kfd_local_mem_info local_mem_info;

        if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
                return -EINVAL;

        /* Fill the CRAT Header.
         * Modify length and total_entries as subunits are added.
         */
        avail_size -= sizeof(struct crat_header);
        if (avail_size < 0)
                return -ENOMEM;

        memset(crat_table, 0, sizeof(struct crat_header));

        memcpy(&crat_table->signature, CRAT_SIGNATURE,
                        sizeof(crat_table->signature));
        /* Change length as we add more subtypes */
        crat_table->length = sizeof(struct crat_header);
        crat_table->num_domains = 1;
        crat_table->total_entries = 0;

        /* Fill in Subtype: Compute Unit
         * First fill in the sub type header and then sub type data
         */
        avail_size -= sizeof(struct crat_subtype_computeunit);
        if (avail_size < 0)
                return -ENOMEM;

        sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
        memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

        sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
        sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
        sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

        /* Fill CU subtype data */
        cu = (struct crat_subtype_computeunit *)sub_type_hdr;
        cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
        cu->proximity_domain = proximity_domain;

        kdev->kfd2kgd->get_cu_info(kdev->kgd, &cu_info);
        cu->num_simd_per_cu = cu_info.simd_per_cu;
        cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
        cu->max_waves_simd = cu_info.max_waves_per_simd;

        cu->wave_front_size = cu_info.wave_front_size;
        cu->array_count = cu_info.num_shader_arrays_per_engine *
                cu_info.num_shader_engines;
        total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
        cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
        cu->num_cu_per_array = cu_info.num_cu_per_sh;
        cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
        cu->num_banks = cu_info.num_shader_engines;
        cu->lds_size_in_kb = cu_info.lds_size;

        cu->hsa_capability = 0;

        /* Check if this node supports IOMMU. During parsing this flag will
         * translate to HSA_CAP_ATS_PRESENT
         */
        if (!kfd_iommu_check_device(kdev))
                cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;

        crat_table->length += sub_type_hdr->length;
        crat_table->total_entries++;

        /* Fill in Subtype: Memory. Only on systems with large BAR (no
         * private FB), report memory as public. On other systems
         * report the total FB size (public+private) as a single
         * private heap.
         */
        kdev->kfd2kgd->get_local_mem_info(kdev->kgd, &local_mem_info);
        sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                        sub_type_hdr->length);

        if (debug_largebar)
                local_mem_info.local_mem_size_private = 0;

        if (local_mem_info.local_mem_size_private == 0)
                ret = kfd_fill_gpu_memory_affinity(&avail_size,
                                kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
                                local_mem_info.local_mem_size_public,
                                (struct crat_subtype_memory *)sub_type_hdr,
                                proximity_domain,
                                &local_mem_info);
        else
                ret = kfd_fill_gpu_memory_affinity(&avail_size,
                                kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
                                local_mem_info.local_mem_size_public +
                                local_mem_info.local_mem_size_private,
                                (struct crat_subtype_memory *)sub_type_hdr,
                                proximity_domain,
                                &local_mem_info);
        if (ret < 0)
                return ret;

        crat_table->length += sizeof(struct crat_subtype_memory);
        crat_table->total_entries++;

        /* TODO: Fill in cache information. This information is NOT readily
         * available in KGD
         */
        sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                sub_type_hdr->length);
        ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
                                avail_size,
                                &cu_info,
                                (struct crat_subtype_cache *)sub_type_hdr,
                                &cache_mem_filled,
                                &num_of_cache_entries);

        if (ret < 0)
                return ret;

        crat_table->length += cache_mem_filled;
        crat_table->total_entries += num_of_cache_entries;
        avail_size -= cache_mem_filled;

        /* Fill in Subtype: IO_LINKS
         * Only direct links are added here, i.e. the link from the GPU to
         * its NUMA node. Indirect links are added by userspace.
         */
        sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                cache_mem_filled);
        ret = kfd_fill_gpu_direct_io_link(&avail_size, kdev,
                (struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);

        if (ret < 0)
                return ret;

        crat_table->length += sub_type_hdr->length;
        crat_table->total_entries++;

        *size = crat_table->length;
        pr_info("Virtual CRAT table created for GPU\n");

        return ret;
}

/* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
 *              creates a Virtual CRAT (VCRAT) image
 *
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 *      @crat_image: VCRAT image created because ACPI does not have a
 *                   CRAT for this device
 *      @size: [OUT] size of virtual crat_image
 *      @flags: COMPUTE_UNIT_CPU - Create VCRAT for CPU device
 *              COMPUTE_UNIT_GPU - Create VCRAT for GPU
 *              (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
 *                      -- this option is not currently implemented.
 *                      The assumption is that all AMD APUs will have CRAT
 *      @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
 *
 *      Return 0 if successful else return -ve value
 */
int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
                                  int flags, struct kfd_dev *kdev,
                                  uint32_t proximity_domain)
{
        void *pcrat_image = NULL;
        int ret = 0;

        if (!crat_image)
                return -EINVAL;

        *crat_image = NULL;

        /* Allocate VCRAT_SIZE_FOR_CPU for the CPU virtual CRAT image and
         * VCRAT_SIZE_FOR_GPU for the GPU virtual CRAT image. This should
         * cover all the current conditions. Checks are in place so we do
         * not write beyond the allocated size.
         */
        switch (flags) {
        case COMPUTE_UNIT_CPU:
                pcrat_image = kmalloc(VCRAT_SIZE_FOR_CPU, GFP_KERNEL);
                if (!pcrat_image)
                        return -ENOMEM;
                *size = VCRAT_SIZE_FOR_CPU;
                ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
                break;
        case COMPUTE_UNIT_GPU:
                if (!kdev)
                        return -EINVAL;
                pcrat_image = kmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
                if (!pcrat_image)
                        return -ENOMEM;
                *size = VCRAT_SIZE_FOR_GPU;
                ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
                                                 proximity_domain);
                break;
        case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
                /* TODO: */
                ret = -EINVAL;
                pr_err("VCRAT not implemented for APU\n");
                break;
        default:
                ret = -EINVAL;
        }

        if (!ret)
                *crat_image = pcrat_image;
        else
                kfree(pcrat_image);

        return ret;
}

/* kfd_destroy_crat_image
 *
 *      @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
 */
void kfd_destroy_crat_image(void *crat_image)
{
        kfree(crat_image);
}