root/drivers/gpu/drm/amd/amdkfd/kfd_crat.c


DEFINITIONS

This source file includes the following definitions.
  1. get_and_inc_gpu_processor_id
  2. kfd_populated_cu_info_cpu
  3. kfd_populated_cu_info_gpu
  4. kfd_parse_subtype_cu
  5. find_subtype_mem
  6. kfd_parse_subtype_mem
  7. kfd_parse_subtype_cache
  8. kfd_parse_subtype_iolink
  9. kfd_parse_subtype
  10. kfd_parse_crat_table
  11. fill_in_pcache
  12. kfd_fill_gpu_cache_info
  13. kfd_create_crat_image_acpi
  14. kfd_fill_cu_for_cpu
  15. kfd_fill_mem_info_for_cpu
  16. kfd_fill_iolink_info_for_cpu
  17. kfd_create_vcrat_image_cpu
  18. kfd_fill_gpu_memory_affinity
  19. kfd_fill_gpu_direct_io_link_to_cpu
  20. kfd_fill_gpu_xgmi_link_to_gpu
  21. kfd_create_vcrat_image_gpu
  22. kfd_create_crat_image_virtual
  23. kfd_destroy_crat_image

   1 /*
   2  * Copyright 2015-2017 Advanced Micro Devices, Inc.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice shall be included in
  12  * all copies or substantial portions of the Software.
  13  *
  14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20  * OTHER DEALINGS IN THE SOFTWARE.
  21  */
  22 
  23 #include <linux/pci.h>
  24 #include <linux/acpi.h>
  25 #include "kfd_crat.h"
  26 #include "kfd_priv.h"
  27 #include "kfd_topology.h"
  28 #include "kfd_iommu.h"
  29 #include "amdgpu_amdkfd.h"
  30 
  31 /* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
   32  * GPU processor IDs are expressed with Bit[31]=1.
  33  * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
  34  * used in the CRAT.
  35  */
  36 static uint32_t gpu_processor_id_low = 0x80001000;
  37 
  38 /* Return the next available gpu_processor_id and increment it for next GPU
  39  *      @total_cu_count - Total CUs present in the GPU including ones
  40  *                        masked off
  41  */
  42 static inline unsigned int get_and_inc_gpu_processor_id(
  43                                 unsigned int total_cu_count)
  44 {
  45         int current_id = gpu_processor_id_low;
  46 
  47         gpu_processor_id_low += total_cu_count;
  48         return current_id;
  49 }
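
     /* Illustrative sketch (not part of the driver): with the base at
      * 0x80001000, two dGPUs with 64 CUs each, probed in order, would get
      *
      *   id0 = get_and_inc_gpu_processor_id(64);   // -> 0x80001000
      *   id1 = get_and_inc_gpu_processor_id(64);   // -> 0x80001040
      *
      * Each GPU thus owns a contiguous range of processor IDs, one per CU,
      * which the cache subtypes later reference via processor_id_low.
      */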
  50 
  51 /* Static table to describe GPU Cache information */
  52 struct kfd_gpu_cache_info {
  53         uint32_t        cache_size;
  54         uint32_t        cache_level;
  55         uint32_t        flags;
  56         /* Indicates how many Compute Units share this cache
  57          * Value = 1 indicates the cache is not shared
  58          */
  59         uint32_t        num_cu_shared;
  60 };
  61 
  62 static struct kfd_gpu_cache_info kaveri_cache_info[] = {
  63         {
  64                 /* TCP L1 Cache per CU */
  65                 .cache_size = 16,
  66                 .cache_level = 1,
  67                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
  68                                 CRAT_CACHE_FLAGS_DATA_CACHE |
  69                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
  70                 .num_cu_shared = 1,
  71 
  72         },
  73         {
  74                 /* Scalar L1 Instruction Cache (in SQC module) per bank */
  75                 .cache_size = 16,
  76                 .cache_level = 1,
  77                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
  78                                 CRAT_CACHE_FLAGS_INST_CACHE |
  79                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
  80                 .num_cu_shared = 2,
  81         },
  82         {
  83                 /* Scalar L1 Data Cache (in SQC module) per bank */
  84                 .cache_size = 8,
  85                 .cache_level = 1,
  86                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
  87                                 CRAT_CACHE_FLAGS_DATA_CACHE |
  88                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
  89                 .num_cu_shared = 2,
  90         },
  91 
  92         /* TODO: Add L2 Cache information */
  93 };
  94 
  95 
  96 static struct kfd_gpu_cache_info carrizo_cache_info[] = {
  97         {
  98                 /* TCP L1 Cache per CU */
  99                 .cache_size = 16,
 100                 .cache_level = 1,
 101                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
 102                                 CRAT_CACHE_FLAGS_DATA_CACHE |
 103                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
 104                 .num_cu_shared = 1,
 105         },
 106         {
 107                 /* Scalar L1 Instruction Cache (in SQC module) per bank */
 108                 .cache_size = 8,
 109                 .cache_level = 1,
 110                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
 111                                 CRAT_CACHE_FLAGS_INST_CACHE |
 112                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
 113                 .num_cu_shared = 4,
 114         },
 115         {
 116                 /* Scalar L1 Data Cache (in SQC module) per bank. */
 117                 .cache_size = 4,
 118                 .cache_level = 1,
 119                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
 120                                 CRAT_CACHE_FLAGS_DATA_CACHE |
 121                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
 122                 .num_cu_shared = 4,
 123         },
 124 
 125         /* TODO: Add L2 Cache information */
 126 };
 127 
  128 /* NOTE: In the future, if more information is added to struct
  129  * kfd_gpu_cache_info, the following ASICs may need separate tables.
 130  */
 131 #define hawaii_cache_info kaveri_cache_info
 132 #define tonga_cache_info carrizo_cache_info
 133 #define fiji_cache_info  carrizo_cache_info
 134 #define polaris10_cache_info carrizo_cache_info
 135 #define polaris11_cache_info carrizo_cache_info
 136 #define polaris12_cache_info carrizo_cache_info
 137 #define vegam_cache_info carrizo_cache_info
 138 /* TODO - check & update Vega10 cache details */
 139 #define vega10_cache_info carrizo_cache_info
 140 #define raven_cache_info carrizo_cache_info
 141 /* TODO - check & update Navi10 cache details */
 142 #define navi10_cache_info carrizo_cache_info
 143 
 144 static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
 145                 struct crat_subtype_computeunit *cu)
 146 {
 147         dev->node_props.cpu_cores_count = cu->num_cpu_cores;
 148         dev->node_props.cpu_core_id_base = cu->processor_id_low;
 149         if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
 150                 dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
 151 
 152         pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
 153                         cu->processor_id_low);
 154 }
 155 
 156 static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
 157                 struct crat_subtype_computeunit *cu)
 158 {
 159         dev->node_props.simd_id_base = cu->processor_id_low;
 160         dev->node_props.simd_count = cu->num_simd_cores;
 161         dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
 162         dev->node_props.max_waves_per_simd = cu->max_waves_simd;
 163         dev->node_props.wave_front_size = cu->wave_front_size;
 164         dev->node_props.array_count = cu->array_count;
 165         dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
 166         dev->node_props.simd_per_cu = cu->num_simd_per_cu;
 167         dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
 168         if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
 169                 dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
 170         pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
 171 }
 172 
  173 /* kfd_parse_subtype_cu - parse a compute unit subtype and attach it to the
  174  * correct topology device present in the device_list
 175  */
 176 static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
 177                                 struct list_head *device_list)
 178 {
 179         struct kfd_topology_device *dev;
 180 
 181         pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
 182                         cu->proximity_domain, cu->hsa_capability);
 183         list_for_each_entry(dev, device_list, list) {
 184                 if (cu->proximity_domain == dev->proximity_domain) {
 185                         if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
 186                                 kfd_populated_cu_info_cpu(dev, cu);
 187 
 188                         if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
 189                                 kfd_populated_cu_info_gpu(dev, cu);
 190                         break;
 191                 }
 192         }
 193 
 194         return 0;
 195 }
 196 
 197 static struct kfd_mem_properties *
 198 find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
 199                 struct kfd_topology_device *dev)
 200 {
 201         struct kfd_mem_properties *props;
 202 
 203         list_for_each_entry(props, &dev->mem_props, list) {
 204                 if (props->heap_type == heap_type
 205                                 && props->flags == flags
 206                                 && props->width == width)
 207                         return props;
 208         }
 209 
 210         return NULL;
 211 }
  212 /* kfd_parse_subtype_mem - parse a memory subtype and attach it to the
  213  * correct topology device present in the device_list
 214  */
 215 static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
 216                                 struct list_head *device_list)
 217 {
 218         struct kfd_mem_properties *props;
 219         struct kfd_topology_device *dev;
 220         uint32_t heap_type;
 221         uint64_t size_in_bytes;
 222         uint32_t flags = 0;
 223         uint32_t width;
 224 
 225         pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
 226                         mem->proximity_domain);
 227         list_for_each_entry(dev, device_list, list) {
 228                 if (mem->proximity_domain == dev->proximity_domain) {
 229                         /* We're on GPU node */
 230                         if (dev->node_props.cpu_cores_count == 0) {
 231                                 /* APU */
 232                                 if (mem->visibility_type == 0)
 233                                         heap_type =
 234                                                 HSA_MEM_HEAP_TYPE_FB_PRIVATE;
 235                                 /* dGPU */
 236                                 else
 237                                         heap_type = mem->visibility_type;
 238                         } else
 239                                 heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
 240 
 241                         if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
 242                                 flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
 243                         if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
 244                                 flags |= HSA_MEM_FLAGS_NON_VOLATILE;
 245 
 246                         size_in_bytes =
 247                                 ((uint64_t)mem->length_high << 32) +
 248                                                         mem->length_low;
 249                         width = mem->width;
 250 
 251                         /* Multiple banks of the same type are aggregated into
 252                          * one. User mode doesn't care about multiple physical
 253                          * memory segments. It's managed as a single virtual
 254                          * heap for user mode.
 255                          */
 256                         props = find_subtype_mem(heap_type, flags, width, dev);
 257                         if (props) {
 258                                 props->size_in_bytes += size_in_bytes;
 259                                 break;
 260                         }
 261 
 262                         props = kfd_alloc_struct(props);
 263                         if (!props)
 264                                 return -ENOMEM;
 265 
 266                         props->heap_type = heap_type;
 267                         props->flags = flags;
 268                         props->size_in_bytes = size_in_bytes;
 269                         props->width = width;
 270 
 271                         dev->node_props.mem_banks_count++;
 272                         list_add_tail(&props->list, &dev->mem_props);
 273 
 274                         break;
 275                 }
 276         }
 277 
 278         return 0;
 279 }
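
     /* Worked example (hypothetical CRAT values): an 8 GiB bank would be
      * reported as length_high = 0x2 and length_low = 0x0, giving
      * size_in_bytes = ((uint64_t)0x2 << 32) + 0x0 = 0x200000000.
      * A second 8 GiB bank with the same heap_type/flags/width is simply
      * added to the existing properties, so user mode sees one 16 GiB heap.
      */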
 280 
  281 /* kfd_parse_subtype_cache - parse a cache subtype and attach it to the
  282  * correct topology device present in the device_list
 283  */
 284 static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
 285                         struct list_head *device_list)
 286 {
 287         struct kfd_cache_properties *props;
 288         struct kfd_topology_device *dev;
 289         uint32_t id;
 290         uint32_t total_num_of_cu;
 291 
 292         id = cache->processor_id_low;
 293 
 294         pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
 295         list_for_each_entry(dev, device_list, list) {
 296                 total_num_of_cu = (dev->node_props.array_count *
 297                                         dev->node_props.cu_per_simd_array);
 298 
  299                 /* Cache information in CRAT doesn't carry proximity_domain
  300                  * information, as a cache is associated with a CPU core or
  301                  * a GPU Compute Unit. So map the cache using the CPU core ID
  302                  * or SIMD (GPU) ID.
  303                  * TODO: This works because currently we can safely assume
  304                  * that Compute Units are parsed before caches are parsed.
  305                  * In the future, remove this dependency.
  306                  */
 307                 if ((id >= dev->node_props.cpu_core_id_base &&
 308                         id <= dev->node_props.cpu_core_id_base +
 309                                 dev->node_props.cpu_cores_count) ||
 310                         (id >= dev->node_props.simd_id_base &&
 311                         id < dev->node_props.simd_id_base +
 312                                 total_num_of_cu)) {
 313                         props = kfd_alloc_struct(props);
 314                         if (!props)
 315                                 return -ENOMEM;
 316 
 317                         props->processor_id_low = id;
 318                         props->cache_level = cache->cache_level;
 319                         props->cache_size = cache->cache_size;
 320                         props->cacheline_size = cache->cache_line_size;
 321                         props->cachelines_per_tag = cache->lines_per_tag;
 322                         props->cache_assoc = cache->associativity;
 323                         props->cache_latency = cache->cache_latency;
 324                         memcpy(props->sibling_map, cache->sibling_map,
 325                                         sizeof(props->sibling_map));
 326 
 327                         if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
 328                                 props->cache_type |= HSA_CACHE_TYPE_DATA;
 329                         if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
 330                                 props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
 331                         if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
 332                                 props->cache_type |= HSA_CACHE_TYPE_CPU;
 333                         if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
 334                                 props->cache_type |= HSA_CACHE_TYPE_HSACU;
 335 
 336                         dev->cache_count++;
 337                         dev->node_props.caches_count++;
 338                         list_add_tail(&props->list, &dev->cache_props);
 339 
 340                         break;
 341                 }
 342         }
 343 
 344         return 0;
 345 }
 346 
  347 /* kfd_parse_subtype_iolink - parse an iolink subtype and attach it to the
  348  * correct topology device present in the device_list
 349  */
 350 static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
 351                                         struct list_head *device_list)
 352 {
 353         struct kfd_iolink_properties *props = NULL, *props2;
 354         struct kfd_topology_device *dev, *to_dev;
 355         uint32_t id_from;
 356         uint32_t id_to;
 357 
 358         id_from = iolink->proximity_domain_from;
 359         id_to = iolink->proximity_domain_to;
 360 
  361         pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to=%d\n",
 362                         id_from, id_to);
 363         list_for_each_entry(dev, device_list, list) {
 364                 if (id_from == dev->proximity_domain) {
 365                         props = kfd_alloc_struct(props);
 366                         if (!props)
 367                                 return -ENOMEM;
 368 
 369                         props->node_from = id_from;
 370                         props->node_to = id_to;
 371                         props->ver_maj = iolink->version_major;
 372                         props->ver_min = iolink->version_minor;
 373                         props->iolink_type = iolink->io_interface_type;
 374 
 375                         if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
 376                                 props->weight = 20;
 377                         else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
 378                                 props->weight = 15 * iolink->num_hops_xgmi;
 379                         else
 380                                 props->weight = node_distance(id_from, id_to);
 381 
 382                         props->min_latency = iolink->minimum_latency;
 383                         props->max_latency = iolink->maximum_latency;
 384                         props->min_bandwidth = iolink->minimum_bandwidth_mbs;
 385                         props->max_bandwidth = iolink->maximum_bandwidth_mbs;
 386                         props->rec_transfer_size =
 387                                         iolink->recommended_transfer_size;
 388 
 389                         dev->io_link_count++;
 390                         dev->node_props.io_links_count++;
 391                         list_add_tail(&props->list, &dev->io_link_props);
 392                         break;
 393                 }
 394         }
 395 
  396         /* CPU topology is created before GPUs are detected, so CPU->GPU
  397          * links are not built at that time. If a PCIe type is discovered, it
  398          * means a GPU was detected and we are adding a GPU->CPU link to the
  399          * topology. At this time, also add the corresponding CPU->GPU link
  400          * if the GPU has a large BAR.
  401          * For xGMI, only one direction of the link is present in the CRAT
  402          * table, so add the corresponding reversed link now.
  403          */
 404         if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
 405                 to_dev = kfd_topology_device_by_proximity_domain(id_to);
 406                 if (!to_dev)
 407                         return -ENODEV;
 408                 /* same everything but the other direction */
  409                 props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
                      if (!props2)
                              return -ENOMEM;
 410                 props2->node_from = id_to;
 411                 props2->node_to = id_from;
 412                 props2->kobj = NULL;
 413                 to_dev->io_link_count++;
 414                 to_dev->node_props.io_links_count++;
 415                 list_add_tail(&props2->list, &to_dev->io_link_props);
 416         }
 417 
 418         return 0;
 419 }
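
     /* Resulting link weights, under the assumptions above: a PCIe link
      * always gets weight 20; an xGMI link with num_hops_xgmi = 2 gets
      * 15 * 2 = 30; anything else falls back to node_distance(), e.g. 20
      * (REMOTE_DISTANCE) between two sockets with a default SLIT.
      */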
 420 
 421 /* kfd_parse_subtype - parse subtypes and attach it to correct topology device
 422  * present in the device_list
 423  *      @sub_type_hdr - subtype section of crat_image
 424  *      @device_list - list of topology devices present in this crat_image
 425  */
 426 static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
 427                                 struct list_head *device_list)
 428 {
 429         struct crat_subtype_computeunit *cu;
 430         struct crat_subtype_memory *mem;
 431         struct crat_subtype_cache *cache;
 432         struct crat_subtype_iolink *iolink;
 433         int ret = 0;
 434 
 435         switch (sub_type_hdr->type) {
 436         case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
 437                 cu = (struct crat_subtype_computeunit *)sub_type_hdr;
 438                 ret = kfd_parse_subtype_cu(cu, device_list);
 439                 break;
 440         case CRAT_SUBTYPE_MEMORY_AFFINITY:
 441                 mem = (struct crat_subtype_memory *)sub_type_hdr;
 442                 ret = kfd_parse_subtype_mem(mem, device_list);
 443                 break;
 444         case CRAT_SUBTYPE_CACHE_AFFINITY:
 445                 cache = (struct crat_subtype_cache *)sub_type_hdr;
 446                 ret = kfd_parse_subtype_cache(cache, device_list);
 447                 break;
 448         case CRAT_SUBTYPE_TLB_AFFINITY:
 449                 /*
 450                  * For now, nothing to do here
 451                  */
 452                 pr_debug("Found TLB entry in CRAT table (not processing)\n");
 453                 break;
 454         case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
 455                 /*
 456                  * For now, nothing to do here
 457                  */
 458                 pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
 459                 break;
 460         case CRAT_SUBTYPE_IOLINK_AFFINITY:
 461                 iolink = (struct crat_subtype_iolink *)sub_type_hdr;
 462                 ret = kfd_parse_subtype_iolink(iolink, device_list);
 463                 break;
 464         default:
 465                 pr_warn("Unknown subtype %d in CRAT\n",
 466                                 sub_type_hdr->type);
 467         }
 468 
 469         return ret;
 470 }
 471 
 472 /* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT
  473  * create a kfd_topology_device and add it to device_list. Also parse
  474  * CRAT subtypes and attach them to the appropriate kfd_topology_device
 475  *      @crat_image - input image containing CRAT
 476  *      @device_list - [OUT] list of kfd_topology_device generated after
 477  *                     parsing crat_image
 478  *      @proximity_domain - Proximity domain of the first device in the table
 479  *
  480  *      Return - 0 if successful, else a negative value
 481  */
 482 int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
 483                          uint32_t proximity_domain)
 484 {
 485         struct kfd_topology_device *top_dev = NULL;
 486         struct crat_subtype_generic *sub_type_hdr;
 487         uint16_t node_id;
 488         int ret = 0;
 489         struct crat_header *crat_table = (struct crat_header *)crat_image;
 490         uint16_t num_nodes;
 491         uint32_t image_len;
 492 
 493         if (!crat_image)
 494                 return -EINVAL;
 495 
 496         if (!list_empty(device_list)) {
  497                 pr_warn("Error: device list should be empty\n");
 498                 return -EINVAL;
 499         }
 500 
 501         num_nodes = crat_table->num_domains;
 502         image_len = crat_table->length;
 503 
 504         pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
 505 
 506         for (node_id = 0; node_id < num_nodes; node_id++) {
 507                 top_dev = kfd_create_topology_device(device_list);
 508                 if (!top_dev)
 509                         break;
 510                 top_dev->proximity_domain = proximity_domain++;
 511         }
 512 
 513         if (!top_dev) {
 514                 ret = -ENOMEM;
 515                 goto err;
 516         }
 517 
 518         memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
 519         memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
 520                         CRAT_OEMTABLEID_LENGTH);
 521         top_dev->oem_revision = crat_table->oem_revision;
 522 
 523         sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
 524         while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
 525                         ((char *)crat_image) + image_len) {
 526                 if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
 527                         ret = kfd_parse_subtype(sub_type_hdr, device_list);
 528                         if (ret)
 529                                 break;
 530                 }
 531 
 532                 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
 533                                 sub_type_hdr->length);
 534         }
 535 
 536 err:
 537         if (ret)
 538                 kfd_release_topology_device_list(device_list);
 539 
 540         return ret;
 541 }
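
     /* Layout of the CRAT image that the walk above assumes (sizes are
      * illustrative):
      *
      *   +--------------------+ <- crat_image
      *   | struct crat_header |    length = total image size
      *   +--------------------+ <- crat_table + 1
      *   | subtype (CU)       |    sub_type_hdr->length bytes
      *   +--------------------+
      *   | subtype (memory)   |
      *   +--------------------+
      *   | ...                |
      *   +--------------------+ <- crat_image + image_len
      *
      * Each iteration advances by sub_type_hdr->length; the walk trusts
      * the lengths to be non-zero because the table comes from ACPI or
      * from the VCRAT code below.
      */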
 542 
 543 /* Helper function. See kfd_fill_gpu_cache_info for parameter description */
 544 static int fill_in_pcache(struct crat_subtype_cache *pcache,
 545                                 struct kfd_gpu_cache_info *pcache_info,
 546                                 struct kfd_cu_info *cu_info,
 547                                 int mem_available,
 548                                 int cu_bitmask,
 549                                 int cache_type, unsigned int cu_processor_id,
 550                                 int cu_block)
 551 {
 552         unsigned int cu_sibling_map_mask;
 553         int first_active_cu;
 554 
 555         /* First check if enough memory is available */
 556         if (sizeof(struct crat_subtype_cache) > mem_available)
 557                 return -ENOMEM;
 558 
 559         cu_sibling_map_mask = cu_bitmask;
 560         cu_sibling_map_mask >>= cu_block;
 561         cu_sibling_map_mask &=
 562                 ((1 << pcache_info[cache_type].num_cu_shared) - 1);
 563         first_active_cu = ffs(cu_sibling_map_mask);
 564 
  565         /* A CU could be inactive. In case of a shared cache, find the first
  566          * active CU; in case of a non-shared cache, check whether the CU is
  567          * inactive and, if so, skip it.
  568          */
 569         if (first_active_cu) {
 570                 memset(pcache, 0, sizeof(struct crat_subtype_cache));
 571                 pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
 572                 pcache->length = sizeof(struct crat_subtype_cache);
 573                 pcache->flags = pcache_info[cache_type].flags;
 574                 pcache->processor_id_low = cu_processor_id
 575                                          + (first_active_cu - 1);
 576                 pcache->cache_level = pcache_info[cache_type].cache_level;
 577                 pcache->cache_size = pcache_info[cache_type].cache_size;
 578 
 579                 /* Sibling map is w.r.t processor_id_low, so shift out
 580                  * inactive CU
 581                  */
 582                 cu_sibling_map_mask =
 583                         cu_sibling_map_mask >> (first_active_cu - 1);
 584 
 585                 pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
 586                 pcache->sibling_map[1] =
 587                                 (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
 588                 pcache->sibling_map[2] =
 589                                 (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
 590                 pcache->sibling_map[3] =
 591                                 (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
 592                 return 0;
 593         }
 594         return 1;
 595 }
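
     /* Worked example (hypothetical numbers): with cu_bitmask = 0b1000,
      * cu_block = 2 and num_cu_shared = 2, cu_sibling_map_mask becomes
      * (0b1000 >> 2) & 0b11 = 0b10. ffs() returns 2, so the first CU of
      * the block is inactive: processor_id_low = cu_processor_id + 1 and
      * the sibling map is shifted down to 0b01. Had the mask been 0, the
      * function would return 1 and the caller would skip this entry.
      */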
 596 
 597 /* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
 598  * tables
 599  *
 600  *      @kdev - [IN] GPU device
  601  *      @gpu_processor_id - [IN] GPU processor ID with which these caches
  602  *                          are associated
 603  *      @available_size - [IN] Amount of memory available in pcache
 604  *      @cu_info - [IN] Compute Unit info obtained from KGD
 605  *      @pcache - [OUT] memory into which cache data is to be filled in.
 606  *      @size_filled - [OUT] amount of data used up in pcache.
 607  *      @num_of_entries - [OUT] number of caches added
 608  */
 609 static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
 610                         int gpu_processor_id,
 611                         int available_size,
 612                         struct kfd_cu_info *cu_info,
 613                         struct crat_subtype_cache *pcache,
 614                         int *size_filled,
 615                         int *num_of_entries)
 616 {
 617         struct kfd_gpu_cache_info *pcache_info;
 618         int num_of_cache_types = 0;
 619         int i, j, k;
 620         int ct = 0;
 621         int mem_available = available_size;
 622         unsigned int cu_processor_id;
 623         int ret;
 624 
 625         switch (kdev->device_info->asic_family) {
 626         case CHIP_KAVERI:
 627                 pcache_info = kaveri_cache_info;
 628                 num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
 629                 break;
 630         case CHIP_HAWAII:
 631                 pcache_info = hawaii_cache_info;
 632                 num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
 633                 break;
 634         case CHIP_CARRIZO:
 635                 pcache_info = carrizo_cache_info;
 636                 num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
 637                 break;
 638         case CHIP_TONGA:
 639                 pcache_info = tonga_cache_info;
 640                 num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
 641                 break;
 642         case CHIP_FIJI:
 643                 pcache_info = fiji_cache_info;
 644                 num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
 645                 break;
 646         case CHIP_POLARIS10:
 647                 pcache_info = polaris10_cache_info;
 648                 num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
 649                 break;
 650         case CHIP_POLARIS11:
 651                 pcache_info = polaris11_cache_info;
 652                 num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
 653                 break;
 654         case CHIP_POLARIS12:
 655                 pcache_info = polaris12_cache_info;
 656                 num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
 657                 break;
 658         case CHIP_VEGAM:
 659                 pcache_info = vegam_cache_info;
 660                 num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
 661                 break;
 662         case CHIP_VEGA10:
 663         case CHIP_VEGA12:
 664         case CHIP_VEGA20:
 665         case CHIP_ARCTURUS:
 666                 pcache_info = vega10_cache_info;
 667                 num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
 668                 break;
 669         case CHIP_RAVEN:
 670                 pcache_info = raven_cache_info;
 671                 num_of_cache_types = ARRAY_SIZE(raven_cache_info);
 672                 break;
 673         case CHIP_NAVI10:
 674                 pcache_info = navi10_cache_info;
 675                 num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
 676                 break;
 677         default:
 678                 return -EINVAL;
 679         }
 680 
 681         *size_filled = 0;
 682         *num_of_entries = 0;
 683 
  684         /* For each type of cache listed in the kfd_gpu_cache_info table,
  685          * go through all available Compute Units.
  686          * In the [i,j,k] loop:
  687          *              if kfd_gpu_cache_info.num_cu_shared == 1,
  688          *                      every available CU is visited;
  689          *              if kfd_gpu_cache_info.num_cu_shared != 1,
  690          *                      only one CU from each group of
  691          *                      num_cu_shared CUs is considered
  692          */
 693 
 694         for (ct = 0; ct < num_of_cache_types; ct++) {
 695                 cu_processor_id = gpu_processor_id;
 696                 for (i = 0; i < cu_info->num_shader_engines; i++) {
 697                         for (j = 0; j < cu_info->num_shader_arrays_per_engine;
 698                                 j++) {
 699                                 for (k = 0; k < cu_info->num_cu_per_sh;
 700                                         k += pcache_info[ct].num_cu_shared) {
 701 
 702                                         ret = fill_in_pcache(pcache,
 703                                                 pcache_info,
 704                                                 cu_info,
 705                                                 mem_available,
 706                                                 cu_info->cu_bitmap[i][j],
 707                                                 ct,
 708                                                 cu_processor_id,
 709                                                 k);
 710 
 711                                         if (ret < 0)
 712                                                 break;
 713 
 714                                         if (!ret) {
 715                                                 pcache++;
 716                                                 (*num_of_entries)++;
 717                                                 mem_available -=
 718                                                         sizeof(*pcache);
 719                                                 (*size_filled) +=
 720                                                         sizeof(*pcache);
 721                                         }
 722 
 723                                         /* Move to next CU block */
 724                                         cu_processor_id +=
 725                                                 pcache_info[ct].num_cu_shared;
 726                                 }
 727                         }
 728                 }
 729         }
 730 
 731         pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);
 732 
 733         return 0;
 734 }
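
     /* Sketch with made-up GFX9-like numbers: 4 shader engines, 1 shader
      * array per engine and 16 CUs per array. For a per-CU cache
      * (num_cu_shared = 1) the innermost loop runs 4 * 1 * 16 = 64 times;
      * for an SQC cache shared by 3 CUs it runs DIV_ROUND_UP(16, 3) = 6
      * times per array, with cu_processor_id advancing by 3 each step.
      */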
 735 
 736 /*
 737  * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
 738  * copies CRAT from ACPI (if available).
 739  * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 740  *
 741  *      @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
 742  *                   crat_image will be NULL
 743  *      @size: [OUT] size of crat_image
 744  *
 745  *      Return 0 if successful else return error code
 746  */
 747 int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
 748 {
 749         struct acpi_table_header *crat_table;
 750         acpi_status status;
 751         void *pcrat_image;
 752 
 753         if (!crat_image)
 754                 return -EINVAL;
 755 
 756         *crat_image = NULL;
 757 
 758         /* Fetch the CRAT table from ACPI */
 759         status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
 760         if (status == AE_NOT_FOUND) {
 761                 pr_warn("CRAT table not found\n");
 762                 return -ENODATA;
 763         } else if (ACPI_FAILURE(status)) {
 764                 const char *err = acpi_format_exception(status);
 765 
 766                 pr_err("CRAT table error: %s\n", err);
 767                 return -EINVAL;
 768         }
 769 
 770         if (ignore_crat) {
 771                 pr_info("CRAT table disabled by module option\n");
 772                 return -ENODATA;
 773         }
 774 
 775         pcrat_image = kmemdup(crat_table, crat_table->length, GFP_KERNEL);
 776         if (!pcrat_image)
 777                 return -ENOMEM;
 778 
 779         *crat_image = pcrat_image;
 780         *size = crat_table->length;
 781 
 782         return 0;
 783 }
 784 
 785 /* Memory required to create Virtual CRAT.
  786  * Since there is no easy way to predict the amount of memory required, the
  787  * following amounts are allocated for the CPU and GPU Virtual CRAT. This is
  788  * expected to cover all known conditions. But to be safe, additional checks
  789  * are put in the code to ensure we don't overwrite the allocation.
 790  */
 791 #define VCRAT_SIZE_FOR_CPU      (2 * PAGE_SIZE)
 792 #define VCRAT_SIZE_FOR_GPU      (4 * PAGE_SIZE)
 793 
 794 /* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
 795  *
 796  *      @numa_node_id: CPU NUMA node id
  797  *      @avail_size: Available size remaining in the crat_image buffer
      *      @proximity_domain: Proximity domain to assign to this CPU node
  798  *      @sub_type_hdr: Memory into which compute info will be filled in
  799  *
  800  *      Return 0 if successful, else a negative value
 801  */
 802 static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
 803                                 int proximity_domain,
 804                                 struct crat_subtype_computeunit *sub_type_hdr)
 805 {
 806         const struct cpumask *cpumask;
 807 
 808         *avail_size -= sizeof(struct crat_subtype_computeunit);
 809         if (*avail_size < 0)
 810                 return -ENOMEM;
 811 
 812         memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
 813 
 814         /* Fill in subtype header data */
 815         sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
 816         sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
 817         sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
 818 
 819         cpumask = cpumask_of_node(numa_node_id);
 820 
 821         /* Fill in CU data */
 822         sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
 823         sub_type_hdr->proximity_domain = proximity_domain;
 824         sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
 825         if (sub_type_hdr->processor_id_low == -1)
 826                 return -EINVAL;
 827 
 828         sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);
 829 
 830         return 0;
 831 }
 832 
 833 /* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
 834  *
 835  *      @numa_node_id: CPU NUMA node id
  836  *      @avail_size: Available size remaining in the crat_image buffer
      *      @proximity_domain: Proximity domain to assign to this memory node
  837  *      @sub_type_hdr: Memory into which memory info will be filled in
  838  *
  839  *      Return 0 if successful, else a negative value
 840  */
 841 static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
 842                         int proximity_domain,
 843                         struct crat_subtype_memory *sub_type_hdr)
 844 {
 845         uint64_t mem_in_bytes = 0;
 846         pg_data_t *pgdat;
 847         int zone_type;
 848 
 849         *avail_size -= sizeof(struct crat_subtype_memory);
 850         if (*avail_size < 0)
 851                 return -ENOMEM;
 852 
 853         memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
 854 
 855         /* Fill in subtype header data */
 856         sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
 857         sub_type_hdr->length = sizeof(struct crat_subtype_memory);
 858         sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
 859 
 860         /* Fill in Memory Subunit data */
 861 
 862         /* Unlike si_meminfo, si_meminfo_node is not exported. So
 863          * the following lines are duplicated from si_meminfo_node
 864          * function
 865          */
 866         pgdat = NODE_DATA(numa_node_id);
 867         for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
 868                 mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
 869         mem_in_bytes <<= PAGE_SHIFT;
 870 
 871         sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
 872         sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
 873         sub_type_hdr->proximity_domain = proximity_domain;
 874 
 875         return 0;
 876 }
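
     /* Example (hypothetical node, assuming 4 KiB pages): 4 GiB of managed
      * memory is 0x100000 pages, so mem_in_bytes = 0x100000 << 12 =
      * 0x100000000, reported as length_high = 0x1 and length_low = 0x0.
      */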
 877 
 878 #ifdef CONFIG_X86_64
 879 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
 880                                 uint32_t *num_entries,
 881                                 struct crat_subtype_iolink *sub_type_hdr)
 882 {
 883         int nid;
 884         struct cpuinfo_x86 *c = &cpu_data(0);
 885         uint8_t link_type;
 886 
 887         if (c->x86_vendor == X86_VENDOR_AMD)
 888                 link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
 889         else
 890                 link_type = CRAT_IOLINK_TYPE_QPI_1_1;
 891 
 892         *num_entries = 0;
 893 
 894         /* Create IO links from this node to other CPU nodes */
 895         for_each_online_node(nid) {
 896                 if (nid == numa_node_id) /* node itself */
 897                         continue;
 898 
 899                 *avail_size -= sizeof(struct crat_subtype_iolink);
 900                 if (*avail_size < 0)
 901                         return -ENOMEM;
 902 
 903                 memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
 904 
 905                 /* Fill in subtype header data */
 906                 sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
 907                 sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
 908                 sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
 909 
 910                 /* Fill in IO link data */
 911                 sub_type_hdr->proximity_domain_from = numa_node_id;
 912                 sub_type_hdr->proximity_domain_to = nid;
 913                 sub_type_hdr->io_interface_type = link_type;
 914 
 915                 (*num_entries)++;
 916                 sub_type_hdr++;
 917         }
 918 
 919         return 0;
 920 }
 921 #endif
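
     /* Sketch (assumed two-socket AMD system, online nodes 0 and 1): for
      * numa_node_id = 0 the loop above emits one HYPERTRANSPORT iolink
      * 0 -> 1 and sets *num_entries = 1. In general each CPU node gets
      * nr_online_nodes - 1 outgoing links, and the caller advances
      * sub_type_hdr by length * entries.
      */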
 922 
 923 /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
 924  *
 925  *      @pcrat_image: Fill in VCRAT for CPU
 926  *      @size:  [IN] allocated size of crat_image.
 927  *              [OUT] actual size of data filled in crat_image
 928  */
 929 static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 930 {
 931         struct crat_header *crat_table = (struct crat_header *)pcrat_image;
 932         struct acpi_table_header *acpi_table;
 933         acpi_status status;
 934         struct crat_subtype_generic *sub_type_hdr;
 935         int avail_size = *size;
 936         int numa_node_id;
 937 #ifdef CONFIG_X86_64
 938         uint32_t entries = 0;
 939 #endif
 940         int ret = 0;
 941 
 942         if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
 943                 return -EINVAL;
 944 
 945         /* Fill in CRAT Header.
 946          * Modify length and total_entries as subunits are added.
 947          */
 948         avail_size -= sizeof(struct crat_header);
 949         if (avail_size < 0)
 950                 return -ENOMEM;
 951 
 952         memset(crat_table, 0, sizeof(struct crat_header));
 953         memcpy(&crat_table->signature, CRAT_SIGNATURE,
 954                         sizeof(crat_table->signature));
 955         crat_table->length = sizeof(struct crat_header);
 956 
 957         status = acpi_get_table("DSDT", 0, &acpi_table);
 958         if (status != AE_OK)
 959                 pr_warn("DSDT table not found for OEM information\n");
 960         else {
 961                 crat_table->oem_revision = acpi_table->revision;
 962                 memcpy(crat_table->oem_id, acpi_table->oem_id,
 963                                 CRAT_OEMID_LENGTH);
 964                 memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
 965                                 CRAT_OEMTABLEID_LENGTH);
 966         }
 967         crat_table->total_entries = 0;
 968         crat_table->num_domains = 0;
 969 
 970         sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
 971 
 972         for_each_online_node(numa_node_id) {
 973                 if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
 974                         continue;
 975 
 976                 /* Fill in Subtype: Compute Unit */
 977                 ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
 978                         crat_table->num_domains,
 979                         (struct crat_subtype_computeunit *)sub_type_hdr);
 980                 if (ret < 0)
 981                         return ret;
 982                 crat_table->length += sub_type_hdr->length;
 983                 crat_table->total_entries++;
 984 
 985                 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
 986                         sub_type_hdr->length);
 987 
 988                 /* Fill in Subtype: Memory */
 989                 ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
 990                         crat_table->num_domains,
 991                         (struct crat_subtype_memory *)sub_type_hdr);
 992                 if (ret < 0)
 993                         return ret;
 994                 crat_table->length += sub_type_hdr->length;
 995                 crat_table->total_entries++;
 996 
 997                 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
 998                         sub_type_hdr->length);
 999 
1000                 /* Fill in Subtype: IO Link */
1001 #ifdef CONFIG_X86_64
1002                 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
1003                                 &entries,
1004                                 (struct crat_subtype_iolink *)sub_type_hdr);
1005                 if (ret < 0)
1006                         return ret;
1007                 crat_table->length += (sub_type_hdr->length * entries);
1008                 crat_table->total_entries += entries;
1009 
1010                 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1011                                 sub_type_hdr->length * entries);
1012 #else
 1013                 pr_info("IO link not available for non-x86 platforms\n");
1014 #endif
1015 
1016                 crat_table->num_domains++;
1017         }
1018 
1019         /* TODO: Add cache Subtype for CPU.
1020          * Currently, CPU cache information is available in function
1021          * detect_cache_attributes(cpu) defined in the file
1022          * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
1023          * exported and to get the same information the code needs to be
1024          * duplicated.
1025          */
1026 
1027         *size = crat_table->length;
1028         pr_info("Virtual CRAT table created for CPU\n");
1029 
1030         return 0;
1031 }
1032 
1033 static int kfd_fill_gpu_memory_affinity(int *avail_size,
1034                 struct kfd_dev *kdev, uint8_t type, uint64_t size,
1035                 struct crat_subtype_memory *sub_type_hdr,
1036                 uint32_t proximity_domain,
1037                 const struct kfd_local_mem_info *local_mem_info)
1038 {
1039         *avail_size -= sizeof(struct crat_subtype_memory);
1040         if (*avail_size < 0)
1041                 return -ENOMEM;
1042 
1043         memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
1044         sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
1045         sub_type_hdr->length = sizeof(struct crat_subtype_memory);
1046         sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
1047 
1048         sub_type_hdr->proximity_domain = proximity_domain;
1049 
1050         pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
1051                         type, size);
1052 
1053         sub_type_hdr->length_low = lower_32_bits(size);
1054         sub_type_hdr->length_high = upper_32_bits(size);
1055 
1056         sub_type_hdr->width = local_mem_info->vram_width;
1057         sub_type_hdr->visibility_type = type;
1058 
1059         return 0;
1060 }
1061 
 1062 /* kfd_fill_gpu_direct_io_link_to_cpu - Fill in the direct io link from a GPU
 1063  * to its NUMA node
 1064  *      @avail_size: Available size remaining in the crat_image buffer
 1065  *      @kdev - [IN] GPU device
 1066  *      @sub_type_hdr: Memory into which io link info will be filled in
 1067  *      @proximity_domain - proximity domain of the GPU node
 1068  *
 1069  *      Return 0 if successful, else a negative value
1070  */
1071 static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
1072                         struct kfd_dev *kdev,
1073                         struct crat_subtype_iolink *sub_type_hdr,
1074                         uint32_t proximity_domain)
1075 {
1076         *avail_size -= sizeof(struct crat_subtype_iolink);
1077         if (*avail_size < 0)
1078                 return -ENOMEM;
1079 
1080         memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1081 
1082         /* Fill in subtype header data */
1083         sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1084         sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1085         sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
1086         if (kfd_dev_is_large_bar(kdev))
1087                 sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1088 
1089         /* Fill in IOLINK subtype.
1090          * TODO: Fill-in other fields of iolink subtype
1091          */
1092         sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
1093         sub_type_hdr->proximity_domain_from = proximity_domain;
1094 #ifdef CONFIG_NUMA
1095         if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
1096                 sub_type_hdr->proximity_domain_to = 0;
1097         else
1098                 sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
1099 #else
1100         sub_type_hdr->proximity_domain_to = 0;
1101 #endif
1102         return 0;
1103 }
1104 
1105 static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
1106                         struct kfd_dev *kdev,
1107                         struct kfd_dev *peer_kdev,
1108                         struct crat_subtype_iolink *sub_type_hdr,
1109                         uint32_t proximity_domain_from,
1110                         uint32_t proximity_domain_to)
1111 {
1112         *avail_size -= sizeof(struct crat_subtype_iolink);
1113         if (*avail_size < 0)
1114                 return -ENOMEM;
1115 
1116         memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1117 
1118         sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1119         sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1120         sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
1121                                CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1122 
1123         sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
1124         sub_type_hdr->proximity_domain_from = proximity_domain_from;
1125         sub_type_hdr->proximity_domain_to = proximity_domain_to;
1126         sub_type_hdr->num_hops_xgmi =
1127                 amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
1128         return 0;
1129 }
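
     /* E.g. (assumed values): two GPUs one xGMI hop apart get
      * num_hops_xgmi = 1 here, which kfd_parse_subtype_iolink later
      * turns into a link weight of 15 * 1 = 15.
      */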
1130 
 1131 /* kfd_create_vcrat_image_gpu - Create Virtual CRAT for GPU
1132  *
1133  *      @pcrat_image: Fill in VCRAT for GPU
1134  *      @size:  [IN] allocated size of crat_image.
1135  *              [OUT] actual size of data filled in crat_image
1136  */
1137 static int kfd_create_vcrat_image_gpu(void *pcrat_image,
1138                                       size_t *size, struct kfd_dev *kdev,
1139                                       uint32_t proximity_domain)
1140 {
1141         struct crat_header *crat_table = (struct crat_header *)pcrat_image;
1142         struct crat_subtype_generic *sub_type_hdr;
1143         struct kfd_local_mem_info local_mem_info;
1144         struct kfd_topology_device *peer_dev;
1145         struct crat_subtype_computeunit *cu;
1146         struct kfd_cu_info cu_info;
1147         int avail_size = *size;
1148         uint32_t total_num_of_cu;
1149         int num_of_cache_entries = 0;
1150         int cache_mem_filled = 0;
1151         uint32_t nid = 0;
1152         int ret = 0;
1153 
1154         if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
1155                 return -EINVAL;
1156 
1157         /* Fill the CRAT Header.
1158          * Modify length and total_entries as subunits are added.
1159          */
1160         avail_size -= sizeof(struct crat_header);
1161         if (avail_size < 0)
1162                 return -ENOMEM;
1163 
1164         memset(crat_table, 0, sizeof(struct crat_header));
1165 
1166         memcpy(&crat_table->signature, CRAT_SIGNATURE,
1167                         sizeof(crat_table->signature));
 1168         /* Change length as we add more subtypes */
1169         crat_table->length = sizeof(struct crat_header);
1170         crat_table->num_domains = 1;
1171         crat_table->total_entries = 0;
1172 
1173         /* Fill in Subtype: Compute Unit
1174          * First fill in the sub type header and then sub type data
1175          */
1176         avail_size -= sizeof(struct crat_subtype_computeunit);
1177         if (avail_size < 0)
1178                 return -ENOMEM;
1179 
1180         sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
1181         memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
1182 
1183         sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
1184         sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
1185         sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1186 
1187         /* Fill CU subtype data */
1188         cu = (struct crat_subtype_computeunit *)sub_type_hdr;
1189         cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
1190         cu->proximity_domain = proximity_domain;
1191 
1192         amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
1193         cu->num_simd_per_cu = cu_info.simd_per_cu;
1194         cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
1195         cu->max_waves_simd = cu_info.max_waves_per_simd;
1196 
1197         cu->wave_front_size = cu_info.wave_front_size;
1198         cu->array_count = cu_info.num_shader_arrays_per_engine *
1199                 cu_info.num_shader_engines;
1200         total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
1201         cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
1202         cu->num_cu_per_array = cu_info.num_cu_per_sh;
1203         cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
1204         cu->num_banks = cu_info.num_shader_engines;
1205         cu->lds_size_in_kb = cu_info.lds_size;
1206 
1207         cu->hsa_capability = 0;
1208 
1209         /* Check if this node supports IOMMU. During parsing this flag will
1210          * translate to HSA_CAP_ATS_PRESENT
1211          */
1212         if (!kfd_iommu_check_device(kdev))
1213                 cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
1214 
1215         crat_table->length += sub_type_hdr->length;
1216         crat_table->total_entries++;
1217 
1218         /* Fill in Subtype: Memory. Only on systems with large BAR (no
1219          * private FB), report memory as public. On other systems
1220          * report the total FB size (public+private) as a single
1221          * private heap.
1222          */
1223         amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
1224         sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1225                         sub_type_hdr->length);
1226 
1227         if (debug_largebar)
1228                 local_mem_info.local_mem_size_private = 0;
1229 
1230         if (local_mem_info.local_mem_size_private == 0)
1231                 ret = kfd_fill_gpu_memory_affinity(&avail_size,
1232                                 kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
1233                                 local_mem_info.local_mem_size_public,
1234                                 (struct crat_subtype_memory *)sub_type_hdr,
1235                                 proximity_domain,
1236                                 &local_mem_info);
1237         else
1238                 ret = kfd_fill_gpu_memory_affinity(&avail_size,
1239                                 kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
1240                                 local_mem_info.local_mem_size_public +
1241                                 local_mem_info.local_mem_size_private,
1242                                 (struct crat_subtype_memory *)sub_type_hdr,
1243                                 proximity_domain,
1244                                 &local_mem_info);
1245         if (ret < 0)
1246                 return ret;
1247 
1248         crat_table->length += sizeof(struct crat_subtype_memory);
1249         crat_table->total_entries++;
1250 
1251         /* TODO: Fill in cache information. This information is NOT readily
1252          * available in KGD
1253          */
1254         sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1255                 sub_type_hdr->length);
1256         ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
1257                                 avail_size,
1258                                 &cu_info,
1259                                 (struct crat_subtype_cache *)sub_type_hdr,
1260                                 &cache_mem_filled,
1261                                 &num_of_cache_entries);
1262 
1263         if (ret < 0)
1264                 return ret;
1265 
1266         crat_table->length += cache_mem_filled;
1267         crat_table->total_entries += num_of_cache_entries;
1268         avail_size -= cache_mem_filled;
1269 
 1270         /* Fill in Subtype: IO_LINKS
 1271          * Only direct links are added here, i.e. the link from the GPU
 1272          * to its NUMA node. Indirect links are added by userspace.
 1273          */
1274         sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1275                 cache_mem_filled);
1276         ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
1277                 (struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);
1278 
1279         if (ret < 0)
1280                 return ret;
1281 
1282         crat_table->length += sub_type_hdr->length;
1283         crat_table->total_entries++;
1284 
1285 
 1286         /* Fill in Subtype: IO_LINKS
 1287          * Direct links from this GPU to other GPUs through xGMI.
 1288          * Loop over the GPUs that have already been processed (those with
 1289          * a lower proximity_domain value) and add a link to each GPU that
 1290          * has the same hive id (from this GPU to the other GPU). The
 1291          * reversed iolink (from the other GPU to this GPU) will be added
 1292          * in kfd_parse_subtype_iolink.
 1293          */
1294         if (kdev->hive_id) {
1295                 for (nid = 0; nid < proximity_domain; ++nid) {
1296                         peer_dev = kfd_topology_device_by_proximity_domain(nid);
 1297                         if (!peer_dev || !peer_dev->gpu)
1298                                 continue;
1299                         if (peer_dev->gpu->hive_id != kdev->hive_id)
1300                                 continue;
1301                         sub_type_hdr = (typeof(sub_type_hdr))(
1302                                 (char *)sub_type_hdr +
1303                                 sizeof(struct crat_subtype_iolink));
1304                         ret = kfd_fill_gpu_xgmi_link_to_gpu(
1305                                 &avail_size, kdev, peer_dev->gpu,
1306                                 (struct crat_subtype_iolink *)sub_type_hdr,
1307                                 proximity_domain, nid);
1308                         if (ret < 0)
1309                                 return ret;
1310                         crat_table->length += sub_type_hdr->length;
1311                         crat_table->total_entries++;
1312                 }
1313         }
1314         *size = crat_table->length;
1315         pr_info("Virtual CRAT table created for GPU\n");
1316 
1317         return ret;
1318 }
1319 
1320 /* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
1321  *              creates a Virtual CRAT (VCRAT) image
1322  *
1323  * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
1324  *
1325  *      @crat_image: VCRAT image created because ACPI does not have a
1326  *                   CRAT for this device
1327  *      @size: [OUT] size of virtual crat_image
1328  *      @flags: COMPUTE_UNIT_CPU - Create VCRAT for CPU device
1329  *              COMPUTE_UNIT_GPU - Create VCRAT for GPU
1330  *              (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
1331  *                      -- this option is not currently implemented.
1332  *                      The assumption is that all AMD APUs will have CRAT
1333  *      @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
1334  *
 1335  *      Return 0 if successful, else a negative value
1336  */
1337 int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
1338                                   int flags, struct kfd_dev *kdev,
1339                                   uint32_t proximity_domain)
1340 {
1341         void *pcrat_image = NULL;
1342         int ret = 0;
1343 
1344         if (!crat_image)
1345                 return -EINVAL;
1346 
1347         *crat_image = NULL;
1348 
 1349         /* Allocate VCRAT_SIZE_FOR_CPU for a CPU virtual CRAT image and
 1350          * VCRAT_SIZE_FOR_GPU for a GPU virtual CRAT image. This should cover
 1351          * all the current conditions. Checks are in place so we do not write
 1352          * beyond the allocated size.
1353          */
1354         switch (flags) {
1355         case COMPUTE_UNIT_CPU:
1356                 pcrat_image = kmalloc(VCRAT_SIZE_FOR_CPU, GFP_KERNEL);
1357                 if (!pcrat_image)
1358                         return -ENOMEM;
1359                 *size = VCRAT_SIZE_FOR_CPU;
1360                 ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
1361                 break;
1362         case COMPUTE_UNIT_GPU:
1363                 if (!kdev)
1364                         return -EINVAL;
1365                 pcrat_image = kmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
1366                 if (!pcrat_image)
1367                         return -ENOMEM;
1368                 *size = VCRAT_SIZE_FOR_GPU;
1369                 ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
1370                                                  proximity_domain);
1371                 break;
1372         case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
1373                 /* TODO: */
1374                 ret = -EINVAL;
1375                 pr_err("VCRAT not implemented for APU\n");
1376                 break;
1377         default:
1378                 ret = -EINVAL;
1379         }
1380 
1381         if (!ret)
1382                 *crat_image = pcrat_image;
1383         else
1384                 kfree(pcrat_image);
1385 
1386         return ret;
1387 }
1388 
1389 
1390 /* kfd_destroy_crat_image
1391  *
1392  *      @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
1393  *
1394  */
1395 void kfd_destroy_crat_image(void *crat_image)
1396 {
1397         kfree(crat_image);
1398 }
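
     /* Typical lifecycle, as a sketch (the real caller is the topology
      * code in kfd_topology.c; error handling trimmed):
      *
      *   void *crat_image = NULL;
      *   size_t image_size = 0;
      *   LIST_HEAD(device_list);
      *
      *   if (kfd_create_crat_image_acpi(&crat_image, &image_size))
      *           kfd_create_crat_image_virtual(&crat_image, &image_size,
      *                                         COMPUTE_UNIT_CPU, NULL, 0);
      *   if (crat_image) {
      *           kfd_parse_crat_table(crat_image, &device_list, 0);
      *           kfd_destroy_crat_image(crat_image);
      *   }
      */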
