/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "cikd.h"
#include "cik_reg.h"
#include "radeon_kfd.h"
#include "radeon_ucode.h"
#include "cik_structs.h"

#define CIK_PIPE_PER_MEC	(4)

struct kgd_mem {
	struct radeon_bo *bo;
	uint64_t gpu_addr;
	void *cpu_ptr;
};

static int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr);

static void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);

static uint64_t get_vmem_size(struct kgd_dev *kgd);
static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);

static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid);

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr);

static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);

static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int timeout);

static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_vmem_size = get_vmem_size,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.get_fw_version = get_fw_version,
};

static const struct kgd2kfd_calls *kgd2kfd;
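
/*
 * radeon_kfd_init() resolves the kgd2kfd interface exported by amdkfd.
 * When amdkfd is built as a module, symbol_request() pins it and provides
 * the entry point; when amdkfd is built in, kgd2kfd_init() is called
 * directly; when HSA support is disabled, KFD is reported as unavailable.
 */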
bool radeon_kfd_init(void)
{
#if defined(CONFIG_HSA_AMD_MODULE)
	bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls **);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return false;

	if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
		symbol_put(kgd2kfd_init);
		kgd2kfd = NULL;

		return false;
	}

	return true;
#elif defined(CONFIG_HSA_AMD)
	if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
		kgd2kfd = NULL;

		return false;
	}

	return true;
#else
	return false;
#endif
}

void radeon_kfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
		symbol_put(kgd2kfd_init);
		kgd2kfd = NULL;	/* guard against use after teardown */
	}
}

void radeon_kfd_device_probe(struct radeon_device *rdev)
{
	if (kgd2kfd)
		rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
					rdev->pdev, &kfd2kgd);
}

void radeon_kfd_device_init(struct radeon_device *rdev)
{
	if (rdev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			/* VMIDs 8-15 are reserved for amdkfd's compute work */
			.compute_vmid_bitmap = 0xFF00,

			/* pipe 0 of MEC1 stays with radeon's own CP; amdkfd
			 * gets the remaining three pipes of the first MEC */
			.first_compute_pipe = 1,
			.compute_pipe_count = 4 - 1,
		};

		radeon_doorbell_get_kfd_info(rdev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		kgd2kfd->device_init(rdev->kfd, &gpu_resources);
	}
}

void radeon_kfd_device_fini(struct radeon_device *rdev)
{
	if (rdev->kfd) {
		kgd2kfd->device_exit(rdev->kfd);
		rdev->kfd = NULL;
	}
}

void radeon_kfd_interrupt(struct radeon_device *rdev, const void *ih_ring_entry)
{
	if (rdev->kfd)
		kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
}

void radeon_kfd_suspend(struct radeon_device *rdev)
{
	if (rdev->kfd)
		kgd2kfd->suspend(rdev->kfd);
}

int radeon_kfd_resume(struct radeon_device *rdev)
{
	int r = 0;

	if (rdev->kfd)
		r = kgd2kfd->resume(rdev->kfd);

	return r;
}
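
/*
 * alloc_gtt_mem() backs amdkfd's init_gtt_mem_allocation callback: it
 * creates a GTT buffer object, pins it so the GPU address stays valid and
 * kmaps it for CPU access. The kgd_mem wrapper keeps the BO together with
 * both addresses so free_gtt_mem() can unwind the mapping, the pin and the
 * allocation in reverse order.
 */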
static int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr)
{
	struct radeon_device *rdev = (struct radeon_device *)kgd;
	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
	int r;

	BUG_ON(kgd == NULL);
	BUG_ON(gpu_addr == NULL);
	BUG_ON(cpu_ptr == NULL);

	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if ((*mem) == NULL)
		return -ENOMEM;

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
				RADEON_GEM_GTT_WC, NULL, NULL, &(*mem)->bo);
	if (r) {
		dev_err(rdev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		kfree(*mem);	/* don't leak the wrapper on failure */
		return r;
	}

	/* map the buffer */
	r = radeon_bo_reserve((*mem)->bo, true);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = radeon_bo_pin((*mem)->bo, RADEON_GEM_DOMAIN_GTT,
				&(*mem)->gpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}
	*gpu_addr = (*mem)->gpu_addr;

	r = radeon_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
	if (r) {
		dev_err(rdev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}
	*cpu_ptr = (*mem)->cpu_ptr;

	radeon_bo_unreserve((*mem)->bo);

	return 0;

allocate_mem_kmap_bo_failed:
	radeon_bo_unpin((*mem)->bo);
allocate_mem_pin_bo_failed:
	radeon_bo_unreserve((*mem)->bo);
allocate_mem_reserve_bo_failed:
	radeon_bo_unref(&(*mem)->bo);
	kfree(*mem);	/* the wrapper must be freed on these paths too */

	return r;
}

static void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;

	BUG_ON(mem == NULL);

	radeon_bo_reserve(mem->bo, true);
	radeon_bo_kunmap(mem->bo);
	radeon_bo_unpin(mem->bo);
	radeon_bo_unreserve(mem->bo);
	radeon_bo_unref(&(mem->bo));
	kfree(mem);
}

static uint64_t get_vmem_size(struct kgd_dev *kgd)
{
	struct radeon_device *rdev = (struct radeon_device *)kgd;

	BUG_ON(kgd == NULL);

	return rdev->mc.real_vram_size;
}

static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct radeon_device *rdev = (struct radeon_device *)kgd;

	return rdev->asic->get_gpu_clock_counter(rdev);
}

static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct radeon_device *rdev = (struct radeon_device *)kgd;

	/* The sclk is in quanta of 10 kHz */
	return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
}

static inline struct radeon_device *get_radeon_device(struct kgd_dev *kgd)
{
	return (struct radeon_device *)kgd;
}

static void write_register(struct kgd_dev *kgd, uint32_t offset, uint32_t value)
{
	struct radeon_device *rdev = get_radeon_device(kgd);

	writel(value, (void __iomem *)(rdev->rmmio + offset));
}

static uint32_t read_register(struct kgd_dev *kgd, uint32_t offset)
{
	struct radeon_device *rdev = get_radeon_device(kgd);

	return readl((void __iomem *)(rdev->rmmio + offset));
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct radeon_device *rdev = get_radeon_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&rdev->srbm_mutex);
	write_register(kgd, SRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct radeon_device *rdev = get_radeon_device(kgd);

	write_register(kgd, SRBM_GFX_CNTL, 0);
	mutex_unlock(&rdev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	/*
	 * amdkfd passes pipe ids relative to first_compute_pipe, hence
	 * the pre-increment to get the absolute pipe before splitting it
	 * into an SRBM MEC/pipe pair.
	 */
	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}
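
/*
 * kgd_program_sh_mem_settings() writes the per-VMID shader memory
 * aperture configuration. The SH_MEM_* registers are instanced per VMID
 * through SRBM_GFX_CNTL, so the writes are bracketed by
 * lock_srbm()/unlock_srbm() with the target vmid selected.
 */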
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	lock_srbm(kgd, 0, 0, 0, vmid);

	write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
	write_register(kgd, SH_MEM_APE1_BASE, sh_mem_ape1_base);
	write_register(kgd, SH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	write_register(kgd, SH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished and the
	 * SW cleared it. So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 :
			(uint32_t)pasid | ATC_VMID_PASID_MAPPING_VALID;

	write_register(kgd, ATC_VMID0_PASID_MAPPING + vmid*sizeof(uint32_t),
			pasid_mapping);

	while (!(read_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS) &
							(1U << vmid)))
		cpu_relax();
	write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mirror the vmid -> pasid mapping for the IH block as well */
	write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
			pasid_mapping);

	return 0;
}

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);

	lock_srbm(kgd, mec, pipe, 0, 0);
	write_register(kgd, CP_HPD_EOP_BASE_ADDR,
			lower_32_bits(hpd_gpu_addr >> 8));
	write_register(kgd, CP_HPD_EOP_BASE_ADDR_HI,
			upper_32_bits(hpd_gpu_addr >> 8));
	write_register(kgd, CP_HPD_EOP_VMID, 0);
	write_register(kgd, CP_HPD_EOP_CONTROL, hpd_size);
	unlock_srbm(kgd);

	return 0;
}

static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

	pr_debug("kfd: sdma base address: 0x%x\n", retval);

	return retval;
}

static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}
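
/*
 * kgd_hqd_load() activates a compute queue by programming the hardware
 * queue descriptor (HQD) of the selected pipe/queue slot from the memory
 * queue descriptor (MQD). The user-space write pointer is also restored
 * when it can still be read, so a previously evicted queue continues from
 * its last position; writing CP_HQD_ACTIVE last arms the queue.
 */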
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr)
{
	uint32_t wptr_shadow, is_wptr_shadow_valid;
	struct cik_mqd *m;

	m = get_mqd(mqd);

	is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);

	acquire_queue(kgd, pipe_id, queue_id);
	write_register(kgd, CP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
	write_register(kgd, CP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
	write_register(kgd, CP_MQD_CONTROL, m->cp_mqd_control);

	write_register(kgd, CP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
	write_register(kgd, CP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
	write_register(kgd, CP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);

	write_register(kgd, CP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
	write_register(kgd, CP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
	write_register(kgd, CP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);

	write_register(kgd, CP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);

	write_register(kgd, CP_HQD_PERSISTENT_STATE,
			m->cp_hqd_persistent_state);
	write_register(kgd, CP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
	write_register(kgd, CP_HQD_MSG_TYPE, m->cp_hqd_msg_type);

	write_register(kgd, CP_HQD_ATOMIC0_PREOP_LO,
			m->cp_hqd_atomic0_preop_lo);
	write_register(kgd, CP_HQD_ATOMIC0_PREOP_HI,
			m->cp_hqd_atomic0_preop_hi);
	write_register(kgd, CP_HQD_ATOMIC1_PREOP_LO,
			m->cp_hqd_atomic1_preop_lo);
	write_register(kgd, CP_HQD_ATOMIC1_PREOP_HI,
			m->cp_hqd_atomic1_preop_hi);

	write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR,
			m->cp_hqd_pq_rptr_report_addr_lo);
	write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
			m->cp_hqd_pq_rptr_report_addr_hi);

	write_register(kgd, CP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);

	write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR,
			m->cp_hqd_pq_wptr_poll_addr_lo);
	write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR_HI,
			m->cp_hqd_pq_wptr_poll_addr_hi);

	write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL,
			m->cp_hqd_pq_doorbell_control);

	write_register(kgd, CP_HQD_VMID, m->cp_hqd_vmid);

	write_register(kgd, CP_HQD_QUANTUM, m->cp_hqd_quantum);

	write_register(kgd, CP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
	write_register(kgd, CP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);

	write_register(kgd, CP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);

	if (is_wptr_shadow_valid)
		write_register(kgd, CP_HQD_PQ_WPTR, wptr_shadow);

	write_register(kgd, CP_HQD_ACTIVE, m->cp_hqd_active);
	release_queue(kgd);

	return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	write_register(kgd, sdma_base_addr + SDMA0_RLC0_VIRTUAL_ADDR,
			m->sdma_rlc_virtual_addr);
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_BASE,
			m->sdma_rlc_rb_base);
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_BASE_HI,
			m->sdma_rlc_rb_base_hi);
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdma_rlc_rb_rptr_addr_lo);
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdma_rlc_rb_rptr_addr_hi);
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_DOORBELL,
			m->sdma_rlc_doorbell);
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL,
			m->sdma_rlc_rb_cntl);

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = read_register(kgd, CP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == read_register(kgd, CP_HQD_PQ_BASE) &&
			high == read_register(kgd, CP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = read_register(kgd,
					sdma_base_addr + SDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA_RB_ENABLE)
		return true;

	return false;
}
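
/*
 * kgd_hqd_destroy() preempts a compute queue: the queue's doorbell is
 * disabled first, then a dequeue request of the given reset_type is issued
 * and CP_HQD_ACTIVE is polled in 20 ms steps, up to the caller's timeout,
 * until the CP reports that the queue has been disconnected.
 */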
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	uint32_t temp;

	acquire_queue(kgd, pipe_id, queue_id);
	write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL, 0);

	write_register(kgd, CP_HQD_DEQUEUE_REQUEST, reset_type);

	while (true) {
		temp = read_register(kgd, CP_HQD_ACTIVE);
		/* the CP clears the ACTIVE bit once the dequeue completes */
		if (!(temp & 0x1))
			break;
		if (timeout == 0) {
			pr_err("kfd: cp queue preemption timed out\n");
			release_queue(kgd);
			return -ETIME;
		}
		msleep(20);
		timeout -= min(timeout, 20u);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int timeout)
{
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = read_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA_RB_ENABLE;
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = read_register(kgd, sdma_base_addr +
						SDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA_RLC_IDLE)
			break;
		if (timeout == 0)
			return -ETIME;
		msleep(20);
		timeout -= min(timeout, 20u);
	}

	write_register(kgd, sdma_base_addr + SDMA0_RLC0_DOORBELL, 0);
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_RPTR, 0);
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_WPTR, 0);
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_BASE, 0);

	return 0;
}

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct radeon_device *rdev = (struct radeon_device *) kgd;
	const union radeon_firmware_header *hdr;

	BUG_ON(kgd == NULL || rdev->mec_fw == NULL);

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union radeon_firmware_header *)
						rdev->mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA:
		hdr = (const union radeon_firmware_header *)
						rdev->sdma_fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bits are in use */
	return hdr->common.ucode_version;
}