Lines matching refs: iommu
230 static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
250 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) in iommu_read_l1() argument
254 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_read_l1()
255 pci_read_config_dword(iommu->dev, 0xfc, &val); in iommu_read_l1()
259 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) in iommu_write_l1() argument
261 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); in iommu_write_l1()
262 pci_write_config_dword(iommu->dev, 0xfc, val); in iommu_write_l1()
263 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_write_l1()
266 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) in iommu_read_l2() argument
270 pci_write_config_dword(iommu->dev, 0xf0, address); in iommu_read_l2()
271 pci_read_config_dword(iommu->dev, 0xf4, &val); in iommu_read_l2()
275 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) in iommu_write_l2() argument
277 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); in iommu_write_l2()
278 pci_write_config_dword(iommu->dev, 0xf4, val); in iommu_write_l2()
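
The four helpers above (lines 250-278) reach the IOMMU's L1/L2 registers indirectly through its PCI config space: an index register at offset 0xf8 (L1) or 0xf0 (L2) selects the target, the data register at 0xfc or 0xf4 carries the value, and bit 31 (L1) or bit 8 (L2) of the index arms a write. Below is a minimal, hedged sketch of the same access pattern, assuming only the offsets visible in the listing; the example_* names are illustrative, not from the driver.

#include <linux/pci.h>

static u32 example_read_l1(struct pci_dev *dev, u16 l1, u8 address)
{
	u32 val;

	/* select L1 bank/register; bit 31 clear means read access */
	pci_write_config_dword(dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(dev, 0xfc, &val);
	return val;
}

static void example_write_l1(struct pci_dev *dev, u16 l1, u8 address, u32 val)
{
	/* bit 31 set arms a write; the data register then latches it */
	pci_write_config_dword(dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(dev, 0xfc, val);
	/* drop back to read mode, as iommu_write_l1() does on line 263 */
	pci_write_config_dword(dev, 0xf8, (address | l1 << 16));
}

The L2 pair works the same way through 0xf0/0xf4 with the write-enable in bit 8 (lines 270-278).
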
294 static void iommu_set_exclusion_range(struct amd_iommu *iommu) in iommu_set_exclusion_range() argument
296 u64 start = iommu->exclusion_start & PAGE_MASK; in iommu_set_exclusion_range()
297 u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; in iommu_set_exclusion_range()
300 if (!iommu->exclusion_start) in iommu_set_exclusion_range()
304 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, in iommu_set_exclusion_range()
308 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, in iommu_set_exclusion_range()
313 static void iommu_set_device_table(struct amd_iommu *iommu) in iommu_set_device_table() argument
317 BUG_ON(iommu->mmio_base == NULL); in iommu_set_device_table()
321 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, in iommu_set_device_table()
326 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) in iommu_feature_enable() argument
330 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_enable()
332 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_enable()
335 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) in iommu_feature_disable() argument
339 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_disable()
341 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_disable()
344 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) in iommu_set_inv_tlb_timeout() argument
348 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_set_inv_tlb_timeout()
351 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_set_inv_tlb_timeout()
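
iommu_feature_enable(), iommu_feature_disable() and iommu_set_inv_tlb_timeout() are read-modify-write accessors on the MMIO control register; only their signatures and readl()/writel() lines reference iommu, so the bit manipulation in between does not appear in this listing. A hedged sketch of that pattern follows, assuming the set/clear semantics implied by the enable/disable names; the example_* names and standalone mmio_base parameter are illustrative (the control register sits at MMIO offset 0x0018 in the AMD IOMMU spec).

#include <linux/io.h>

#define EX_MMIO_CONTROL_OFFSET	0x0018	/* illustrative stand-in for MMIO_CONTROL_OFFSET */

static void example_feature_enable(void __iomem *mmio_base, u8 bit)
{
	u32 ctrl = readl(mmio_base + EX_MMIO_CONTROL_OFFSET);

	ctrl |= (1 << bit);		/* set the feature's control bit */
	writel(ctrl, mmio_base + EX_MMIO_CONTROL_OFFSET);
}

static void example_feature_disable(void __iomem *mmio_base, u8 bit)
{
	u32 ctrl = readl(mmio_base + EX_MMIO_CONTROL_OFFSET);

	ctrl &= ~(1 << bit);		/* clear the feature's control bit */
	writel(ctrl, mmio_base + EX_MMIO_CONTROL_OFFSET);
}
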
355 static void iommu_enable(struct amd_iommu *iommu) in iommu_enable() argument
357 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); in iommu_enable()
360 static void iommu_disable(struct amd_iommu *iommu) in iommu_disable() argument
363 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in iommu_disable()
366 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); in iommu_disable()
367 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); in iommu_disable()
370 iommu_feature_disable(iommu, CONTROL_IOMMU_EN); in iommu_disable()
389 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) in iommu_unmap_mmio_space() argument
391 if (iommu->mmio_base) in iommu_unmap_mmio_space()
392 iounmap(iommu->mmio_base); in iommu_unmap_mmio_space()
393 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); in iommu_unmap_mmio_space()
519 static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) in alloc_command_buffer() argument
527 iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED; in alloc_command_buffer()
536 void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) in amd_iommu_reset_cmd_buffer() argument
538 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in amd_iommu_reset_cmd_buffer()
540 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); in amd_iommu_reset_cmd_buffer()
541 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in amd_iommu_reset_cmd_buffer()
543 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); in amd_iommu_reset_cmd_buffer()
550 static void iommu_enable_command_buffer(struct amd_iommu *iommu) in iommu_enable_command_buffer() argument
554 BUG_ON(iommu->cmd_buf == NULL); in iommu_enable_command_buffer()
556 entry = (u64)virt_to_phys(iommu->cmd_buf); in iommu_enable_command_buffer()
559 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, in iommu_enable_command_buffer()
562 amd_iommu_reset_cmd_buffer(iommu); in iommu_enable_command_buffer()
563 iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED); in iommu_enable_command_buffer()
566 static void __init free_command_buffer(struct amd_iommu *iommu) in free_command_buffer() argument
568 free_pages((unsigned long)iommu->cmd_buf, in free_command_buffer()
569 get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED))); in free_command_buffer()
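
Note the command-buffer bookkeeping across these helpers: alloc_command_buffer() stores CMD_BUFFER_SIZE with the CMD_BUFFER_UNINITIALIZED flag or-ed into cmd_buf_size (line 527), iommu_enable_command_buffer() clears that flag once the hardware has been pointed at the buffer (line 563), and free_command_buffer() masks it off before converting the size to a page order (lines 568-569). A small illustration of that packing, with made-up constant values standing in for the real macros:

/* Illustrative constants only; the real macros live in the driver's headers. */
#define EX_CMD_BUFFER_SIZE		8192
#define EX_CMD_BUFFER_UNINITIALIZED	(1 << 24)

static u32 example_cmd_buf_size_lifecycle(void)
{
	/* allocation: remember the size, flagged as not yet visible to hardware */
	u32 cmd_buf_size = EX_CMD_BUFFER_SIZE | EX_CMD_BUFFER_UNINITIALIZED;

	/* enable: the buffer base has been programmed into the IOMMU, drop the flag */
	cmd_buf_size &= ~EX_CMD_BUFFER_UNINITIALIZED;

	/* the free path masks the flag off before get_order(), as on line 569 */
	return cmd_buf_size & ~EX_CMD_BUFFER_UNINITIALIZED;
}
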
573 static u8 * __init alloc_event_buffer(struct amd_iommu *iommu) in alloc_event_buffer() argument
575 iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in alloc_event_buffer()
578 if (iommu->evt_buf == NULL) in alloc_event_buffer()
581 iommu->evt_buf_size = EVT_BUFFER_SIZE; in alloc_event_buffer()
583 return iommu->evt_buf; in alloc_event_buffer()
586 static void iommu_enable_event_buffer(struct amd_iommu *iommu) in iommu_enable_event_buffer() argument
590 BUG_ON(iommu->evt_buf == NULL); in iommu_enable_event_buffer()
592 entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; in iommu_enable_event_buffer()
594 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, in iommu_enable_event_buffer()
598 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_enable_event_buffer()
599 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_enable_event_buffer()
601 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); in iommu_enable_event_buffer()
604 static void __init free_event_buffer(struct amd_iommu *iommu) in free_event_buffer() argument
606 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); in free_event_buffer()
610 static u8 * __init alloc_ppr_log(struct amd_iommu *iommu) in alloc_ppr_log() argument
612 iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in alloc_ppr_log()
615 if (iommu->ppr_log == NULL) in alloc_ppr_log()
618 return iommu->ppr_log; in alloc_ppr_log()
621 static void iommu_enable_ppr_log(struct amd_iommu *iommu) in iommu_enable_ppr_log() argument
625 if (iommu->ppr_log == NULL) in iommu_enable_ppr_log()
628 entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; in iommu_enable_ppr_log()
630 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, in iommu_enable_ppr_log()
634 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_enable_ppr_log()
635 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_enable_ppr_log()
637 iommu_feature_enable(iommu, CONTROL_PPFLOG_EN); in iommu_enable_ppr_log()
638 iommu_feature_enable(iommu, CONTROL_PPR_EN); in iommu_enable_ppr_log()
641 static void __init free_ppr_log(struct amd_iommu *iommu) in free_ppr_log() argument
643 if (iommu->ppr_log == NULL) in free_ppr_log()
646 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); in free_ppr_log()
649 static void iommu_enable_gt(struct amd_iommu *iommu) in iommu_enable_gt() argument
651 if (!iommu_feature(iommu, FEATURE_GT)) in iommu_enable_gt()
654 iommu_feature_enable(iommu, CONTROL_GT_EN); in iommu_enable_gt()
687 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) in set_iommu_for_device() argument
689 amd_iommu_rlookup_table[devid] = iommu; in set_iommu_for_device()
696 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, in set_dev_entry_from_acpi() argument
716 set_iommu_for_device(iommu, devid); in set_dev_entry_from_acpi()
787 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in set_device_exclusion_range() local
792 if (iommu) { in set_device_exclusion_range()
799 iommu->exclusion_start = m->range_start; in set_device_exclusion_range()
800 iommu->exclusion_length = m->range_length; in set_device_exclusion_range()
808 static int __init init_iommu_from_acpi(struct amd_iommu *iommu, in init_iommu_from_acpi() argument
827 iommu->acpi_flags = h->flags; in init_iommu_from_acpi()
843 PCI_BUS_NUM(iommu->first_device), in init_iommu_from_acpi()
844 PCI_SLOT(iommu->first_device), in init_iommu_from_acpi()
845 PCI_FUNC(iommu->first_device), in init_iommu_from_acpi()
846 PCI_BUS_NUM(iommu->last_device), in init_iommu_from_acpi()
847 PCI_SLOT(iommu->last_device), in init_iommu_from_acpi()
848 PCI_FUNC(iommu->last_device), in init_iommu_from_acpi()
851 for (dev_i = iommu->first_device; in init_iommu_from_acpi()
852 dev_i <= iommu->last_device; ++dev_i) in init_iommu_from_acpi()
853 set_dev_entry_from_acpi(iommu, dev_i, in init_iommu_from_acpi()
866 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
896 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
897 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); in init_iommu_from_acpi()
929 set_dev_entry_from_acpi(iommu, devid, e->flags, in init_iommu_from_acpi()
957 set_dev_entry_from_acpi(iommu, in init_iommu_from_acpi()
960 set_dev_entry_from_acpi(iommu, dev_i, in init_iommu_from_acpi()
996 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1011 static int __init init_iommu_devices(struct amd_iommu *iommu) in init_iommu_devices() argument
1015 for (i = iommu->first_device; i <= iommu->last_device; ++i) in init_iommu_devices()
1016 set_iommu_for_device(iommu, i); in init_iommu_devices()
1021 static void __init free_iommu_one(struct amd_iommu *iommu) in free_iommu_one() argument
1023 free_command_buffer(iommu); in free_iommu_one()
1024 free_event_buffer(iommu); in free_iommu_one()
1025 free_ppr_log(iommu); in free_iommu_one()
1026 iommu_unmap_mmio_space(iommu); in free_iommu_one()
1031 struct amd_iommu *iommu, *next; in free_iommu_all() local
1033 for_each_iommu_safe(iommu, next) { in free_iommu_all()
1034 list_del(&iommu->list); in free_iommu_all()
1035 free_iommu_one(iommu); in free_iommu_all()
1036 kfree(iommu); in free_iommu_all()
1046 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) in amd_iommu_erratum_746_workaround() argument
1055 pci_write_config_dword(iommu->dev, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
1056 pci_read_config_dword(iommu->dev, 0xf4, &value); in amd_iommu_erratum_746_workaround()
1062 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); in amd_iommu_erratum_746_workaround()
1064 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); in amd_iommu_erratum_746_workaround()
1066 dev_name(&iommu->dev->dev)); in amd_iommu_erratum_746_workaround()
1069 pci_write_config_dword(iommu->dev, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
1078 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) in amd_iommu_ats_write_check_workaround() argument
1088 value = iommu_read_l2(iommu, 0x47); in amd_iommu_ats_write_check_workaround()
1094 iommu_write_l2(iommu, 0x47, value | BIT(0)); in amd_iommu_ats_write_check_workaround()
1097 dev_name(&iommu->dev->dev)); in amd_iommu_ats_write_check_workaround()
1105 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) in init_iommu_one() argument
1109 spin_lock_init(&iommu->lock); in init_iommu_one()
1112 list_add_tail(&iommu->list, &amd_iommu_list); in init_iommu_one()
1113 iommu->index = amd_iommus_present++; in init_iommu_one()
1115 if (unlikely(iommu->index >= MAX_IOMMUS)) { in init_iommu_one()
1121 amd_iommus[iommu->index] = iommu; in init_iommu_one()
1126 iommu->devid = h->devid; in init_iommu_one()
1127 iommu->cap_ptr = h->cap_ptr; in init_iommu_one()
1128 iommu->pci_seg = h->pci_seg; in init_iommu_one()
1129 iommu->mmio_phys = h->mmio_phys; in init_iommu_one()
1135 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; in init_iommu_one()
1137 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; in init_iommu_one()
1140 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys, in init_iommu_one()
1141 iommu->mmio_phys_end); in init_iommu_one()
1142 if (!iommu->mmio_base) in init_iommu_one()
1145 iommu->cmd_buf = alloc_command_buffer(iommu); in init_iommu_one()
1146 if (!iommu->cmd_buf) in init_iommu_one()
1149 iommu->evt_buf = alloc_event_buffer(iommu); in init_iommu_one()
1150 if (!iommu->evt_buf) in init_iommu_one()
1153 iommu->int_enabled = false; in init_iommu_one()
1155 ret = init_iommu_from_acpi(iommu, h); in init_iommu_one()
1163 amd_iommu_rlookup_table[iommu->devid] = NULL; in init_iommu_one()
1165 init_iommu_devices(iommu); in init_iommu_one()
1178 struct amd_iommu *iommu; in init_iommu_all() local
1197 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); in init_iommu_all()
1198 if (iommu == NULL) in init_iommu_all()
1201 ret = init_iommu_one(iommu, h); in init_iommu_all()
1217 static void init_iommu_perf_ctr(struct amd_iommu *iommu) in init_iommu_perf_ctr() argument
1221 if (!iommu_feature(iommu, FEATURE_PC)) in init_iommu_perf_ctr()
1227 if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) || in init_iommu_perf_ctr()
1228 (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) || in init_iommu_perf_ctr()
1237 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); in init_iommu_perf_ctr()
1238 iommu->max_banks = (u8) ((val >> 12) & 0x3f); in init_iommu_perf_ctr()
1239 iommu->max_counters = (u8) ((val >> 7) & 0xf); in init_iommu_perf_ctr()
1246 struct amd_iommu *iommu = dev_get_drvdata(dev); in amd_iommu_show_cap() local
1247 return sprintf(buf, "%x\n", iommu->cap); in amd_iommu_show_cap()
1255 struct amd_iommu *iommu = dev_get_drvdata(dev); in amd_iommu_show_features() local
1256 return sprintf(buf, "%llx\n", iommu->features); in amd_iommu_show_features()
1276 static int iommu_init_pci(struct amd_iommu *iommu) in iommu_init_pci() argument
1278 int cap_ptr = iommu->cap_ptr; in iommu_init_pci()
1281 iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid), in iommu_init_pci()
1282 iommu->devid & 0xff); in iommu_init_pci()
1283 if (!iommu->dev) in iommu_init_pci()
1286 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, in iommu_init_pci()
1287 &iommu->cap); in iommu_init_pci()
1288 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, in iommu_init_pci()
1290 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET, in iommu_init_pci()
1293 iommu->first_device = PCI_DEVID(MMIO_GET_BUS(range), in iommu_init_pci()
1295 iommu->last_device = PCI_DEVID(MMIO_GET_BUS(range), in iommu_init_pci()
1298 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) in iommu_init_pci()
1302 low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); in iommu_init_pci()
1303 high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); in iommu_init_pci()
1305 iommu->features = ((u64)high << 32) | low; in iommu_init_pci()
1307 if (iommu_feature(iommu, FEATURE_GT)) { in iommu_init_pci()
1312 pasmax = iommu->features & FEATURE_PASID_MASK; in iommu_init_pci()
1320 glxval = iommu->features & FEATURE_GLXVAL_MASK; in iommu_init_pci()
1329 if (iommu_feature(iommu, FEATURE_GT) && in iommu_init_pci()
1330 iommu_feature(iommu, FEATURE_PPR)) { in iommu_init_pci()
1331 iommu->is_iommu_v2 = true; in iommu_init_pci()
1335 if (iommu_feature(iommu, FEATURE_PPR)) { in iommu_init_pci()
1336 iommu->ppr_log = alloc_ppr_log(iommu); in iommu_init_pci()
1337 if (!iommu->ppr_log) in iommu_init_pci()
1341 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) in iommu_init_pci()
1344 init_iommu_perf_ctr(iommu); in iommu_init_pci()
1346 if (is_rd890_iommu(iommu->dev)) { in iommu_init_pci()
1349 iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number, in iommu_init_pci()
1357 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_init_pci()
1358 &iommu->stored_addr_lo); in iommu_init_pci()
1359 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, in iommu_init_pci()
1360 &iommu->stored_addr_hi); in iommu_init_pci()
1363 iommu->stored_addr_lo &= ~1; in iommu_init_pci()
1367 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); in iommu_init_pci()
1370 iommu->stored_l2[i] = iommu_read_l2(iommu, i); in iommu_init_pci()
1373 amd_iommu_erratum_746_workaround(iommu); in iommu_init_pci()
1374 amd_iommu_ats_write_check_workaround(iommu); in iommu_init_pci()
1376 iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu, in iommu_init_pci()
1378 iommu->index); in iommu_init_pci()
1380 return pci_enable_device(iommu->dev); in iommu_init_pci()
1389 struct amd_iommu *iommu; in print_iommu_info() local
1391 for_each_iommu(iommu) { in print_iommu_info()
1395 dev_name(&iommu->dev->dev), iommu->cap_ptr); in print_iommu_info()
1397 if (iommu->cap & (1 << IOMMU_CAP_EFR)) { in print_iommu_info()
1400 if (iommu_feature(iommu, (1ULL << i))) in print_iommu_info()
1412 struct amd_iommu *iommu; in amd_iommu_init_pci() local
1415 for_each_iommu(iommu) { in amd_iommu_init_pci()
1416 ret = iommu_init_pci(iommu); in amd_iommu_init_pci()
1437 static int iommu_setup_msi(struct amd_iommu *iommu) in iommu_setup_msi() argument
1441 r = pci_enable_msi(iommu->dev); in iommu_setup_msi()
1445 r = request_threaded_irq(iommu->dev->irq, in iommu_setup_msi()
1449 iommu); in iommu_setup_msi()
1452 pci_disable_msi(iommu->dev); in iommu_setup_msi()
1456 iommu->int_enabled = true; in iommu_setup_msi()
1461 static int iommu_init_msi(struct amd_iommu *iommu) in iommu_init_msi() argument
1465 if (iommu->int_enabled) in iommu_init_msi()
1468 if (iommu->dev->msi_cap) in iommu_init_msi()
1469 ret = iommu_setup_msi(iommu); in iommu_init_msi()
1477 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); in iommu_init_msi()
1479 if (iommu->ppr_log != NULL) in iommu_init_msi()
1480 iommu_feature_enable(iommu, CONTROL_PPFINT_EN); in iommu_init_msi()
1629 static void iommu_init_flags(struct amd_iommu *iommu) in iommu_init_flags() argument
1631 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? in iommu_init_flags()
1632 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : in iommu_init_flags()
1633 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); in iommu_init_flags()
1635 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? in iommu_init_flags()
1636 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : in iommu_init_flags()
1637 iommu_feature_disable(iommu, CONTROL_PASSPW_EN); in iommu_init_flags()
1639 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? in iommu_init_flags()
1640 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : in iommu_init_flags()
1641 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); in iommu_init_flags()
1643 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? in iommu_init_flags()
1644 iommu_feature_enable(iommu, CONTROL_ISOC_EN) : in iommu_init_flags()
1645 iommu_feature_disable(iommu, CONTROL_ISOC_EN); in iommu_init_flags()
1650 iommu_feature_enable(iommu, CONTROL_COHERENT_EN); in iommu_init_flags()
1653 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); in iommu_init_flags()
1656 static void iommu_apply_resume_quirks(struct amd_iommu *iommu) in iommu_apply_resume_quirks() argument
1660 struct pci_dev *pdev = iommu->root_pdev; in iommu_apply_resume_quirks()
1663 if (!is_rd890_iommu(iommu->dev) || !pdev) in iommu_apply_resume_quirks()
1680 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_apply_resume_quirks()
1681 iommu->stored_addr_lo); in iommu_apply_resume_quirks()
1682 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, in iommu_apply_resume_quirks()
1683 iommu->stored_addr_hi); in iommu_apply_resume_quirks()
1688 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); in iommu_apply_resume_quirks()
1692 iommu_write_l2(iommu, i, iommu->stored_l2[i]); in iommu_apply_resume_quirks()
1695 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_apply_resume_quirks()
1696 iommu->stored_addr_lo | 1); in iommu_apply_resume_quirks()
1705 struct amd_iommu *iommu; in early_enable_iommus() local
1707 for_each_iommu(iommu) { in early_enable_iommus()
1708 iommu_disable(iommu); in early_enable_iommus()
1709 iommu_init_flags(iommu); in early_enable_iommus()
1710 iommu_set_device_table(iommu); in early_enable_iommus()
1711 iommu_enable_command_buffer(iommu); in early_enable_iommus()
1712 iommu_enable_event_buffer(iommu); in early_enable_iommus()
1713 iommu_set_exclusion_range(iommu); in early_enable_iommus()
1714 iommu_enable(iommu); in early_enable_iommus()
1715 iommu_flush_all_caches(iommu); in early_enable_iommus()
1721 struct amd_iommu *iommu; in enable_iommus_v2() local
1723 for_each_iommu(iommu) { in enable_iommus_v2()
1724 iommu_enable_ppr_log(iommu); in enable_iommus_v2()
1725 iommu_enable_gt(iommu); in enable_iommus_v2()
1738 struct amd_iommu *iommu; in disable_iommus() local
1740 for_each_iommu(iommu) in disable_iommus()
1741 iommu_disable(iommu); in disable_iommus()
1751 struct amd_iommu *iommu; in amd_iommu_resume() local
1753 for_each_iommu(iommu) in amd_iommu_resume()
1754 iommu_apply_resume_quirks(iommu); in amd_iommu_resume()
2017 struct amd_iommu *iommu; in amd_iommu_enable_interrupts() local
2020 for_each_iommu(iommu) { in amd_iommu_enable_interrupts()
2021 ret = iommu_init_msi(iommu); in amd_iommu_enable_interrupts()
2055 struct amd_iommu *iommu; in amd_iommu_init_dma() local
2068 for_each_iommu(iommu) in amd_iommu_init_dma()
2069 iommu_flush_all_caches(iommu); in amd_iommu_init_dma()
2214 struct amd_iommu *iommu; in amd_iommu_init() local
2217 for_each_iommu(iommu) in amd_iommu_init()
2218 iommu_flush_all_caches(iommu); in amd_iommu_init()
2248 x86_init.iommu.iommu_init = amd_iommu_init; in amd_iommu_detect()
2366 struct amd_iommu *iommu; in amd_iommu_pc_get_max_banks() local
2370 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_pc_get_max_banks()
2371 if (iommu) in amd_iommu_pc_get_max_banks()
2372 ret = iommu->max_banks; in amd_iommu_pc_get_max_banks()
2386 struct amd_iommu *iommu; in amd_iommu_pc_get_max_counters() local
2390 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_pc_get_max_counters()
2391 if (iommu) in amd_iommu_pc_get_max_counters()
2392 ret = iommu->max_counters; in amd_iommu_pc_get_max_counters()
2398 static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, in iommu_pc_get_set_reg_val() argument
2412 max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) | in iommu_pc_get_set_reg_val()
2413 (iommu->max_counters << 8) | 0x28); in iommu_pc_get_set_reg_val()
2419 writel((u32)*value, iommu->mmio_base + offset); in iommu_pc_get_set_reg_val()
2420 writel((*value >> 32), iommu->mmio_base + offset + 4); in iommu_pc_get_set_reg_val()
2422 *value = readl(iommu->mmio_base + offset + 4); in iommu_pc_get_set_reg_val()
2424 *value = readl(iommu->mmio_base + offset); in iommu_pc_get_set_reg_val()
2434 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_pc_get_set_reg_val() local
2437 if (!amd_iommu_pc_present || iommu == NULL) in amd_iommu_pc_get_set_reg_val()
2440 return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn, in amd_iommu_pc_get_set_reg_val()
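
iommu_pc_get_set_reg_val() addresses one 64-bit performance-counter register per (bank, counter, function) triple and bounds-checks the resulting MMIO offset against max_offset_lim (lines 2412-2413); the 64-bit value is then split across two 32-bit accesses at offset and offset + 4 (lines 2419-2424). A hedged sketch of the addressing, reconstructed by mirroring that limit expression rather than quoted from the driver:

/* Sketch: MMIO offset of perf-counter register <bank, cntr, fxn>.
 * Assumed to mirror the max_offset_lim expression on lines 2412-2413. */
static u32 example_pc_reg_offset(u8 bank, u8 cntr, u8 fxn)
{
	return (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
}

Read this as: bank selects a 4 KiB window inside the counter aperture that begins at MMIO offset 0x40000, cntr selects a 256-byte slice within that window, and fxn selects the register within the slice, which is why max_banks and max_counters (read back in init_iommu_perf_ctr(), lines 1238-1239) bound the valid offsets.
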