Lines Matching refs:iommu

353 	struct intel_iommu *iommu; /* IOMMU used by this device */  member
407 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
410 struct intel_iommu *iommu);
431 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \ argument
432 ecap_pasid(iommu->ecap))
548 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
553 sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_agaw()
566 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
568 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
576 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
578 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
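
The three agaw helpers above (lines 548-578) boil down to one calculation: intersect the requested guest address width with the SAGAW bits advertised in the capability register and keep the highest level that fits. A minimal sketch of that idea; width_to_agaw() as the width-to-level conversion is an assumption, not something the excerpt confirms.

/* Sketch: pick the largest supported AGAW that does not exceed max_gaw.
 * cap_sagaw() and the iommu fields come from the excerpts above;
 * width_to_agaw() is an assumed helper name. */
static int calculate_agaw_sketch(struct intel_iommu *iommu, int max_gaw)
{
        unsigned long sagaw = cap_sagaw(iommu->cap);    /* bit N set => AGAW level N supported */
        int agaw;

        for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--)
                if (test_bit(agaw, &sagaw))
                        break;

        return agaw;    /* -1 if the hardware supports none of the requested levels */
}
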
598 struct intel_iommu *iommu; in domain_update_iommu_coherency() local
616 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_coherency()
617 if (!ecap_coherent(iommu->ecap)) { in domain_update_iommu_coherency()
628 struct intel_iommu *iommu; in domain_update_iommu_snooping() local
632 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_snooping()
633 if (iommu != skip) { in domain_update_iommu_snooping()
634 if (!ecap_sc_support(iommu->ecap)) { in domain_update_iommu_snooping()
648 struct intel_iommu *iommu; in domain_update_iommu_superpage() local
657 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
658 if (iommu != skip) { in domain_update_iommu_superpage()
659 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
677 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu, in iommu_context_addr() argument
680 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
685 if (ecs_enabled(iommu)) { in iommu_context_addr()
699 context = alloc_pgtable_page(iommu->node); in iommu_context_addr()
703 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in iommu_context_addr()
706 __iommu_flush_cache(iommu, entry, sizeof(*entry)); in iommu_context_addr()
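
iommu_context_addr() (lines 677-706) walks from the per-bus root entry to a context table, allocating and cache-flushing a fresh page when none is present. The sketch below covers only the legacy (non-ECS) path; the lo/hi layout of struct root_entry, phys_to_virt(), virt_to_phys() and VTD_PAGE_MASK are assumptions beyond the excerpt, and the ECS split of the root entry at devfn 0x80 (visible around line 824) is omitted.

/* Sketch: resolve bus/devfn to a context entry, allocating the table on demand. */
static struct context_entry *context_addr_sketch(struct intel_iommu *iommu,
                                                 u8 bus, u8 devfn, int alloc)
{
        struct root_entry *root = &iommu->root_entry[bus];
        struct context_entry *context;

        if (root->lo & 1) {                             /* present bit: table already exists */
                context = phys_to_virt(root->lo & VTD_PAGE_MASK);
        } else {
                if (!alloc)
                        return NULL;
                context = alloc_pgtable_page(iommu->node);
                if (!context)
                        return NULL;
                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                root->lo = virt_to_phys(context) | 1;   /* install table, mark present */
                __iommu_flush_cache(iommu, &root->lo, sizeof(root->lo));
        }
        return &context[devfn];
}
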
713 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; in iommu_dummy()
719 struct intel_iommu *iommu; in device_to_iommu() local
735 for_each_active_iommu(iommu, drhd) { in device_to_iommu()
764 iommu = NULL; in device_to_iommu()
768 return iommu; in device_to_iommu()
778 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) in device_context_mapped() argument
784 spin_lock_irqsave(&iommu->lock, flags); in device_context_mapped()
785 context = iommu_context_addr(iommu, bus, devfn, 0); in device_context_mapped()
788 spin_unlock_irqrestore(&iommu->lock, flags); in device_context_mapped()
792 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn) in clear_context_table() argument
797 spin_lock_irqsave(&iommu->lock, flags); in clear_context_table()
798 context = iommu_context_addr(iommu, bus, devfn, 0); in clear_context_table()
801 __iommu_flush_cache(iommu, context, sizeof(*context)); in clear_context_table()
803 spin_unlock_irqrestore(&iommu->lock, flags); in clear_context_table()
806 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
812 spin_lock_irqsave(&iommu->lock, flags); in free_context_table()
813 if (!iommu->root_entry) { in free_context_table()
817 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
821 if (!ecs_enabled(iommu)) in free_context_table()
824 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
829 free_pgtable_page(iommu->root_entry); in free_context_table()
830 iommu->root_entry = NULL; in free_context_table()
832 spin_unlock_irqrestore(&iommu->lock, flags); in free_context_table()
1128 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
1133 root = (struct root_entry *)alloc_pgtable_page(iommu->node); in iommu_alloc_root_entry()
1136 iommu->name); in iommu_alloc_root_entry()
1140 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
1142 spin_lock_irqsave(&iommu->lock, flags); in iommu_alloc_root_entry()
1143 iommu->root_entry = root; in iommu_alloc_root_entry()
1144 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_alloc_root_entry()
1149 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
1155 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1156 if (ecs_enabled(iommu)) in iommu_set_root_entry()
1159 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1160 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1162 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1165 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
1168 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1171 static void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
1176 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1179 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1180 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1183 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1186 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1190 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1213 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1214 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1217 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1220 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1224 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1227 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1253 if (cap_read_drain(iommu->cap)) in __iommu_flush_iotlb()
1256 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1259 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1262 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1263 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1266 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1269 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1281 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, in iommu_support_dev_iotlb() argument
1289 if (!ecap_dev_iotlb_support(iommu->ecap)) in iommu_support_dev_iotlb()
1292 if (!iommu->qi) in iommu_support_dev_iotlb()
1297 if (info->iommu == iommu && info->bus == bus && in iommu_support_dev_iotlb()
1354 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); in iommu_flush_dev_iotlb()
1359 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, in iommu_flush_iotlb_psi() argument
1375 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) in iommu_flush_iotlb_psi()
1376 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iotlb_psi()
1379 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, in iommu_flush_iotlb_psi()
1386 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
1387 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); in iommu_flush_iotlb_psi()
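
iommu_flush_iotlb_psi() (lines 1359-1387) falls back from a page-selective to a domain-selective IOTLB flush when the hardware cannot cover the requested range, then chases device IOTLBs unless caching mode handles the map path elsewhere. A hedged sketch of that decision; the mask derivation (ilog2 of the rounded-up page count) and VTD_PAGE_SHIFT are assumptions rather than quotations.

/* Sketch: flush 'pages' pages for domain 'did', preferring page-selective invalidation. */
static void flush_iotlb_psi_sketch(struct intel_iommu *iommu, u16 did,
                                   unsigned long pfn, unsigned int pages, int ih, int map)
{
        unsigned int mask = ilog2(roundup_pow_of_two(pages));   /* assumed mask derivation */
        u64 addr = (u64)pfn << VTD_PAGE_SHIFT;

        if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
                /* Range too large, or PSI unsupported: flush the whole domain. */
                iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
        else
                iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, DMA_TLB_PSI_FLUSH);

        /* In caching mode the map path is flushed elsewhere; otherwise hit device IOTLBs too. */
        if (!cap_caching_mode(iommu->cap) || !map)
                iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
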
1390 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1395 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1396 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1398 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1401 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1404 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1407 static void iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1412 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1413 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1414 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1417 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1420 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1423 static void iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1428 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1429 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1430 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1433 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1436 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
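
iommu_set_root_entry(), iommu_flush_write_buffer() and the enable/disable routines above all share one register-programming shape: take register_lock, update gcmd (or write a command register), then poll via IOMMU_WAIT_OP() until the status register reflects the change. A minimal sketch of that pattern for enabling translation; DMA_GSTS_TES as the acknowledged status bit is an assumption beyond the excerpt.

static void enable_translation_sketch(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        iommu->gcmd |= DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Spin until the hardware acknowledges the new translation-enable state. */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_TES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
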
1440 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1445 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1447 iommu->seq_id, ndomains); in iommu_init_domains()
1450 spin_lock_init(&iommu->lock); in iommu_init_domains()
1455 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); in iommu_init_domains()
1456 if (!iommu->domain_ids) { in iommu_init_domains()
1458 iommu->seq_id); in iommu_init_domains()
1461 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *), in iommu_init_domains()
1463 if (!iommu->domains) { in iommu_init_domains()
1465 iommu->seq_id); in iommu_init_domains()
1466 kfree(iommu->domain_ids); in iommu_init_domains()
1467 iommu->domain_ids = NULL; in iommu_init_domains()
1475 if (cap_caching_mode(iommu->cap)) in iommu_init_domains()
1476 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1480 static void disable_dmar_iommu(struct intel_iommu *iommu) in disable_dmar_iommu() argument
1485 if ((iommu->domains) && (iommu->domain_ids)) { in disable_dmar_iommu()
1486 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) { in disable_dmar_iommu()
1491 if (cap_caching_mode(iommu->cap) && i == 0) in disable_dmar_iommu()
1494 domain = iommu->domains[i]; in disable_dmar_iommu()
1495 clear_bit(i, iommu->domain_ids); in disable_dmar_iommu()
1496 if (domain_detach_iommu(domain, iommu) == 0 && in disable_dmar_iommu()
1502 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1503 iommu_disable_translation(iommu); in disable_dmar_iommu()
1506 static void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1508 if ((iommu->domains) && (iommu->domain_ids)) { in free_dmar_iommu()
1509 kfree(iommu->domains); in free_dmar_iommu()
1510 kfree(iommu->domain_ids); in free_dmar_iommu()
1511 iommu->domains = NULL; in free_dmar_iommu()
1512 iommu->domain_ids = NULL; in free_dmar_iommu()
1515 g_iommus[iommu->seq_id] = NULL; in free_dmar_iommu()
1518 free_context_table(iommu); in free_dmar_iommu()
1543 struct intel_iommu *iommu) in __iommu_attach_domain() argument
1548 ndomains = cap_ndoms(iommu->cap); in __iommu_attach_domain()
1549 num = find_first_zero_bit(iommu->domain_ids, ndomains); in __iommu_attach_domain()
1551 set_bit(num, iommu->domain_ids); in __iommu_attach_domain()
1552 iommu->domains[num] = domain; in __iommu_attach_domain()
1561 struct intel_iommu *iommu) in iommu_attach_domain() argument
1566 spin_lock_irqsave(&iommu->lock, flags); in iommu_attach_domain()
1567 num = __iommu_attach_domain(domain, iommu); in iommu_attach_domain()
1568 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_attach_domain()
1576 struct intel_iommu *iommu) in iommu_attach_vm_domain() argument
1581 ndomains = cap_ndoms(iommu->cap); in iommu_attach_vm_domain()
1582 for_each_set_bit(num, iommu->domain_ids, ndomains) in iommu_attach_vm_domain()
1583 if (iommu->domains[num] == domain) in iommu_attach_vm_domain()
1586 return __iommu_attach_domain(domain, iommu); in iommu_attach_vm_domain()
1590 struct intel_iommu *iommu) in iommu_detach_domain() argument
1595 spin_lock_irqsave(&iommu->lock, flags); in iommu_detach_domain()
1597 ndomains = cap_ndoms(iommu->cap); in iommu_detach_domain()
1598 for_each_set_bit(num, iommu->domain_ids, ndomains) { in iommu_detach_domain()
1599 if (iommu->domains[num] == domain) { in iommu_detach_domain()
1600 clear_bit(num, iommu->domain_ids); in iommu_detach_domain()
1601 iommu->domains[num] = NULL; in iommu_detach_domain()
1606 clear_bit(domain->id, iommu->domain_ids); in iommu_detach_domain()
1607 iommu->domains[domain->id] = NULL; in iommu_detach_domain()
1609 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_detach_domain()
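
The attach/detach helpers above (lines 1543-1609) treat iommu->domain_ids as a bitmap of per-IOMMU domain-ID slots, paired with the iommu->domains[] pointer array. A simplified sketch of the allocation side, holding iommu->lock as the excerpts do; returning -ENOSPC on exhaustion is an assumed convention.

/* Sketch: claim a free domain-ID slot on this IOMMU for 'domain'. */
static int attach_domain_sketch(struct dmar_domain *domain, struct intel_iommu *iommu)
{
        unsigned long flags;
        int num, ndomains;

        spin_lock_irqsave(&iommu->lock, flags);
        ndomains = cap_ndoms(iommu->cap);
        num = find_first_zero_bit(iommu->domain_ids, ndomains);
        if (num < ndomains) {
                set_bit(num, iommu->domain_ids);
                iommu->domains[num] = domain;
        } else {
                num = -ENOSPC;          /* every domain ID on this IOMMU is in use */
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return num;
}
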
1613 struct intel_iommu *iommu) in domain_attach_iommu() argument
1618 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) { in domain_attach_iommu()
1621 domain->nid = iommu->node; in domain_attach_iommu()
1628 struct intel_iommu *iommu) in domain_detach_iommu() argument
1634 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) { in domain_detach_iommu()
1707 struct intel_iommu *iommu; in domain_init() local
1716 iommu = domain_get_iommu(domain); in domain_init()
1717 if (guest_width > cap_mgaw(iommu->cap)) in domain_init()
1718 guest_width = cap_mgaw(iommu->cap); in domain_init()
1722 sagaw = cap_sagaw(iommu->cap); in domain_init()
1732 if (ecap_coherent(iommu->ecap)) in domain_init()
1737 if (ecap_sc_support(iommu->ecap)) in domain_init()
1743 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); in domain_init()
1747 domain->nid = iommu->node; in domain_init()
1753 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); in domain_init()
1760 struct intel_iommu *iommu; in domain_exit() local
1781 for_each_active_iommu(iommu, drhd) in domain_exit()
1783 test_bit(iommu->seq_id, domain->iommu_bmp)) in domain_exit()
1784 iommu_detach_domain(domain, iommu); in domain_exit()
1793 struct intel_iommu *iommu, in domain_context_mapping_one() argument
1810 spin_lock_irqsave(&iommu->lock, flags); in domain_context_mapping_one()
1811 context = iommu_context_addr(iommu, bus, devfn, 1); in domain_context_mapping_one()
1812 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1815 spin_lock_irqsave(&iommu->lock, flags); in domain_context_mapping_one()
1817 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1826 id = iommu_attach_vm_domain(domain, iommu); in domain_context_mapping_one()
1828 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1839 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { in domain_context_mapping_one()
1842 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1852 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
1861 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
1864 context_set_address_width(context, iommu->agaw); in domain_context_mapping_one()
1878 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
1879 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
1883 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
1885 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
1888 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_mapping_one()
1890 domain_attach_iommu(domain, iommu); in domain_context_mapping_one()
1897 struct intel_iommu *iommu; member
1906 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
1915 struct intel_iommu *iommu; in domain_context_mapping() local
1919 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapping()
1920 if (!iommu) in domain_context_mapping()
1924 return domain_context_mapping_one(domain, iommu, bus, devfn, in domain_context_mapping()
1928 data.iommu = iommu; in domain_context_mapping()
1938 struct intel_iommu *iommu = opaque; in domain_context_mapped_cb() local
1940 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_mapped_cb()
1945 struct intel_iommu *iommu; in domain_context_mapped() local
1948 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapped()
1949 if (!iommu) in domain_context_mapped()
1953 return device_context_mapped(iommu, bus, devfn); in domain_context_mapped()
1956 domain_context_mapped_cb, iommu); in domain_context_mapped()
2119 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) in iommu_detach_dev() argument
2121 if (!iommu) in iommu_detach_dev()
2124 clear_context_table(iommu, bus, devfn); in iommu_detach_dev()
2125 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_detach_dev()
2127 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_detach_dev()
2136 info->dev->archdata.iommu = NULL; in unlink_domain_info()
2150 iommu_detach_dev(info->iommu, info->bus, info->devfn); in domain_remove_dev_info()
2153 iommu_detach_dependent_devices(info->iommu, info->dev); in domain_remove_dev_info()
2154 domain_detach_iommu(domain, info->iommu); in domain_remove_dev_info()
2172 info = dev->archdata.iommu; in find_domain()
2184 if (info->iommu->segment == segment && info->bus == bus && in dmar_search_domain_by_dev_info()
2191 static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu, in dmar_insert_dev_info() argument
2208 info->iommu = iommu; in dmar_insert_dev_info()
2215 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn); in dmar_insert_dev_info()
2229 dev->archdata.iommu = info; in dmar_insert_dev_info()
2245 struct intel_iommu *iommu; in get_domain_for_dev() local
2255 iommu = device_to_iommu(dev, &bus, &devfn); in get_domain_for_dev()
2256 if (!iommu) in get_domain_for_dev()
2269 iommu = info->iommu; in get_domain_for_dev()
2283 domain->id = iommu_attach_domain(domain, iommu); in get_domain_for_dev()
2288 domain_attach_iommu(domain, iommu); in get_domain_for_dev()
2296 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias), in get_domain_for_dev()
2309 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); in get_domain_for_dev()
2415 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) in iommu_prepare_rmrr_dev()
2452 struct intel_iommu *iommu; in si_domain_init() local
2460 for_each_active_iommu(iommu, drhd) { in si_domain_init()
2461 ret = iommu_attach_domain(si_domain, iommu); in si_domain_init()
2472 domain_attach_iommu(si_domain, iommu); in si_domain_init()
2508 info = dev->archdata.iommu; in identity_mapping()
2519 struct intel_iommu *iommu; in domain_add_dev_info() local
2523 iommu = device_to_iommu(dev, &bus, &devfn); in domain_add_dev_info()
2524 if (!iommu) in domain_add_dev_info()
2527 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); in domain_add_dev_info()
2694 struct intel_iommu *iommu; in iommu_prepare_static_identity_mapping() local
2709 for_each_active_iommu(iommu, drhd) in iommu_prepare_static_identity_mapping()
2732 static void intel_iommu_init_qi(struct intel_iommu *iommu) in intel_iommu_init_qi() argument
2740 if (!iommu->qi) { in intel_iommu_init_qi()
2744 dmar_fault(-1, iommu); in intel_iommu_init_qi()
2749 dmar_disable_qi(iommu); in intel_iommu_init_qi()
2752 if (dmar_enable_qi(iommu)) { in intel_iommu_init_qi()
2756 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
2757 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
2759 iommu->name); in intel_iommu_init_qi()
2761 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
2762 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
2763 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
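
intel_iommu_init_qi() (lines 2732-2763) decides per IOMMU whether invalidations go through the queued-invalidation interface or fall back to the register-based paths; only the flush callbacks differ afterwards. A condensed sketch of that selection, using only names visible in the excerpts above.

/* Sketch: wire up queued invalidation when available, else the register-based fallbacks. */
static void init_qi_sketch(struct intel_iommu *iommu)
{
        if (dmar_enable_qi(iommu)) {
                /* QI could not be enabled: use MMIO register based invalidation. */
                iommu->flush.flush_context = __iommu_flush_context;
                iommu->flush.flush_iotlb   = __iommu_flush_iotlb;
        } else {
                iommu->flush.flush_context = qi_flush_context;
                iommu->flush.flush_iotlb   = qi_flush_iotlb;
        }
}
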
2772 struct intel_iommu *iommu; in init_dmars() local
2814 for_each_active_iommu(iommu, drhd) { in init_dmars()
2815 g_iommus[iommu->seq_id] = iommu; in init_dmars()
2817 ret = iommu_init_domains(iommu); in init_dmars()
2826 ret = iommu_alloc_root_entry(iommu); in init_dmars()
2829 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
2833 for_each_active_iommu(iommu, drhd) in init_dmars()
2834 intel_iommu_init_qi(iommu); in init_dmars()
2892 for_each_iommu(iommu, drhd) { in init_dmars()
2899 iommu_disable_protect_mem_regions(iommu); in init_dmars()
2903 iommu_flush_write_buffer(iommu); in init_dmars()
2905 ret = dmar_set_interrupt(iommu); in init_dmars()
2909 iommu_set_root_entry(iommu); in init_dmars()
2911 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in init_dmars()
2912 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in init_dmars()
2913 iommu_enable_translation(iommu); in init_dmars()
2914 iommu_disable_protect_mem_regions(iommu); in init_dmars()
2920 for_each_active_iommu(iommu, drhd) { in init_dmars()
2921 disable_dmar_iommu(iommu); in init_dmars()
2922 free_dmar_iommu(iommu); in init_dmars()
2992 info = dev->archdata.iommu; in get_valid_domain_for_dev()
3054 struct intel_iommu *iommu; in __intel_map_single() local
3066 iommu = domain_get_iommu(domain); in __intel_map_single()
3078 !cap_zlr(iommu->cap)) in __intel_map_single()
3094 if (cap_caching_mode(iommu->cap)) in __intel_map_single()
3095 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1); in __intel_map_single()
3097 iommu_flush_write_buffer(iommu); in __intel_map_single()
3128 struct intel_iommu *iommu = g_iommus[i]; in flush_unmaps() local
3129 if (!iommu) in flush_unmaps()
3136 if (!cap_caching_mode(iommu->cap)) in flush_unmaps()
3137 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in flush_unmaps()
3145 if (cap_caching_mode(iommu->cap)) in flush_unmaps()
3146 iommu_flush_iotlb_psi(iommu, domain->id, in flush_unmaps()
3177 struct intel_iommu *iommu; in add_unmap() local
3183 iommu = domain_get_iommu(dom); in add_unmap()
3184 iommu_id = iommu->seq_id; in add_unmap()
3205 struct intel_iommu *iommu; in intel_unmap() local
3214 iommu = domain_get_iommu(domain); in intel_unmap()
3230 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, in intel_unmap()
3344 struct intel_iommu *iommu; in intel_map_sg() local
3354 iommu = domain_get_iommu(domain); in intel_map_sg()
3371 !cap_zlr(iommu->cap)) in intel_map_sg()
3387 if (cap_caching_mode(iommu->cap)) in intel_map_sg()
3388 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1); in intel_map_sg()
3390 iommu_flush_write_buffer(iommu); in intel_map_sg()
3498 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; in quirk_ioat_snb_local_iommu()
3538 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; in init_no_remapping_devices()
3547 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
3549 for_each_active_iommu(iommu, drhd) in init_iommu_hw()
3550 if (iommu->qi) in init_iommu_hw()
3551 dmar_reenable_qi(iommu); in init_iommu_hw()
3553 for_each_iommu(iommu, drhd) { in init_iommu_hw()
3560 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3564 iommu_flush_write_buffer(iommu); in init_iommu_hw()
3566 iommu_set_root_entry(iommu); in init_iommu_hw()
3568 iommu->flush.flush_context(iommu, 0, 0, 0, in init_iommu_hw()
3570 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in init_iommu_hw()
3571 iommu_enable_translation(iommu); in init_iommu_hw()
3572 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3581 struct intel_iommu *iommu; in iommu_flush_all() local
3583 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
3584 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
3586 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
3594 struct intel_iommu *iommu = NULL; in iommu_suspend() local
3597 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3598 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS, in iommu_suspend()
3600 if (!iommu->iommu_state) in iommu_suspend()
3606 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3607 iommu_disable_translation(iommu); in iommu_suspend()
3609 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
3611 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
3612 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
3613 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
3614 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
3615 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
3616 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
3617 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
3618 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
3620 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
3625 for_each_active_iommu(iommu, drhd) in iommu_suspend()
3626 kfree(iommu->iommu_state); in iommu_suspend()
3634 struct intel_iommu *iommu = NULL; in iommu_resume() local
3645 for_each_active_iommu(iommu, drhd) { in iommu_resume()
3647 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
3649 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
3650 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
3651 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
3652 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
3653 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
3654 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
3655 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
3656 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
3658 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
3661 for_each_active_iommu(iommu, drhd) in iommu_resume()
3662 kfree(iommu->iommu_state); in iommu_resume()
3809 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add() local
3811 if (g_iommus[iommu->seq_id]) in intel_iommu_add()
3814 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { in intel_iommu_add()
3816 iommu->name); in intel_iommu_add()
3819 if (!ecap_sc_support(iommu->ecap) && in intel_iommu_add()
3820 domain_update_iommu_snooping(iommu)) { in intel_iommu_add()
3822 iommu->name); in intel_iommu_add()
3825 sp = domain_update_iommu_superpage(iommu) - 1; in intel_iommu_add()
3826 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { in intel_iommu_add()
3828 iommu->name); in intel_iommu_add()
3835 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
3836 iommu_disable_translation(iommu); in intel_iommu_add()
3838 g_iommus[iommu->seq_id] = iommu; in intel_iommu_add()
3839 ret = iommu_init_domains(iommu); in intel_iommu_add()
3841 ret = iommu_alloc_root_entry(iommu); in intel_iommu_add()
3850 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3854 intel_iommu_init_qi(iommu); in intel_iommu_add()
3855 iommu_flush_write_buffer(iommu); in intel_iommu_add()
3856 ret = dmar_set_interrupt(iommu); in intel_iommu_add()
3860 iommu_set_root_entry(iommu); in intel_iommu_add()
3861 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in intel_iommu_add()
3862 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in intel_iommu_add()
3863 iommu_enable_translation(iommu); in intel_iommu_add()
3866 ret = iommu_attach_domain(si_domain, iommu); in intel_iommu_add()
3869 domain_attach_iommu(si_domain, iommu); in intel_iommu_add()
3872 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3876 disable_dmar_iommu(iommu); in intel_iommu_add()
3878 free_dmar_iommu(iommu); in intel_iommu_add()
3885 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug() local
3889 if (iommu == NULL) in dmar_iommu_hotplug()
3895 disable_dmar_iommu(iommu); in dmar_iommu_hotplug()
3896 free_dmar_iommu(iommu); in dmar_iommu_hotplug()
4074 struct intel_iommu *iommu; in intel_iommu_memory_notifier() local
4096 for_each_active_iommu(iommu, drhd) in intel_iommu_memory_notifier()
4097 iommu_flush_iotlb_psi(iommu, si_domain->id, in intel_iommu_memory_notifier()
4122 struct intel_iommu *iommu = dev_get_drvdata(dev); in intel_iommu_show_version() local
4123 u32 ver = readl(iommu->reg + DMAR_VER_REG); in intel_iommu_show_version()
4133 struct intel_iommu *iommu = dev_get_drvdata(dev); in intel_iommu_show_address() local
4134 return sprintf(buf, "%llx\n", iommu->reg_phys); in intel_iommu_show_address()
4142 struct intel_iommu *iommu = dev_get_drvdata(dev); in intel_iommu_show_cap() local
4143 return sprintf(buf, "%llx\n", iommu->cap); in intel_iommu_show_cap()
4151 struct intel_iommu *iommu = dev_get_drvdata(dev); in intel_iommu_show_ecap() local
4152 return sprintf(buf, "%llx\n", iommu->ecap); in intel_iommu_show_ecap()
4178 struct intel_iommu *iommu; in intel_iommu_init() local
4199 for_each_active_iommu(iommu, drhd) in intel_iommu_init()
4200 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_init()
4201 iommu_disable_translation(iommu); in intel_iommu_init()
4245 for_each_active_iommu(iommu, drhd) in intel_iommu_init()
4246 iommu->iommu_dev = iommu_device_create(NULL, iommu, in intel_iommu_init()
4248 iommu->name); in intel_iommu_init()
4270 struct intel_iommu *iommu = opaque; in iommu_detach_dev_cb() local
4272 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff); in iommu_detach_dev_cb()
4282 static void iommu_detach_dependent_devices(struct intel_iommu *iommu, in iommu_detach_dependent_devices() argument
4285 if (!iommu || !dev || !dev_is_pci(dev)) in iommu_detach_dependent_devices()
4288 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu); in iommu_detach_dependent_devices()
4295 struct intel_iommu *iommu; in domain_remove_one_dev_info() local
4300 iommu = device_to_iommu(dev, &bus, &devfn); in domain_remove_one_dev_info()
4301 if (!iommu) in domain_remove_one_dev_info()
4306 if (info->iommu == iommu && info->bus == bus && in domain_remove_one_dev_info()
4312 iommu_detach_dev(iommu, info->bus, info->devfn); in domain_remove_one_dev_info()
4313 iommu_detach_dependent_devices(iommu, dev); in domain_remove_one_dev_info()
4328 if (info->iommu == iommu) in domain_remove_one_dev_info()
4335 domain_detach_iommu(domain, iommu); in domain_remove_one_dev_info()
4337 iommu_detach_domain(domain, iommu); in domain_remove_one_dev_info()
4406 struct intel_iommu *iommu; in intel_iommu_attach_device() local
4432 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_attach_device()
4433 if (!iommu) in intel_iommu_attach_device()
4437 addr_width = agaw_to_width(iommu->agaw); in intel_iommu_attach_device()
4438 if (addr_width > cap_mgaw(iommu->cap)) in intel_iommu_attach_device()
4439 addr_width = cap_mgaw(iommu->cap); in intel_iommu_attach_device()
4452 while (iommu->agaw < dmar_domain->agaw) { in intel_iommu_attach_device()
4516 struct intel_iommu *iommu; in intel_iommu_unmap() local
4537 iommu = g_iommus[iommu_id]; in intel_iommu_unmap()
4542 ndomains = cap_ndoms(iommu->cap); in intel_iommu_unmap()
4543 for_each_set_bit(num, iommu->domain_ids, ndomains) { in intel_iommu_unmap()
4544 if (iommu->domains[num] == dmar_domain) in intel_iommu_unmap()
4545 iommu_flush_iotlb_psi(iommu, num, start_pfn, in intel_iommu_unmap()
4586 struct intel_iommu *iommu; in intel_iommu_add_device() local
4590 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_add_device()
4591 if (!iommu) in intel_iommu_add_device()
4594 iommu_device_link(iommu->iommu_dev, dev); in intel_iommu_add_device()
4607 struct intel_iommu *iommu; in intel_iommu_remove_device() local
4610 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_remove_device()
4611 if (!iommu) in intel_iommu_remove_device()
4616 iommu_device_unlink(iommu->iommu_dev, dev); in intel_iommu_remove_device()