Lines matching refs:domain. Each hit shows the source line number, the matching code fragment, and either its usage role (member, argument, local) or the enclosing function.
342 struct iommu_domain domain; /* generic domain data structure for iommu core */ member
354 struct dmar_domain *domain; /* pointer to domain */ member
388 struct dmar_domain *domain[HIGH_WATER_MARK]; member
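The member hits above come from struct dmar_domain and, for the HIGH_WATER_MARK array, from the deferred-flush table used later around source lines 3142-3187. A condensed sketch of both, restricted to the fields this listing actually touches; field order and any omitted members are guesses, not the authoritative layout:

    struct dmar_domain {
        int id;                          /* domain id */
        int nid;                         /* NUMA node id */
        DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
                                         /* bitmap of IOMMUs this domain uses */
        struct list_head devices;        /* all devices attached to the domain */
        struct iova_domain iovad;        /* IOVA allocator for this domain */
        struct dma_pte *pgd;             /* virtual address of the page directory */
        int gaw;                         /* max guest address width */
        int agaw;                        /* adjusted guest address width */
        int flags;                       /* e.g. DOMAIN_FLAG_VIRTUAL_MACHINE */
        int iommu_coherency;             /* hw page walks are cache coherent */
        int iommu_snooping;              /* snoop control supported */
        int iommu_superpage;             /* levels of superpages supported */
        int iommu_count;                 /* number of IOMMUs attached */
        spinlock_t iommu_lock;           /* protects the IOMMU set and count */
        u64 max_addr;                    /* maximum mapped address */
        struct iommu_domain domain;      /* generic domain for the iommu core */
    };

    struct deferred_flush_tables {
        int next;                                    /* next free slot */
        struct iova *iova[HIGH_WATER_MARK];
        struct dmar_domain *domain[HIGH_WATER_MARK];
        struct page *freelist[HIGH_WATER_MARK];
    };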
403 static void domain_exit(struct dmar_domain *domain);
404 static void domain_remove_dev_info(struct dmar_domain *domain);
405 static void domain_remove_one_dev_info(struct dmar_domain *domain,
409 static int domain_detach_iommu(struct dmar_domain *domain,
446 return container_of(dom, struct dmar_domain, domain); in to_dmar_domain()
529 static inline int domain_type_is_vm(struct dmar_domain *domain) in domain_type_is_vm() argument
531 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE; in domain_type_is_vm()
534 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain) in domain_type_is_vm_or_si() argument
536 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE | DOMAIN_FLAG_STATIC_IDENTITY); in domain_type_is_vm_or_si()
540 static inline int domain_pfn_supported(struct dmar_domain *domain, in domain_pfn_supported() argument
543 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in domain_pfn_supported()
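The listing cuts domain_pfn_supported() off after the width computation. Its body presumably ends with a range check along these lines (a minimal sketch, matching the mainline pattern rather than this exact tree):

    static inline int domain_pfn_supported(struct dmar_domain *domain,
                                           unsigned long pfn)
    {
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

        /* The pfn is mappable iff it fits in addr_width bits; the
           BITS_PER_LONG guard avoids an undefined full-width shift. */
        return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
    }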
582 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) in domain_get_iommu() argument
587 BUG_ON(domain_type_is_vm_or_si(domain)); in domain_get_iommu()
588 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus); in domain_get_iommu()
595 static void domain_update_iommu_coherency(struct dmar_domain *domain) in domain_update_iommu_coherency() argument
602 domain->iommu_coherency = 1; in domain_update_iommu_coherency()
604 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) { in domain_update_iommu_coherency()
607 domain->iommu_coherency = 0; in domain_update_iommu_coherency()
618 domain->iommu_coherency = 0; in domain_update_iommu_coherency()
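domain_update_iommu_coherency() starts from "coherent" and downgrades. Reading the three hits together, the shape is roughly the following sketch; the second assignment at source line 618 is taken to be the fallback path when no IOMMU is attached yet:

    domain->iommu_coherency = 1;

    /* Any attached IOMMU without coherent page walks taints the domain. */
    for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
        found = true;
        if (!ecap_coherent(g_iommus[i]->ecap)) {
            domain->iommu_coherency = 0;
            break;
        }
    }
    if (found)
        return;

    /* Nothing attached yet: use the lowest common denominator of all
       active IOMMUs so a later attach cannot weaken the setting. */
    rcu_read_lock();
    for_each_active_iommu(iommu, drhd) {
        if (!ecap_coherent(iommu->ecap)) {
            domain->iommu_coherency = 0;
            break;
        }
    }
    rcu_read_unlock();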
670 static void domain_update_iommu_cap(struct dmar_domain *domain) in domain_update_iommu_cap() argument
672 domain_update_iommu_coherency(domain); in domain_update_iommu_cap()
673 domain->iommu_snooping = domain_update_iommu_snooping(NULL); in domain_update_iommu_cap()
674 domain->iommu_superpage = domain_update_iommu_superpage(NULL); in domain_update_iommu_cap()
771 static void domain_flush_cache(struct dmar_domain *domain, in domain_flush_cache() argument
774 if (!domain->iommu_coherency) in domain_flush_cache()
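domain_flush_cache() is a two-line guard around an explicit cache flush, needed only when the IOMMU's page walks are not coherent with the CPU caches. Filling in the elided call, assumed to be clflush_cache_range() as in mainline:

    static void domain_flush_cache(struct dmar_domain *domain,
                                   void *addr, int size)
    {
        if (!domain->iommu_coherency)
            clflush_cache_range(addr, size);
    }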
835 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, in pfn_to_dma_pte() argument
839 int level = agaw_to_level(domain->agaw); in pfn_to_dma_pte()
842 BUG_ON(!domain->pgd); in pfn_to_dma_pte()
844 if (!domain_pfn_supported(domain, pfn)) in pfn_to_dma_pte()
848 parent = domain->pgd; in pfn_to_dma_pte()
863 tmp_page = alloc_pgtable_page(domain->nid); in pfn_to_dma_pte()
868 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); in pfn_to_dma_pte()
874 domain_flush_cache(domain, pte, sizeof(*pte)); in pfn_to_dma_pte()
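pfn_to_dma_pte() is the top-down walk that allocates missing page-table levels on demand. A trimmed sketch of the loop the hits above belong to; error paths, the lookup-only shortcut, and the race handling (mainline installs the new entry with cmpxchg64()) are elided:

    int level = agaw_to_level(domain->agaw);
    struct dma_pte *parent, *pte;

    BUG_ON(!domain->pgd);
    if (!domain_pfn_supported(domain, pfn))
        return NULL;        /* address beyond what the domain can map */

    parent = domain->pgd;
    while (1) {
        pte = &parent[pfn_level_offset(pfn, level)];
        if (level == *target_level)
            break;          /* reached the level the caller asked for */

        if (!dma_pte_present(pte)) {
            /* Missing intermediate table: allocate it on the domain's
               node, then flush page and pte for non-coherent IOMMUs. */
            void *tmp_page = alloc_pgtable_page(domain->nid);
            if (!tmp_page)
                return NULL;
            domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
            pte->val = virt_to_phys(tmp_page) | DMA_PTE_READ | DMA_PTE_WRITE;
            domain_flush_cache(domain, pte, sizeof(*pte));
        }
        if (level == 1)
            break;          /* leaf level reached */
        parent = phys_to_virt(dma_pte_addr(pte));
        level--;
    }
    return pte;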
891 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, in dma_pfn_level_pte() argument
896 int total = agaw_to_level(domain->agaw); in dma_pfn_level_pte()
899 parent = domain->pgd; in dma_pfn_level_pte()
923 static void dma_pte_clear_range(struct dmar_domain *domain, in dma_pte_clear_range() argument
930 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in dma_pte_clear_range()
931 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in dma_pte_clear_range()
937 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page); in dma_pte_clear_range()
948 domain_flush_cache(domain, first_pte, in dma_pte_clear_range()
954 static void dma_pte_free_level(struct dmar_domain *domain, int level, in dma_pte_free_level() argument
972 dma_pte_free_level(domain, level - 1, level_pte, in dma_pte_free_level()
979 domain_flush_cache(domain, pte, sizeof(*pte)); in dma_pte_free_level()
988 static void dma_pte_free_pagetable(struct dmar_domain *domain, in dma_pte_free_pagetable() argument
992 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in dma_pte_free_pagetable()
993 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in dma_pte_free_pagetable()
996 dma_pte_clear_range(domain, start_pfn, last_pfn); in dma_pte_free_pagetable()
999 dma_pte_free_level(domain, agaw_to_level(domain->agaw), in dma_pte_free_pagetable()
1000 domain->pgd, 0, start_pfn, last_pfn); in dma_pte_free_pagetable()
1003 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in dma_pte_free_pagetable()
1004 free_pgtable_page(domain->pgd); in dma_pte_free_pagetable()
1005 domain->pgd = NULL; in dma_pte_free_pagetable()
1015 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain, in dma_pte_list_pagetables() argument
1031 freelist = dma_pte_list_pagetables(domain, level - 1, in dma_pte_list_pagetables()
1039 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, in dma_pte_clear_level() argument
1064 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_clear_level()
1072 freelist = dma_pte_clear_level(domain, level - 1, in dma_pte_clear_level()
1082 domain_flush_cache(domain, first_pte, in dma_pte_clear_level()
1091 struct page *domain_unmap(struct dmar_domain *domain, in domain_unmap() argument
1097 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in domain_unmap()
1098 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in domain_unmap()
1102 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw), in domain_unmap()
1103 domain->pgd, 0, start_pfn, last_pfn, NULL); in domain_unmap()
1106 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in domain_unmap()
1107 struct page *pgd_page = virt_to_page(domain->pgd); in domain_unmap()
1111 domain->pgd = NULL; in domain_unmap()
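domain_unmap() must not free page-table pages before the IOTLB is flushed, so the clearing helpers chain pages onto a freelist through page->freelist instead of freeing them immediately. A sketch of the chaining idiom in dma_pte_list_pagetables():

    static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
                                                int level, struct dma_pte *pte,
                                                struct page *freelist)
    {
        struct page *pg;

        /* Chain this page-table page onto the caller's list; the whole
           chain is freed only after the IOTLB invalidation completes. */
        pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
        pg->freelist = freelist;
        freelist = pg;

        if (level == 1)
            return freelist;

        /* Recurse so lower-level tables end up on the chain as well. */
        pte = page_address(pg);
        do {
            if (dma_pte_present(pte) && !dma_pte_superpage(pte))
                freelist = dma_pte_list_pagetables(domain, level - 1,
                                                   pte, freelist);
            pte++;
        } while (!first_pte_in_page(pte));

        return freelist;
    }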
1281 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, in iommu_support_dev_iotlb() argument
1296 list_for_each_entry(info, &domain->devices, link) in iommu_support_dev_iotlb()
1335 static void iommu_flush_dev_iotlb(struct dmar_domain *domain, in iommu_flush_dev_iotlb() argument
1343 list_for_each_entry(info, &domain->devices, link) { in iommu_flush_dev_iotlb()
1482 struct dmar_domain *domain; in disable_dmar_iommu() local
1494 domain = iommu->domains[i]; in disable_dmar_iommu()
1496 if (domain_detach_iommu(domain, iommu) == 0 && in disable_dmar_iommu()
1497 !domain_type_is_vm(domain)) in disable_dmar_iommu()
1498 domain_exit(domain); in disable_dmar_iommu()
1525 struct dmar_domain *domain; in alloc_domain() local
1527 domain = alloc_domain_mem(); in alloc_domain()
1528 if (!domain) in alloc_domain()
1531 memset(domain, 0, sizeof(*domain)); in alloc_domain()
1532 domain->nid = -1; in alloc_domain()
1533 domain->flags = flags; in alloc_domain()
1534 spin_lock_init(&domain->iommu_lock); in alloc_domain()
1535 INIT_LIST_HEAD(&domain->devices); in alloc_domain()
1537 domain->id = atomic_inc_return(&vm_domid); in alloc_domain()
1539 return domain; in alloc_domain()
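alloc_domain() is shown nearly in full above. The only elided piece is the condition guarding the vm_domid allocation, which presumably keys off DOMAIN_FLAG_VIRTUAL_MACHINE, since only VM domains draw their id from that global counter:

    static struct dmar_domain *alloc_domain(int flags)
    {
        struct dmar_domain *domain;

        domain = alloc_domain_mem();
        if (!domain)
            return NULL;

        memset(domain, 0, sizeof(*domain));
        domain->nid = -1;               /* no NUMA affinity yet */
        domain->flags = flags;
        spin_lock_init(&domain->iommu_lock);
        INIT_LIST_HEAD(&domain->devices);
        if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
            domain->id = atomic_inc_return(&vm_domid);

        return domain;
    }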
1542 static int __iommu_attach_domain(struct dmar_domain *domain, in __iommu_attach_domain() argument
1552 iommu->domains[num] = domain; in __iommu_attach_domain()
1560 static int iommu_attach_domain(struct dmar_domain *domain, in iommu_attach_domain() argument
1567 num = __iommu_attach_domain(domain, iommu); in iommu_attach_domain()
1575 static int iommu_attach_vm_domain(struct dmar_domain *domain, in iommu_attach_vm_domain() argument
1583 if (iommu->domains[num] == domain) in iommu_attach_vm_domain()
1586 return __iommu_attach_domain(domain, iommu); in iommu_attach_vm_domain()
1589 static void iommu_detach_domain(struct dmar_domain *domain, in iommu_detach_domain() argument
1596 if (domain_type_is_vm_or_si(domain)) { in iommu_detach_domain()
1599 if (iommu->domains[num] == domain) { in iommu_detach_domain()
1606 clear_bit(domain->id, iommu->domain_ids); in iommu_detach_domain()
1607 iommu->domains[domain->id] = NULL; in iommu_detach_domain()
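__iommu_attach_domain() hands out per-IOMMU domain numbers from the iommu->domain_ids bitmap, bounded by cap_ndoms(); iommu_detach_domain() reverses it with clear_bit(), as source lines 1606-1607 show. A sketch of the allocation (the exact error value is an assumption):

    static int __iommu_attach_domain(struct dmar_domain *domain,
                                     struct intel_iommu *iommu)
    {
        unsigned long ndomains = cap_ndoms(iommu->cap);
        int num;

        /* Claim the first free domain number on this IOMMU. */
        num = find_first_zero_bit(iommu->domain_ids, ndomains);
        if (num < ndomains) {
            set_bit(num, iommu->domain_ids);
            iommu->domains[num] = domain;
        } else {
            num = -ENOSPC;      /* hardware domain table is full */
        }

        return num;
    }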
1612 static void domain_attach_iommu(struct dmar_domain *domain, in domain_attach_iommu() argument
1617 spin_lock_irqsave(&domain->iommu_lock, flags); in domain_attach_iommu()
1618 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) { in domain_attach_iommu()
1619 domain->iommu_count++; in domain_attach_iommu()
1620 if (domain->iommu_count == 1) in domain_attach_iommu()
1621 domain->nid = iommu->node; in domain_attach_iommu()
1622 domain_update_iommu_cap(domain); in domain_attach_iommu()
1624 spin_unlock_irqrestore(&domain->iommu_lock, flags); in domain_attach_iommu()
1627 static int domain_detach_iommu(struct dmar_domain *domain, in domain_detach_iommu() argument
1633 spin_lock_irqsave(&domain->iommu_lock, flags); in domain_detach_iommu()
1634 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) { in domain_detach_iommu()
1635 count = --domain->iommu_count; in domain_detach_iommu()
1636 domain_update_iommu_cap(domain); in domain_detach_iommu()
1638 spin_unlock_irqrestore(&domain->iommu_lock, flags); in domain_detach_iommu()
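domain_attach_iommu() and domain_detach_iommu() keep iommu_count consistent with iommu_bmp under iommu_lock. The detach side's elided tail presumably returns the remaining count, which is what lets disable_dmar_iommu() at source line 1496 test for zero before destroying the domain:

    static int domain_detach_iommu(struct dmar_domain *domain,
                                   struct intel_iommu *iommu)
    {
        unsigned long flags;
        int count = INT_MAX;    /* "still referenced" if the bit was clear */

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
            count = --domain->iommu_count;
            domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);

        return count;
    }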
1686 static void domain_reserve_special_ranges(struct dmar_domain *domain) in domain_reserve_special_ranges() argument
1688 copy_reserved_iova(&reserved_iova_list, &domain->iovad); in domain_reserve_special_ranges()
1705 static int domain_init(struct dmar_domain *domain, int guest_width) in domain_init() argument
1711 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, in domain_init()
1713 domain_reserve_special_ranges(domain); in domain_init()
1716 iommu = domain_get_iommu(domain); in domain_init()
1719 domain->gaw = guest_width; in domain_init()
1730 domain->agaw = agaw; in domain_init()
1733 domain->iommu_coherency = 1; in domain_init()
1735 domain->iommu_coherency = 0; in domain_init()
1738 domain->iommu_snooping = 1; in domain_init()
1740 domain->iommu_snooping = 0; in domain_init()
1743 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); in domain_init()
1745 domain->iommu_superpage = 0; in domain_init()
1747 domain->nid = iommu->node; in domain_init()
1750 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); in domain_init()
1751 if (!domain->pgd) in domain_init()
1753 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); in domain_init()
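domain_init() clamps the requested guest width against the hardware and then negotiates an adjusted guest address width (AGAW) the IOMMU actually implements. A sketch of that negotiation, following the mainline pattern; sagaw is the capability bitmask of supported AGAWs:

    int adjust_width, agaw;
    unsigned long sagaw;

    /* Clamp the requested width to what the hardware can translate. */
    if (guest_width > cap_mgaw(iommu->cap))
        guest_width = cap_mgaw(iommu->cap);
    domain->gaw = guest_width;

    adjust_width = guestwidth_to_adjustwidth(guest_width);
    agaw = width_to_agaw(adjust_width);
    sagaw = cap_sagaw(iommu->cap);
    if (!test_bit(agaw, &sagaw)) {
        /* Hardware lacks this AGAW: fall forward to a wider one. */
        agaw = find_next_bit(&sagaw, 5, agaw);
        if (agaw >= 5)
            return -ENODEV;
    }
    domain->agaw = agaw;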
1757 static void domain_exit(struct dmar_domain *domain) in domain_exit() argument
1764 if (!domain) in domain_exit()
1772 domain_remove_dev_info(domain); in domain_exit()
1775 put_iova_domain(&domain->iovad); in domain_exit()
1777 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); in domain_exit()
1782 if (domain_type_is_vm(domain) || in domain_exit()
1783 test_bit(iommu->seq_id, domain->iommu_bmp)) in domain_exit()
1784 iommu_detach_domain(domain, iommu); in domain_exit()
1789 free_domain_mem(domain); in domain_exit()
1792 static int domain_context_mapping_one(struct dmar_domain *domain, in domain_context_mapping_one() argument
1806 BUG_ON(!domain->pgd); in domain_context_mapping_one()
1821 id = domain->id; in domain_context_mapping_one()
1822 pgd = domain->pgd; in domain_context_mapping_one()
1824 if (domain_type_is_vm_or_si(domain)) { in domain_context_mapping_one()
1825 if (domain_type_is_vm(domain)) { in domain_context_mapping_one()
1826 id = iommu_attach_vm_domain(domain, iommu); in domain_context_mapping_one()
1839 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { in domain_context_mapping_one()
1852 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
1870 domain_flush_cache(domain, context, sizeof(*context)); in domain_context_mapping_one()
1890 domain_attach_iommu(domain, iommu); in domain_context_mapping_one()
1896 struct dmar_domain *domain; member
1906 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
1912 domain_context_mapping(struct dmar_domain *domain, struct device *dev, in domain_context_mapping() argument
1924 return domain_context_mapping_one(domain, iommu, bus, devfn, in domain_context_mapping()
1927 data.domain = domain; in domain_context_mapping()
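For a PCI device, domain_context_mapping() must program a context entry for every possible DMA alias of the device, so it packages its arguments in the helper struct from source line 1896 and walks the aliases. A sketch of that tail, assuming the usual pci_for_each_dma_alias() pattern:

    struct domain_context_mapping_data data;

    if (!dev_is_pci(dev))
        return domain_context_mapping_one(domain, iommu, bus, devfn,
                                          translation);

    data.domain = domain;
    data.iommu = iommu;
    data.translation = translation;

    /* Calls domain_context_mapping_cb() once per (bus, devfn) alias. */
    return pci_for_each_dma_alias(to_pci_dev(dev),
                                  &domain_context_mapping_cb, &data);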
1968 static inline int hardware_largepage_caps(struct dmar_domain *domain, in hardware_largepage_caps() argument
1976 support = domain->iommu_superpage; in hardware_largepage_caps()
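hardware_largepage_caps() tells __domain_mapping() how large a page it may use: the IOVA pfn and the physical pfn must share alignment at each superpage stride, and the remaining run must still cover a whole large page. A sketch of the merge loop (VTD_STRIDE_SHIFT is 9 for the 512-entry tables):

    int support = domain->iommu_superpage;  /* superpage levels the hw allows */
    int level = 1;
    unsigned long pfnmerge;

    /* A low bit set in either pfn breaks alignment for that level. */
    pfnmerge = iov_pfn | phy_pfn;

    while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
        pages >>= VTD_STRIDE_SHIFT;
        if (!pages)
            break;              /* run too short for the next level */
        pfnmerge >>= VTD_STRIDE_SHIFT;
        level++;
        support--;
    }
    return level;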
1995 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in __domain_mapping() argument
2005 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)); in __domain_mapping()
2029 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); in __domain_mapping()
2031 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl); in __domain_mapping()
2048 dma_pte_free_pagetable(domain, iov_pfn, end_pfn); in __domain_mapping()
2094 domain_flush_cache(domain, first_pte, in __domain_mapping()
2105 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in domain_sg_mapping() argument
2109 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); in domain_sg_mapping()
2112 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in domain_pfn_mapping() argument
2116 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); in domain_pfn_mapping()
2139 static void domain_remove_dev_info(struct dmar_domain *domain) in domain_remove_dev_info() argument
2145 list_for_each_entry_safe(info, tmp, &domain->devices, link) { in domain_remove_dev_info()
2152 if (domain_type_is_vm(domain)) { in domain_remove_dev_info()
2154 domain_detach_iommu(domain, info->iommu); in domain_remove_dev_info()
2174 return info->domain; in find_domain()
2194 struct dmar_domain *domain) in dmar_insert_dev_info() argument
2207 info->domain = domain; in dmar_insert_dev_info()
2217 found = info2->domain; in dmar_insert_dev_info()
2226 list_add(&info->link, &domain->devices); in dmar_insert_dev_info()
2232 return domain; in dmar_insert_dev_info()
2244 struct dmar_domain *domain, *tmp; in get_domain_for_dev() local
2251 domain = find_domain(dev); in get_domain_for_dev()
2252 if (domain) in get_domain_for_dev()
2253 return domain; in get_domain_for_dev()
2270 domain = info->domain; in get_domain_for_dev()
2280 domain = alloc_domain(0); in get_domain_for_dev()
2281 if (!domain) in get_domain_for_dev()
2283 domain->id = iommu_attach_domain(domain, iommu); in get_domain_for_dev()
2284 if (domain->id < 0) { in get_domain_for_dev()
2285 free_domain_mem(domain); in get_domain_for_dev()
2288 domain_attach_iommu(domain, iommu); in get_domain_for_dev()
2289 if (domain_init(domain, gaw)) { in get_domain_for_dev()
2290 domain_exit(domain); in get_domain_for_dev()
2297 dma_alias & 0xff, NULL, domain); in get_domain_for_dev()
2299 if (!tmp || tmp != domain) { in get_domain_for_dev()
2300 domain_exit(domain); in get_domain_for_dev()
2301 domain = tmp; in get_domain_for_dev()
2304 if (!domain) in get_domain_for_dev()
2309 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); in get_domain_for_dev()
2311 if (!tmp || tmp != domain) { in get_domain_for_dev()
2312 domain_exit(domain); in get_domain_for_dev()
2313 domain = tmp; in get_domain_for_dev()
2316 return domain; in get_domain_for_dev()
2324 static int iommu_domain_identity_map(struct dmar_domain *domain, in iommu_domain_identity_map() argument
2331 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn), in iommu_domain_identity_map()
2338 start, end, domain->id); in iommu_domain_identity_map()
2343 dma_pte_clear_range(domain, first_vpfn, last_vpfn); in iommu_domain_identity_map()
2345 return domain_pfn_mapping(domain, first_vpfn, first_vpfn, in iommu_domain_identity_map()
2354 struct dmar_domain *domain; in iommu_prepare_identity_map() local
2357 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_prepare_identity_map()
2358 if (!domain) in iommu_prepare_identity_map()
2365 if (domain == si_domain && hw_pass_through) { in iommu_prepare_identity_map()
2385 if (end >> agaw_to_width(domain->agaw)) { in iommu_prepare_identity_map()
2388 agaw_to_width(domain->agaw), in iommu_prepare_identity_map()
2396 ret = iommu_domain_identity_map(domain, start, end); in iommu_prepare_identity_map()
2401 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL); in iommu_prepare_identity_map()
2408 domain_exit(domain); in iommu_prepare_identity_map()
2447 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2510 return (info->domain == si_domain); in identity_mapping()
2515 static int domain_add_dev_info(struct dmar_domain *domain, in domain_add_dev_info() argument
2527 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); in domain_add_dev_info()
2528 if (ndomain != domain) in domain_add_dev_info()
2531 ret = domain_context_mapping(domain, dev, translation); in domain_add_dev_info()
2533 domain_remove_one_dev_info(domain, dev); in domain_add_dev_info()
2933 struct dmar_domain *domain, in intel_alloc_iova() argument
2939 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); in intel_alloc_iova()
2947 iova = alloc_iova(&domain->iovad, nrpages, in intel_alloc_iova()
2952 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); in intel_alloc_iova()
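intel_alloc_iova() deliberately tries below 4GiB first when the mask allows more, because some devices advertise 64-bit DMA they cannot reliably perform; dmar_forcedac is the knob that skips that first attempt. A sketch built from the two alloc_iova() hits above:

    /* Honour both the device's mask and the domain's address width. */
    dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

    if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
        /* First try below 4GiB; lying 64-bit devices are common. */
        iova = alloc_iova(&domain->iovad, nrpages,
                          IOVA_PFN(DMA_BIT_MASK(32)), 1);
        if (iova)
            return iova;
    }
    iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);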
2964 struct dmar_domain *domain; in __get_valid_domain_for_dev() local
2967 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); in __get_valid_domain_for_dev()
2968 if (!domain) { in __get_valid_domain_for_dev()
2976 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL); in __get_valid_domain_for_dev()
2984 return domain; in __get_valid_domain_for_dev()
2994 return info->domain; in get_valid_domain_for_dev()
3049 struct dmar_domain *domain; in __intel_map_single() local
3062 domain = get_valid_domain_for_dev(dev); in __intel_map_single()
3063 if (!domain) in __intel_map_single()
3066 iommu = domain_get_iommu(domain); in __intel_map_single()
3069 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); in __intel_map_single()
3088 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), in __intel_map_single()
3095 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1); in __intel_map_single()
3105 __free_iova(&domain->iovad, iova); in __intel_map_single()
3142 struct dmar_domain *domain = deferred_flush[i].domain[j]; in flush_unmaps() local
3146 iommu_flush_iotlb_psi(iommu, domain->id, in flush_unmaps()
3151 iommu_flush_dev_iotlb(deferred_flush[i].domain[j], in flush_unmaps()
3154 __free_iova(&deferred_flush[i].domain[j]->iovad, iova); in flush_unmaps()
3187 deferred_flush[iommu_id].domain[next] = dom; in add_unmap()
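add_unmap() batches freed IOVAs per IOMMU so flush_unmaps() can amortize IOTLB invalidations over up to HIGH_WATER_MARK entries; a timer bounds how long a stale translation may linger. A sketch of the enqueue path; the lock, timer, and counter names follow mainline and should be treated as assumptions:

    static void add_unmap(struct dmar_domain *dom, struct iova *iova,
                          struct page *freelist)
    {
        unsigned long flags;
        int next, iommu_id;
        struct intel_iommu *iommu;

        spin_lock_irqsave(&async_umap_flush_lock, flags);
        if (list_size == HIGH_WATER_MARK)
            flush_unmaps();     /* tables full: flush synchronously now */

        iommu = domain_get_iommu(dom);
        iommu_id = iommu->seq_id;

        /* Park domain, iova and page freelist until the deferred flush. */
        next = deferred_flush[iommu_id].next;
        deferred_flush[iommu_id].domain[next] = dom;
        deferred_flush[iommu_id].iova[next] = iova;
        deferred_flush[iommu_id].freelist[next] = freelist;
        deferred_flush[iommu_id].next++;

        if (!timer_on) {
            mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
            timer_on = 1;
        }
        list_size++;
        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
    }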
3202 struct dmar_domain *domain; in intel_unmap() local
3211 domain = find_domain(dev); in intel_unmap()
3212 BUG_ON(!domain); in intel_unmap()
3214 iommu = domain_get_iommu(domain); in intel_unmap()
3216 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); in intel_unmap()
3227 freelist = domain_unmap(domain, start_pfn, last_pfn); in intel_unmap()
3230 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, in intel_unmap()
3233 __free_iova(&domain->iovad, iova); in intel_unmap()
3236 add_unmap(domain, iova, freelist); in intel_unmap()
3337 struct dmar_domain *domain; in intel_map_sg() local
3350 domain = get_valid_domain_for_dev(dev); in intel_map_sg()
3351 if (!domain) in intel_map_sg()
3354 iommu = domain_get_iommu(domain); in intel_map_sg()
3359 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), in intel_map_sg()
3378 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); in intel_map_sg()
3380 dma_pte_free_pagetable(domain, start_vpfn, in intel_map_sg()
3382 __free_iova(&domain->iovad, iova); in intel_map_sg()
3388 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1); in intel_map_sg()
4024 struct dmar_domain *domain; in device_notifier() local
4032 domain = find_domain(dev); in device_notifier()
4033 if (!domain) in device_notifier()
4037 domain_remove_one_dev_info(domain, dev); in device_notifier()
4038 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices)) in device_notifier()
4039 domain_exit(domain); in device_notifier()
4291 static void domain_remove_one_dev_info(struct dmar_domain *domain, in domain_remove_one_dev_info() argument
4305 list_for_each_entry_safe(info, tmp, &domain->devices, link) { in domain_remove_one_dev_info()
4335 domain_detach_iommu(domain, iommu); in domain_remove_one_dev_info()
4336 if (!domain_type_is_vm_or_si(domain)) in domain_remove_one_dev_info()
4337 iommu_detach_domain(domain, iommu); in domain_remove_one_dev_info()
4341 static int md_domain_init(struct dmar_domain *domain, int guest_width) in md_domain_init() argument
4345 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, in md_domain_init()
4347 domain_reserve_special_ranges(domain); in md_domain_init()
4350 domain->gaw = guest_width; in md_domain_init()
4352 domain->agaw = width_to_agaw(adjust_width); in md_domain_init()
4354 domain->iommu_coherency = 0; in md_domain_init()
4355 domain->iommu_snooping = 0; in md_domain_init()
4356 domain->iommu_superpage = 0; in md_domain_init()
4357 domain->max_addr = 0; in md_domain_init()
4360 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); in md_domain_init()
4361 if (!domain->pgd) in md_domain_init()
4363 domain_flush_cache(domain, domain->pgd, PAGE_SIZE); in md_domain_init()
4370 struct iommu_domain *domain; in intel_iommu_domain_alloc() local
4389 domain = &dmar_domain->domain; in intel_iommu_domain_alloc()
4390 domain->geometry.aperture_start = 0; in intel_iommu_domain_alloc()
4391 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw); in intel_iommu_domain_alloc()
4392 domain->geometry.force_aperture = true; in intel_iommu_domain_alloc()
4394 return domain; in intel_iommu_domain_alloc()
4397 static void intel_iommu_domain_free(struct iommu_domain *domain) in intel_iommu_domain_free() argument
4399 domain_exit(to_dmar_domain(domain)); in intel_iommu_domain_free()
4402 static int intel_iommu_attach_device(struct iommu_domain *domain, in intel_iommu_attach_device() argument
4405 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_attach_device()
4467 static void intel_iommu_detach_device(struct iommu_domain *domain, in intel_iommu_detach_device() argument
4470 domain_remove_one_dev_info(to_dmar_domain(domain), dev); in intel_iommu_detach_device()
4473 static int intel_iommu_map(struct iommu_domain *domain, in intel_iommu_map() argument
4477 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_map()
4511 static size_t intel_iommu_unmap(struct iommu_domain *domain, in intel_iommu_unmap() argument
4514 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_unmap()
4559 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, in intel_iommu_iova_to_phys() argument
4562 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_iova_to_phys()
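intel_iommu_iova_to_phys() resolves a translation by reusing the same walker as the map path; a target level of 0 makes pfn_to_dma_pte() descend to the deepest present entry rather than a fixed level. A sketch of the whole callback, following the mainline shape:

    static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
                                                dma_addr_t iova)
    {
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct dma_pte *pte;
        int level = 0;
        u64 phys = 0;

        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
        if (pte)
            phys = dma_pte_addr(pte);   /* base of the page backing iova */

        return phys;
    }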