Lines matching references to 'domain' in the AMD IOMMU driver (drivers/iommu/amd_iommu.c). Each entry gives the source line number, the matching line, and the enclosing function or declaration context.
92 struct protection_domain *domain; /* Domain the device is bound to */ member
115 static void update_domain(struct protection_domain *domain);
116 static int protection_domain_init(struct protection_domain *domain);
126 return container_of(dom, struct protection_domain, domain); in to_pdomain()
344 struct iommu_domain *domain; in init_iommu_group() local
351 domain = iommu_group_default_domain(group); in init_iommu_group()
352 if (!domain) in init_iommu_group()
355 dma_domain = to_pdomain(domain)->priv; in init_iommu_group()
1104 static void __domain_flush_pages(struct protection_domain *domain, in __domain_flush_pages() argument
1111 build_inv_iommu_pages(&cmd, address, size, domain->id, pde); in __domain_flush_pages()
1114 if (!domain->dev_iommu[i]) in __domain_flush_pages()
1124 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1135 static void domain_flush_pages(struct protection_domain *domain, in domain_flush_pages() argument
1138 __domain_flush_pages(domain, address, size, 0); in domain_flush_pages()
1142 static void domain_flush_tlb(struct protection_domain *domain) in domain_flush_tlb() argument
1144 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); in domain_flush_tlb()
1148 static void domain_flush_tlb_pde(struct protection_domain *domain) in domain_flush_tlb_pde() argument
1150 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); in domain_flush_tlb_pde()
1153 static void domain_flush_complete(struct protection_domain *domain) in domain_flush_complete() argument
1158 if (!domain->dev_iommu[i]) in domain_flush_complete()
1173 static void domain_flush_devices(struct protection_domain *domain) in domain_flush_devices() argument
1177 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
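The entries at 1104-1177 outline the flush path: __domain_flush_pages() builds one invalidation command, queues it only to IOMMUs that currently have devices in the domain (the dev_iommu[] counters), and then walks domain->dev_list to flush per-device IOTLBs; domain_flush_devices() reuses the same per-device walk. Below is a minimal userspace sketch of that two-stage fan-out, not the driver code; all type and function names are made up for illustration, and list_head is replaced by a plain singly linked list.

    #include <stddef.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_IOMMUS 4

    struct dev_data_model {
        const char *name;
        bool ats_enabled;                   /* only ATS devices get an IOTLB flush */
        struct dev_data_model *next;        /* stand-in for the list_head linkage */
    };

    struct domain_model {
        int id;
        int dev_iommu[MAX_IOMMUS];          /* devices behind each IOMMU, like dev_iommu[] */
        struct dev_data_model *dev_list;
    };

    /* Stage 1: one invalidate command per IOMMU that has devices in the domain.
     * Stage 2: a per-device IOTLB invalidation for ATS-enabled devices. */
    static void domain_flush_pages_model(struct domain_model *d,
                                         unsigned long address, size_t size)
    {
        for (int i = 0; i < MAX_IOMMUS; i++) {
            if (!d->dev_iommu[i])
                continue;                   /* mirrors the !domain->dev_iommu[i] skip */
            printf("iommu %d: invalidate domain %d, addr %#lx, %zu bytes\n",
                   i, d->id, address, size);
        }

        for (struct dev_data_model *dev = d->dev_list; dev; dev = dev->next) {
            if (!dev->ats_enabled)
                continue;
            printf("device %s: IOTLB invalidate addr %#lx, %zu bytes\n",
                   dev->name, address, size);
        }
    }

    int main(void)
    {
        struct dev_data_model nic = { "nic", true, NULL };
        struct dev_data_model gpu = { "gpu", false, &nic };
        struct domain_model d = { .id = 7, .dev_iommu = { 2 }, .dev_list = &gpu };

        domain_flush_pages_model(&d, 0x100000, 4096);
        return 0;
    }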
1193 static bool increase_address_space(struct protection_domain *domain, in increase_address_space() argument
1198 if (domain->mode == PAGE_MODE_6_LEVEL) in increase_address_space()
1206 *pte = PM_LEVEL_PDE(domain->mode, in increase_address_space()
1207 virt_to_phys(domain->pt_root)); in increase_address_space()
1208 domain->pt_root = pte; in increase_address_space()
1209 domain->mode += 1; in increase_address_space()
1210 domain->updated = true; in increase_address_space()
1215 static u64 *alloc_pte(struct protection_domain *domain, in alloc_pte() argument
1226 while (address > PM_LEVEL_SIZE(domain->mode)) in alloc_pte()
1227 increase_address_space(domain, gfp); in alloc_pte()
1229 level = domain->mode - 1; in alloc_pte()
1230 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; in alloc_pte()
1263 static u64 *fetch_pte(struct protection_domain *domain, in fetch_pte() argument
1270 if (address > PM_LEVEL_SIZE(domain->mode)) in fetch_pte()
1273 level = domain->mode - 1; in fetch_pte()
1274 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; in fetch_pte()
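The fragments between 1193 and 1274 show how the driver grows and walks its host page table: increase_address_space() installs a new top-level table whose first entry points at the old pt_root and bumps domain->mode, while alloc_pte()/fetch_pte() start at pt_root and take one index per level down to the leaf. The sketch below is a self-contained userspace model of that scheme, not the driver code; the 9-bit-per-level indexing with 4 KiB leaf pages matches my recollection of the AMD IOMMU page-table format, the cap at six levels is omitted, and the hardware PTE bit layout is replaced by plain pointers.

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    #define ENTRIES_PER_TABLE 512               /* 9 index bits per level */
    #define LEVEL_SHIFT(l)    (12 + 9 * (l))    /* level 0 maps 4 KiB pages */
    #define LEVEL_INDEX(l, a) (((a) >> LEVEL_SHIFT(l)) & 0x1ffULL)

    /* Each table slot is either a pointer to a child table or a leaf value. */
    typedef union pt_entry {
        union pt_entry *next;                   /* levels > 0 */
        uint64_t        leaf;                   /* level 0 */
    } pt_entry_t;

    struct domain_model {
        int         mode;                       /* levels in use, like domain->mode */
        pt_entry_t *pt_root;
    };

    static pt_entry_t *alloc_table(void)
    {
        return calloc(ENTRIES_PER_TABLE, sizeof(pt_entry_t));
    }

    /* Add one level: the old root becomes slot 0 of the new root, as
     * increase_address_space() does. */
    static void grow(struct domain_model *d)
    {
        pt_entry_t *new_root = alloc_table();

        new_root[0].next = d->pt_root;
        d->pt_root = new_root;
        d->mode += 1;
    }

    /* Descend from the root, allocating missing tables, and return the leaf slot
     * for the address (roughly what alloc_pte() does for 4 KiB mappings). */
    static pt_entry_t *walk_alloc(struct domain_model *d, uint64_t address)
    {
        int level = d->mode - 1;
        pt_entry_t *pte = &d->pt_root[LEVEL_INDEX(level, address)];

        while (level > 0) {
            if (!pte->next)
                pte->next = alloc_table();
            level -= 1;
            pte = &pte->next[LEVEL_INDEX(level, address)];
        }
        return pte;
    }

    int main(void)
    {
        struct domain_model d = { .mode = 2, .pt_root = alloc_table() };
        uint64_t addr = 0x123450000ULL;         /* ~4.5 GiB, outside a 2-level tree */

        /* Mirror the "while (address > PM_LEVEL_SIZE(mode))" check in alloc_pte(). */
        while (addr >= (1ULL << LEVEL_SHIFT(d.mode)))
            grow(&d);

        walk_alloc(&d, addr)->leaf = 0xabcd;
        printf("mode %d, leaf %#llx\n", d.mode,
               (unsigned long long)walk_alloc(&d, addr)->leaf);
        return 0;
    }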
1470 pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, in alloc_new_range()
1519 u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize); in alloc_new_range()
1527 update_domain(&dma_dom->domain); in alloc_new_range()
1532 update_domain(&dma_dom->domain); in alloc_new_range()
1660 static void add_domain_to_list(struct protection_domain *domain) in add_domain_to_list() argument
1665 list_add(&domain->list, &amd_iommu_pd_list); in add_domain_to_list()
1673 static void del_domain_from_list(struct protection_domain *domain) in del_domain_from_list() argument
1678 list_del(&domain->list); in del_domain_from_list()
1740 static void free_pagetable(struct protection_domain *domain) in free_pagetable() argument
1742 unsigned long root = (unsigned long)domain->pt_root; in free_pagetable()
1744 switch (domain->mode) { in free_pagetable()
1800 static void free_gcr3_table(struct protection_domain *domain) in free_gcr3_table() argument
1802 if (domain->glx == 2) in free_gcr3_table()
1803 free_gcr3_tbl_level2(domain->gcr3_tbl); in free_gcr3_table()
1804 else if (domain->glx == 1) in free_gcr3_table()
1805 free_gcr3_tbl_level1(domain->gcr3_tbl); in free_gcr3_table()
1807 BUG_ON(domain->glx != 0); in free_gcr3_table()
1809 free_page((unsigned long)domain->gcr3_tbl); in free_gcr3_table()
1823 del_domain_from_list(&dom->domain); in dma_ops_domain_free()
1825 free_pagetable(&dom->domain); in dma_ops_domain_free()
1850 if (protection_domain_init(&dma_dom->domain)) in dma_ops_domain_alloc()
1853 dma_dom->domain.mode = PAGE_MODE_2_LEVEL; in dma_ops_domain_alloc()
1854 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); in dma_ops_domain_alloc()
1855 dma_dom->domain.flags = PD_DMA_OPS_MASK; in dma_ops_domain_alloc()
1856 dma_dom->domain.priv = dma_dom; in dma_ops_domain_alloc()
1857 if (!dma_dom->domain.pt_root) in dma_ops_domain_alloc()
1862 add_domain_to_list(&dma_dom->domain); in dma_ops_domain_alloc()
1887 static bool dma_ops_domain(struct protection_domain *domain) in dma_ops_domain() argument
1889 return domain->flags & PD_DMA_OPS_MASK; in dma_ops_domain()
1892 static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) in set_dte_entry() argument
1897 if (domain->mode != PAGE_MODE_NONE) in set_dte_entry()
1898 pte_root = virt_to_phys(domain->pt_root); in set_dte_entry()
1900 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) in set_dte_entry()
1909 if (domain->flags & PD_IOMMUV2_MASK) { in set_dte_entry()
1910 u64 gcr3 = __pa(domain->gcr3_tbl); in set_dte_entry()
1911 u64 glx = domain->glx; in set_dte_entry()
1936 flags |= domain->id; in set_dte_entry()
1952 struct protection_domain *domain) in do_attach() argument
1963 dev_data->domain = domain; in do_attach()
1964 list_add(&dev_data->list, &domain->dev_list); in do_attach()
1967 domain->dev_iommu[iommu->index] += 1; in do_attach()
1968 domain->dev_cnt += 1; in do_attach()
1971 set_dte_entry(dev_data->devid, domain, ats); in do_attach()
1973 set_dte_entry(alias, domain, ats); in do_attach()
1989 if (!dev_data->domain) in do_detach()
1996 dev_data->domain->dev_iommu[iommu->index] -= 1; in do_detach()
1997 dev_data->domain->dev_cnt -= 1; in do_detach()
2000 dev_data->domain = NULL; in do_detach()
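The do_attach()/do_detach() entries (1952-2000) show the bookkeeping both directions must keep in sync: the device's domain pointer, its membership on domain->dev_list, the per-IOMMU counter dev_iommu[iommu->index], and the total dev_cnt, with do_attach() finishing by writing the device table entry for the device and its alias. A compact C sketch of that symmetric bookkeeping follows; the types and the singly linked list are stand-ins, not the driver's structures.

    #include <assert.h>
    #include <stddef.h>

    #define MAX_IOMMUS 4

    struct domain_model;

    struct dev_data_model {
        int iommu_index;                        /* which IOMMU the device sits behind */
        struct domain_model *domain;            /* like dev_data->domain */
        struct dev_data_model *next;
    };

    struct domain_model {
        unsigned dev_iommu[MAX_IOMMUS];         /* devices per IOMMU */
        unsigned dev_cnt;                       /* total attached devices */
        struct dev_data_model *dev_list;
    };

    static void do_attach_model(struct dev_data_model *dev, struct domain_model *d)
    {
        dev->domain = d;
        dev->next = d->dev_list;                /* list_add() equivalent */
        d->dev_list = dev;

        d->dev_iommu[dev->iommu_index] += 1;
        d->dev_cnt += 1;
        /* the real do_attach() now writes the DTE for the devid and its alias */
    }

    static void do_detach_model(struct dev_data_model *dev)
    {
        struct domain_model *d = dev->domain;

        if (!d)
            return;                             /* mirrors the !dev_data->domain check */

        d->dev_iommu[dev->iommu_index] -= 1;
        d->dev_cnt -= 1;

        for (struct dev_data_model **p = &d->dev_list; *p; p = &(*p)->next)
            if (*p == dev) {                    /* list_del() equivalent */
                *p = dev->next;
                break;
            }
        dev->domain = NULL;
    }

    int main(void)
    {
        struct domain_model d = { 0 };
        struct dev_data_model dev = { .iommu_index = 1 };

        do_attach_model(&dev, &d);
        assert(d.dev_cnt == 1 && d.dev_iommu[1] == 1);
        do_detach_model(&dev);
        assert(d.dev_cnt == 0 && dev.domain == NULL);
        return 0;
    }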
2015 struct protection_domain *domain) in __attach_device() argument
2026 spin_lock(&domain->lock); in __attach_device()
2029 if (dev_data->domain != NULL) in __attach_device()
2033 do_attach(dev_data, domain); in __attach_device()
2040 spin_unlock(&domain->lock); in __attach_device()
2137 struct protection_domain *domain) in attach_device() argument
2146 if (domain->flags & PD_IOMMUV2_MASK) { in attach_device()
2165 ret = __attach_device(dev_data, domain); in attach_device()
2173 domain_flush_tlb_pde(domain); in attach_device()
2183 struct protection_domain *domain; in __detach_device() local
2191 if (WARN_ON(!dev_data->domain)) in __detach_device()
2194 domain = dev_data->domain; in __detach_device()
2196 spin_lock(&domain->lock); in __detach_device()
2200 spin_unlock(&domain->lock); in __detach_device()
2208 struct protection_domain *domain; in detach_device() local
2213 domain = dev_data->domain; in detach_device()
2220 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
2231 struct iommu_domain *domain; in amd_iommu_add_device() local
2262 domain = iommu_get_domain_for_dev(dev); in amd_iommu_add_device()
2263 if (domain->type == IOMMU_DOMAIN_IDENTITY) in amd_iommu_add_device()
2304 struct protection_domain *domain; in get_domain() local
2314 domain = to_pdomain(io_domain); in get_domain()
2315 if (!dma_ops_domain(domain)) in get_domain()
2318 return domain; in get_domain()
2321 static void update_device_table(struct protection_domain *domain) in update_device_table() argument
2325 list_for_each_entry(dev_data, &domain->dev_list, list) in update_device_table()
2326 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); in update_device_table()
2329 static void update_domain(struct protection_domain *domain) in update_domain() argument
2331 if (!domain->updated) in update_domain()
2334 update_device_table(domain); in update_domain()
2336 domain_flush_devices(domain); in update_domain()
2337 domain_flush_tlb_pde(domain); in update_domain()
2339 domain->updated = false; in update_domain()
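The update_domain() entries (2321-2339), together with the places that set domain->updated (1210, 3208, 3254), show a mark-dirty-then-propagate pattern: structural changes only raise the flag, and update_domain() later rewrites the device table entry of every attached device, flushes the device table caches, flushes the TLB including page-directory entries, and clears the flag. The sketch below models just that control flow with hypothetical names.

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_model {
        int devid;
        struct dev_model *next;
    };

    struct domain_model {
        bool updated;                           /* raised by pt_root/mode changes */
        struct dev_model *dev_list;
    };

    static void set_dte_entry_model(int devid) { printf("rewrite DTE %#x\n", devid); }
    static void flush_devices_model(void)      { printf("flush device table caches\n"); }
    static void flush_tlb_pde_model(void)      { printf("flush TLB incl. PDEs\n"); }

    static void update_domain_model(struct domain_model *d)
    {
        if (!d->updated)
            return;                             /* nothing structural changed */

        for (struct dev_model *dev = d->dev_list; dev; dev = dev->next)
            set_dte_entry_model(dev->devid);    /* update_device_table() step */

        flush_devices_model();
        flush_tlb_pde_model();
        d->updated = false;
    }

    int main(void)
    {
        struct dev_model a = { 0x10, NULL }, b = { 0x18, &a };
        struct domain_model d = { .updated = true, .dev_list = &b };

        update_domain_model(&d);                /* does the work once */
        update_domain_model(&d);                /* no-op afterwards */
        return 0;
    }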
2357 pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page, in dma_ops_get_pte()
2363 update_domain(&dom->domain); in dma_ops_get_pte()
2496 domain_flush_tlb(&dma_dom->domain); in __map_single()
2499 domain_flush_pages(&dma_dom->domain, address, size); in __map_single()
2548 domain_flush_pages(&dma_dom->domain, flush_addr, size); in __unmap_single()
2562 struct protection_domain *domain; in map_page() local
2569 domain = get_domain(dev); in map_page()
2570 if (PTR_ERR(domain) == -EINVAL) in map_page()
2572 else if (IS_ERR(domain)) in map_page()
2577 spin_lock_irqsave(&domain->lock, flags); in map_page()
2579 addr = __map_single(dev, domain->priv, paddr, size, dir, false, in map_page()
2584 domain_flush_complete(domain); in map_page()
2587 spin_unlock_irqrestore(&domain->lock, flags); in map_page()
2599 struct protection_domain *domain; in unmap_page() local
2603 domain = get_domain(dev); in unmap_page()
2604 if (IS_ERR(domain)) in unmap_page()
2607 spin_lock_irqsave(&domain->lock, flags); in unmap_page()
2609 __unmap_single(domain->priv, dma_addr, size, dir); in unmap_page()
2611 domain_flush_complete(domain); in unmap_page()
2613 spin_unlock_irqrestore(&domain->lock, flags); in unmap_page()
2625 struct protection_domain *domain; in map_sg() local
2634 domain = get_domain(dev); in map_sg()
2635 if (IS_ERR(domain)) in map_sg()
2640 spin_lock_irqsave(&domain->lock, flags); in map_sg()
2645 s->dma_address = __map_single(dev, domain->priv, in map_sg()
2656 domain_flush_complete(domain); in map_sg()
2659 spin_unlock_irqrestore(&domain->lock, flags); in map_sg()
2665 __unmap_single(domain->priv, s->dma_address, in map_sg()
2684 struct protection_domain *domain; in unmap_sg() local
2690 domain = get_domain(dev); in unmap_sg()
2691 if (IS_ERR(domain)) in unmap_sg()
2694 spin_lock_irqsave(&domain->lock, flags); in unmap_sg()
2697 __unmap_single(domain->priv, s->dma_address, in unmap_sg()
2702 domain_flush_complete(domain); in unmap_sg()
2704 spin_unlock_irqrestore(&domain->lock, flags); in unmap_sg()
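The map_sg()/unmap_sg() entries (2625-2704) show the scatterlist path: under the domain lock each segment goes through __map_single(), a completion flush is issued, and if any segment fails the entries mapped so far are unwound before returning. Here is a small userspace model of that map-then-unwind error handling; the mapping function, the bad-address sentinel, and the structures are invented for the example.

    #include <stddef.h>
    #include <stdio.h>

    #define BAD_DMA_ADDRESS 0UL                 /* stand-in for the driver's error value */

    struct sg_model {
        unsigned long paddr;
        size_t length;
        unsigned long dma_address;              /* filled in on success */
    };

    /* Pretend per-segment mapping; one address is rigged to fail so the
     * unwind path below actually runs. */
    static unsigned long map_single_model(unsigned long paddr, size_t len)
    {
        (void)len;
        if (paddr == 0xdead000UL)
            return BAD_DMA_ADDRESS;
        return 0x80000000UL + paddr;            /* fake IOVA */
    }

    static void unmap_single_model(unsigned long dma, size_t len)
    {
        printf("unmap %#lx (%zu bytes)\n", dma, len);
    }

    static int map_sg_model(struct sg_model *sg, int nents)
    {
        int mapped = 0;

        for (int i = 0; i < nents; i++) {
            sg[i].dma_address = map_single_model(sg[i].paddr, sg[i].length);
            if (sg[i].dma_address == BAD_DMA_ADDRESS)
                goto unwind;
            mapped++;
        }
        return mapped;                          /* all segments mapped */

    unwind:
        /* undo everything mapped so far, like the error path in map_sg() */
        for (int i = 0; i < mapped; i++)
            unmap_single_model(sg[i].dma_address, sg[i].length);
        return 0;
    }

    int main(void)
    {
        struct sg_model sg[] = {
            { 0x1000, 4096, 0 }, { 0x2000, 4096, 0 },
            { 0xdead000, 4096, 0 }, { 0x4000, 4096, 0 },
        };
        printf("mapped %d segments\n", map_sg_model(sg, 4));
        return 0;
    }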
2715 struct protection_domain *domain; in alloc_coherent() local
2721 domain = get_domain(dev); in alloc_coherent()
2722 if (PTR_ERR(domain) == -EINVAL) { in alloc_coherent()
2726 } else if (IS_ERR(domain)) in alloc_coherent()
2748 spin_lock_irqsave(&domain->lock, flags); in alloc_coherent()
2750 *dma_addr = __map_single(dev, domain->priv, page_to_phys(page), in alloc_coherent()
2754 spin_unlock_irqrestore(&domain->lock, flags); in alloc_coherent()
2758 domain_flush_complete(domain); in alloc_coherent()
2760 spin_unlock_irqrestore(&domain->lock, flags); in alloc_coherent()
2779 struct protection_domain *domain; in free_coherent() local
2788 domain = get_domain(dev); in free_coherent()
2789 if (IS_ERR(domain)) in free_coherent()
2792 spin_lock_irqsave(&domain->lock, flags); in free_coherent()
2794 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); in free_coherent()
2796 domain_flush_complete(domain); in free_coherent()
2798 spin_unlock_irqrestore(&domain->lock, flags); in free_coherent()
2863 static void cleanup_domain(struct protection_domain *domain) in cleanup_domain() argument
2870 while (!list_empty(&domain->dev_list)) { in cleanup_domain()
2871 entry = list_first_entry(&domain->dev_list, in cleanup_domain()
2879 static void protection_domain_free(struct protection_domain *domain) in protection_domain_free() argument
2881 if (!domain) in protection_domain_free()
2884 del_domain_from_list(domain); in protection_domain_free()
2886 if (domain->id) in protection_domain_free()
2887 domain_id_free(domain->id); in protection_domain_free()
2889 kfree(domain); in protection_domain_free()
2892 static int protection_domain_init(struct protection_domain *domain) in protection_domain_init() argument
2894 spin_lock_init(&domain->lock); in protection_domain_init()
2895 mutex_init(&domain->api_lock); in protection_domain_init()
2896 domain->id = domain_id_alloc(); in protection_domain_init()
2897 if (!domain->id) in protection_domain_init()
2899 INIT_LIST_HEAD(&domain->dev_list); in protection_domain_init()
2906 struct protection_domain *domain; in protection_domain_alloc() local
2908 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in protection_domain_alloc()
2909 if (!domain) in protection_domain_alloc()
2912 if (protection_domain_init(domain)) in protection_domain_alloc()
2915 add_domain_to_list(domain); in protection_domain_alloc()
2917 return domain; in protection_domain_alloc()
2920 kfree(domain); in protection_domain_alloc()
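Entries 2879-2920 show the protection-domain lifecycle: protection_domain_alloc() zero-allocates the structure, protection_domain_init() sets up the locks, grabs a non-zero domain id and the device list head, the new domain is linked onto the global list, and protection_domain_free() reverses the steps, releasing the id only if one was actually assigned. A condensed userspace sketch of that alloc/free symmetry, with a toy id allocator standing in for domain_id_alloc()/domain_id_free():

    #include <stdlib.h>
    #include <stdio.h>

    struct pd_model {
        int id;                                 /* 0 means "no id assigned" */
        struct pd_model *next;                  /* linkage on the global domain list */
    };

    static struct pd_model *pd_list;            /* stand-in for amd_iommu_pd_list */
    static int next_id = 1;                     /* toy id allocator, ids start at 1 */

    static void domain_id_free_model(int id)
    {
        printf("release domain id %d\n", id);
    }

    static struct pd_model *pd_alloc(void)
    {
        struct pd_model *d = calloc(1, sizeof(*d));

        if (!d)
            return NULL;
        d->id = next_id++;                      /* protection_domain_init() step */
        d->next = pd_list;                      /* add_domain_to_list() equivalent */
        pd_list = d;
        return d;
    }

    static void pd_free(struct pd_model *d)
    {
        if (!d)
            return;
        for (struct pd_model **p = &pd_list; *p; p = &(*p)->next)
            if (*p == d) {                      /* del_domain_from_list() equivalent */
                *p = d->next;
                break;
            }
        if (d->id)
            domain_id_free_model(d->id);        /* only if an id was assigned */
        free(d);
    }

    int main(void)
    {
        struct pd_model *d = pd_alloc();

        printf("allocated domain id %d\n", d->id);
        pd_free(d);
        return 0;
    }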
2943 pdomain->domain.geometry.aperture_start = 0; in amd_iommu_domain_alloc()
2944 pdomain->domain.geometry.aperture_end = ~0ULL; in amd_iommu_domain_alloc()
2945 pdomain->domain.geometry.force_aperture = true; in amd_iommu_domain_alloc()
2954 pdomain = &dma_domain->domain; in amd_iommu_domain_alloc()
2967 return &pdomain->domain; in amd_iommu_domain_alloc()
2972 struct protection_domain *domain; in amd_iommu_domain_free() local
2977 domain = to_pdomain(dom); in amd_iommu_domain_free()
2979 if (domain->dev_cnt > 0) in amd_iommu_domain_free()
2980 cleanup_domain(domain); in amd_iommu_domain_free()
2982 BUG_ON(domain->dev_cnt != 0); in amd_iommu_domain_free()
2984 if (domain->mode != PAGE_MODE_NONE) in amd_iommu_domain_free()
2985 free_pagetable(domain); in amd_iommu_domain_free()
2987 if (domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_free()
2988 free_gcr3_table(domain); in amd_iommu_domain_free()
2990 protection_domain_free(domain); in amd_iommu_domain_free()
3005 if (dev_data->domain != NULL) in amd_iommu_detach_device()
3018 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_attach_device() local
3032 if (dev_data->domain) in amd_iommu_attach_device()
3035 ret = attach_device(dev, domain); in amd_iommu_attach_device()
3045 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_map() local
3049 if (domain->mode == PAGE_MODE_NONE) in amd_iommu_map()
3057 mutex_lock(&domain->api_lock); in amd_iommu_map()
3058 ret = iommu_map_page(domain, iova, paddr, prot, page_size); in amd_iommu_map()
3059 mutex_unlock(&domain->api_lock); in amd_iommu_map()
3067 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_unmap() local
3070 if (domain->mode == PAGE_MODE_NONE) in amd_iommu_unmap()
3073 mutex_lock(&domain->api_lock); in amd_iommu_unmap()
3074 unmap_size = iommu_unmap_page(domain, iova, page_size); in amd_iommu_unmap()
3075 mutex_unlock(&domain->api_lock); in amd_iommu_unmap()
3077 domain_flush_tlb_pde(domain); in amd_iommu_unmap()
3085 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_iova_to_phys() local
3089 if (domain->mode == PAGE_MODE_NONE) in amd_iommu_iova_to_phys()
3092 pte = fetch_pte(domain, iova, &pte_pgsize); in amd_iommu_iova_to_phys()
3201 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_direct_map() local
3204 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_direct_map()
3207 domain->mode = PAGE_MODE_NONE; in amd_iommu_domain_direct_map()
3208 domain->updated = true; in amd_iommu_domain_direct_map()
3211 update_domain(domain); in amd_iommu_domain_direct_map()
3214 free_pagetable(domain); in amd_iommu_domain_direct_map()
3216 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_direct_map()
3222 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_enable_v2() local
3236 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_enable_v2()
3244 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_enable_v2()
3248 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); in amd_iommu_domain_enable_v2()
3249 if (domain->gcr3_tbl == NULL) in amd_iommu_domain_enable_v2()
3252 domain->glx = levels; in amd_iommu_domain_enable_v2()
3253 domain->flags |= PD_IOMMUV2_MASK; in amd_iommu_domain_enable_v2()
3254 domain->updated = true; in amd_iommu_domain_enable_v2()
3256 update_domain(domain); in amd_iommu_domain_enable_v2()
3261 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_enable_v2()
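The amd_iommu_domain_enable_v2() entries (3222-3261) show how a domain is switched to IOMMUv2 mode under its lock: the call is refused if devices are already attached or the PD_IOMMUV2_MASK flag is already set, a GCR3 root page is allocated, the number of GCR3 levels goes into glx, the flag and updated are set, and update_domain() pushes the change out. The sketch below only models that precondition/commit ordering; the level calculation and error codes are plausible guesses, not lifted from the driver.

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct domain_model {
        unsigned dev_cnt;
        bool     v2_enabled;                    /* stands in for PD_IOMMUV2_MASK */
        bool     updated;
        int      glx;
        void    *gcr3_tbl;
    };

    static int enable_v2_model(struct domain_model *d, int pasids)
    {
        int levels = 0;

        /* levels needed if each GCR3 level resolves 9 bits of PASID (a guess
         * kept deliberately simple) */
        for (int n = pasids - 1; n >> 9; n >>= 9)
            levels++;

        if (d->dev_cnt > 0 || d->v2_enabled)
            return -EBUSY;                      /* must happen before devices attach */

        d->gcr3_tbl = calloc(512, sizeof(uint64_t));
        if (!d->gcr3_tbl)
            return -ENOMEM;

        d->glx = levels;
        d->v2_enabled = true;
        d->updated = true;                      /* update_domain() would now run */
        return 0;
    }

    int main(void)
    {
        struct domain_model d = { 0 };
        int ret = enable_v2_model(&d, 1 << 16);

        printf("enable_v2: %d (glx=%d)\n", ret, d.glx);
        printf("second enable_v2: %d\n", enable_v2_model(&d, 1 << 16));
        return 0;
    }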
3267 static int __flush_pasid(struct protection_domain *domain, int pasid, in __flush_pasid() argument
3274 if (!(domain->flags & PD_IOMMUV2_MASK)) in __flush_pasid()
3277 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); in __flush_pasid()
3284 if (domain->dev_iommu[i] == 0) in __flush_pasid()
3293 domain_flush_complete(domain); in __flush_pasid()
3296 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
3319 domain_flush_complete(domain); in __flush_pasid()
3328 static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid, in __amd_iommu_flush_page() argument
3333 return __flush_pasid(domain, pasid, address, false); in __amd_iommu_flush_page()
3339 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_flush_page() local
3343 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_page()
3344 ret = __amd_iommu_flush_page(domain, pasid, address); in amd_iommu_flush_page()
3345 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_page()
3351 static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid) in __amd_iommu_flush_tlb() argument
3355 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, in __amd_iommu_flush_tlb()
3361 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_flush_tlb() local
3365 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_tlb()
3366 ret = __amd_iommu_flush_tlb(domain, pasid); in amd_iommu_flush_tlb()
3367 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_tlb()
3405 static int __set_gcr3(struct protection_domain *domain, int pasid, in __set_gcr3() argument
3410 if (domain->mode != PAGE_MODE_NONE) in __set_gcr3()
3413 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); in __set_gcr3()
3419 return __amd_iommu_flush_tlb(domain, pasid); in __set_gcr3()
3422 static int __clear_gcr3(struct protection_domain *domain, int pasid) in __clear_gcr3() argument
3426 if (domain->mode != PAGE_MODE_NONE) in __clear_gcr3()
3429 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); in __clear_gcr3()
3435 return __amd_iommu_flush_tlb(domain, pasid); in __clear_gcr3()
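The GCR3 entries (1800-1809 and 3405-3435) show a second, per-PASID table tree hanging off the domain: free_gcr3_table() tears down up to glx levels, and __set_gcr3()/__clear_gcr3() locate the slot for a PASID via __get_gcr3_pte() and then flush that PASID's TLB entries. The model below assumes 9 bits of PASID per GCR3 level, which matches my recollection but should be checked against the spec; entry encoding and locking are omitted.

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    /* Each slot either points at a child table (levels above 0) or holds the
     * value recorded for a PASID (level 0). */
    union gcr3_entry {
        union gcr3_entry *next;
        uint64_t          cr3;
    };

    static union gcr3_entry *alloc_level(void)
    {
        return calloc(512, sizeof(union gcr3_entry));
    }

    /* Roughly the job of __get_gcr3_pte(): walk glx extra levels down,
     * allocating missing tables when asked to, and return the level-0 slot. */
    static union gcr3_entry *get_gcr3_pte(union gcr3_entry *root, int glx,
                                          int pasid, int alloc)
    {
        for (int level = glx; level > 0; level--) {
            union gcr3_entry *pte = &root[(pasid >> (9 * level)) & 0x1ff];

            if (!pte->next) {
                if (!alloc)
                    return NULL;
                pte->next = alloc_level();
            }
            root = pte->next;
        }
        return &root[pasid & 0x1ff];
    }

    int main(void)
    {
        union gcr3_entry *gcr3_tbl = alloc_level();
        int glx = 1;                            /* two levels in total */

        get_gcr3_pte(gcr3_tbl, glx, 0x1234, 1)->cr3 = 0xdeadb000ULL;
        printf("pasid 0x1234 -> cr3 %#llx\n",
               (unsigned long long)get_gcr3_pte(gcr3_tbl, glx, 0x1234, 0)->cr3);
        return 0;
    }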
3441 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_set_gcr3() local
3445 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3446 ret = __set_gcr3(domain, pasid, cr3); in amd_iommu_domain_set_gcr3()
3447 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3455 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_clear_gcr3() local
3459 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3460 ret = __clear_gcr3(domain, pasid); in amd_iommu_domain_clear_gcr3()
3461 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3498 return &pdomain->domain; in amd_iommu_get_v2_domain()
3898 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, in irq_remapping_alloc() argument
3925 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); in irq_remapping_alloc()
3943 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_alloc()
3966 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_alloc()
3973 irq_domain_free_irqs_common(domain, virq, nr_irqs); in irq_remapping_alloc()
3977 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq, in irq_remapping_free() argument
3986 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_free()
3994 irq_domain_free_irqs_common(domain, virq, nr_irqs); in irq_remapping_free()
3997 static void irq_remapping_activate(struct irq_domain *domain, in irq_remapping_activate() argument
4006 static void irq_remapping_deactivate(struct irq_domain *domain, in irq_remapping_deactivate() argument