Lines Matching refs:domain
99 struct protection_domain *domain; /* Domain the device is bound to */ member
121 static void update_domain(struct protection_domain *domain);
132 return container_of(dom, struct protection_domain, domain); in to_pdomain()
1171 static void __domain_flush_pages(struct protection_domain *domain, in __domain_flush_pages() argument
1178 build_inv_iommu_pages(&cmd, address, size, domain->id, pde); in __domain_flush_pages()
1181 if (!domain->dev_iommu[i]) in __domain_flush_pages()
1191 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1202 static void domain_flush_pages(struct protection_domain *domain, in domain_flush_pages() argument
1205 __domain_flush_pages(domain, address, size, 0); in domain_flush_pages()
1209 static void domain_flush_tlb(struct protection_domain *domain) in domain_flush_tlb() argument
1211 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); in domain_flush_tlb()
1215 static void domain_flush_tlb_pde(struct protection_domain *domain) in domain_flush_tlb_pde() argument
1217 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); in domain_flush_tlb_pde()
1220 static void domain_flush_complete(struct protection_domain *domain) in domain_flush_complete() argument
1225 if (!domain->dev_iommu[i]) in domain_flush_complete()
1240 static void domain_flush_devices(struct protection_domain *domain) in domain_flush_devices() argument
1244 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
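
The flush helpers above, __domain_flush_pages() through domain_flush_devices() (lines 1171-1244), share one pattern: build the invalidation command once, queue it only on IOMMUs whose domain->dev_iommu[] count is non-zero, and follow up with per-device flushes for the entries on domain->dev_list. Below is a minimal, self-contained sketch of that filtering idea; the types and helpers are mock-ups for illustration, not the driver's.

    /* Illustration of the "skip IOMMUs without devices in this domain"
     * filtering used by the flush helpers.  All names here are mock-ups. */
    #include <stdio.h>

    #define MAX_IOMMUS 4

    struct mock_domain {
        int id;
        int dev_iommu[MAX_IOMMUS];   /* devices of this domain per IOMMU */
    };

    static void queue_inv_pages(int iommu_idx, int domid,
                                unsigned long address, unsigned long size)
    {
        printf("IOMMU %d: INVALIDATE_IOMMU_PAGES domid=%d addr=%#lx size=%#lx\n",
               iommu_idx, domid, address, size);
    }

    static void domain_flush_pages_sketch(struct mock_domain *dom,
                                          unsigned long address,
                                          unsigned long size)
    {
        for (int i = 0; i < MAX_IOMMUS; i++) {
            if (!dom->dev_iommu[i])
                continue;   /* no device of this domain behind IOMMU i */
            queue_inv_pages(i, dom->id, address, size);
        }
    }

    int main(void)
    {
        struct mock_domain dom = { .id = 42, .dev_iommu = { 1, 0, 2, 0 } };
        domain_flush_pages_sketch(&dom, 0x100000, 0x1000);
        return 0;
    }

The per-IOMMU counters are what make this cheap: a flush for a domain never touches hardware units that have no devices attached to that domain.
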
1260 static bool increase_address_space(struct protection_domain *domain, in increase_address_space() argument
1265 if (domain->mode == PAGE_MODE_6_LEVEL) in increase_address_space()
1273 *pte = PM_LEVEL_PDE(domain->mode, in increase_address_space()
1274 virt_to_phys(domain->pt_root)); in increase_address_space()
1275 domain->pt_root = pte; in increase_address_space()
1276 domain->mode += 1; in increase_address_space()
1277 domain->updated = true; in increase_address_space()
1282 static u64 *alloc_pte(struct protection_domain *domain, in alloc_pte() argument
1293 while (address > PM_LEVEL_SIZE(domain->mode)) in alloc_pte()
1294 increase_address_space(domain, gfp); in alloc_pte()
1296 level = domain->mode - 1; in alloc_pte()
1297 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; in alloc_pte()
1330 static u64 *fetch_pte(struct protection_domain *domain, in fetch_pte() argument
1337 if (address > PM_LEVEL_SIZE(domain->mode)) in fetch_pte()
1340 level = domain->mode - 1; in fetch_pte()
1341 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; in fetch_pte()
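
increase_address_space(), alloc_pte() and fetch_pte() (lines 1260-1341) all key off domain->mode, the current page-table depth: each level resolves 9 bits of the IO virtual address above the 12-bit page offset, and a new top level is added once an address no longer fits under PM_LEVEL_SIZE(mode). The stand-alone sketch below re-derives that index arithmetic from the 9-bits-per-level layout; treat the macro bodies as an assumption, not a copy of the driver's headers.

    /* Index arithmetic behind the alloc_pte()/fetch_pte() walk: 9
     * translation bits per level above a 4 KiB page offset.  The macro
     * bodies are re-derived for this sketch, not taken from the driver. */
    #include <stdio.h>

    #define PM_LEVEL_SHIFT(lvl)    (12 + 9 * (lvl))
    #define PM_LEVEL_SIZE(mode)    ((1ULL << PM_LEVEL_SHIFT(mode)) - 1)
    #define PM_LEVEL_INDEX(lvl, a) (((a) >> PM_LEVEL_SHIFT(lvl)) & 0x1ffULL)

    int main(void)
    {
        unsigned long long iova = 0x123456789000ULL;
        int mode = 3;                     /* three page-table levels */

        while (iova > PM_LEVEL_SIZE(mode)) {
            mode++;                       /* what increase_address_space() does */
            printf("grew address space to %d levels\n", mode);
        }

        for (int level = mode - 1; level >= 0; level--)
            printf("level %d -> table index %llu\n",
                   level, PM_LEVEL_INDEX(level, iova));
        return 0;
    }
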
1496 ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, in dma_ops_unity_map()
1625 pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, in alloc_new_range()
1674 u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize); in alloc_new_range()
1682 update_domain(&dma_dom->domain); in alloc_new_range()
1687 update_domain(&dma_dom->domain); in alloc_new_range()
1813 static void add_domain_to_list(struct protection_domain *domain) in add_domain_to_list() argument
1818 list_add(&domain->list, &amd_iommu_pd_list); in add_domain_to_list()
1826 static void del_domain_from_list(struct protection_domain *domain) in del_domain_from_list() argument
1831 list_del(&domain->list); in del_domain_from_list()
1893 static void free_pagetable(struct protection_domain *domain) in free_pagetable() argument
1895 unsigned long root = (unsigned long)domain->pt_root; in free_pagetable()
1897 switch (domain->mode) { in free_pagetable()
1953 static void free_gcr3_table(struct protection_domain *domain) in free_gcr3_table() argument
1955 if (domain->glx == 2) in free_gcr3_table()
1956 free_gcr3_tbl_level2(domain->gcr3_tbl); in free_gcr3_table()
1957 else if (domain->glx == 1) in free_gcr3_table()
1958 free_gcr3_tbl_level1(domain->gcr3_tbl); in free_gcr3_table()
1959 else if (domain->glx != 0) in free_gcr3_table()
1962 free_page((unsigned long)domain->gcr3_tbl); in free_gcr3_table()
1976 del_domain_from_list(&dom->domain); in dma_ops_domain_free()
1978 free_pagetable(&dom->domain); in dma_ops_domain_free()
2003 spin_lock_init(&dma_dom->domain.lock); in dma_ops_domain_alloc()
2005 dma_dom->domain.id = domain_id_alloc(); in dma_ops_domain_alloc()
2006 if (dma_dom->domain.id == 0) in dma_ops_domain_alloc()
2008 INIT_LIST_HEAD(&dma_dom->domain.dev_list); in dma_ops_domain_alloc()
2009 dma_dom->domain.mode = PAGE_MODE_2_LEVEL; in dma_ops_domain_alloc()
2010 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); in dma_ops_domain_alloc()
2011 dma_dom->domain.flags = PD_DMA_OPS_MASK; in dma_ops_domain_alloc()
2012 dma_dom->domain.priv = dma_dom; in dma_ops_domain_alloc()
2013 if (!dma_dom->domain.pt_root) in dma_ops_domain_alloc()
2019 add_domain_to_list(&dma_dom->domain); in dma_ops_domain_alloc()
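
dma_ops_domain_alloc() (lines 2003-2019) sets up the embedded protection_domain before publishing it on the global list: lock and dev_list initialisation, a freshly allocated domain id, a zeroed two-level page-table root, the PD_DMA_OPS_MASK flag that dma_ops_domain() checks later, and a priv back-pointer. A condensed sketch of that sequence follows; the type, the flag value and the helpers are stand-ins rather than the driver's.

    /* Condensed initialisation order of dma_ops_domain_alloc().
     * Names and values are illustrative stand-ins. */
    #include <stdlib.h>

    #define PD_DMA_OPS_MASK   (1UL << 0)     /* placeholder bit */
    #define PAGE_MODE_2_LEVEL 2

    struct pdom_sketch {
        int           id;
        int           mode;
        unsigned long flags;
        void         *pt_root;
    };

    struct pdom_sketch *dma_ops_domain_alloc_sketch(int new_id)
    {
        struct pdom_sketch *dom = calloc(1, sizeof(*dom));

        if (!dom)
            return NULL;

        /* lock and dev_list initialisation elided */
        dom->id      = new_id;               /* domain_id_alloc()        */
        dom->mode    = PAGE_MODE_2_LEVEL;    /* start with two levels    */
        dom->pt_root = calloc(1, 4096);      /* get_zeroed_page()        */
        dom->flags   = PD_DMA_OPS_MASK;      /* mark as a dma_ops domain */

        if (!dom->pt_root) {
            free(dom);
            return NULL;
        }
        return dom;
    }
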
2044 static bool dma_ops_domain(struct protection_domain *domain) in dma_ops_domain() argument
2046 return domain->flags & PD_DMA_OPS_MASK; in dma_ops_domain()
2049 static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) in set_dte_entry() argument
2054 if (domain->mode != PAGE_MODE_NONE) in set_dte_entry()
2055 pte_root = virt_to_phys(domain->pt_root); in set_dte_entry()
2057 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) in set_dte_entry()
2066 if (domain->flags & PD_IOMMUV2_MASK) { in set_dte_entry()
2067 u64 gcr3 = __pa(domain->gcr3_tbl); in set_dte_entry()
2068 u64 glx = domain->glx; in set_dte_entry()
2093 flags |= domain->id; in set_dte_entry()
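
set_dte_entry() (lines 2049-2093) packs the domain state into the device-table entry: the page-table root and paging mode when translation is enabled, the GCR3 table pointer and glx when PD_IOMMUV2_MASK is set, and the domain id in the flags word. The sketch below only shows that shape; the shift values and field widths are invented for illustration and do not match the DTE layout defined by the hardware spec and the driver's DTE_*/DEV_ENTRY_* constants.

    /* Shape of the DTE composition done by set_dte_entry().  The bit
     * positions here are invented placeholders, do not reuse them. */
    #include <stdint.h>

    #define SKETCH_MODE_SHIFT 9          /* placeholder */
    #define SKETCH_V2_FLAG    (1u << 0)  /* placeholder */

    struct dte_sketch { uint64_t lo, hi; };

    struct dte_sketch make_dte_sketch(uint64_t pt_root_phys, int mode,
                                      uint64_t gcr3_phys, int glx,
                                      unsigned int flags, uint16_t domid)
    {
        struct dte_sketch dte = { 0, 0 };

        dte.lo  = pt_root_phys & ~0xfffULL;             /* page-table root */
        dte.lo |= (uint64_t)mode << SKETCH_MODE_SHIFT;  /* paging mode     */

        if (flags & SKETCH_V2_FLAG) {
            dte.hi |= gcr3_phys & ~0xfffULL;    /* PASID (GCR3) table */
            dte.hi |= (uint64_t)glx;            /* GCR3 table depth   */
        }

        dte.hi |= domid;                        /* domain id          */
        return dte;
    }
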
2109 struct protection_domain *domain) in do_attach() argument
2118 dev_data->domain = domain; in do_attach()
2119 list_add(&dev_data->list, &domain->dev_list); in do_attach()
2120 set_dte_entry(dev_data->devid, domain, ats); in do_attach()
2123 domain->dev_iommu[iommu->index] += 1; in do_attach()
2124 domain->dev_cnt += 1; in do_attach()
2137 dev_data->domain->dev_iommu[iommu->index] -= 1; in do_detach()
2138 dev_data->domain->dev_cnt -= 1; in do_detach()
2141 dev_data->domain = NULL; in do_detach()
2154 struct protection_domain *domain) in __attach_device() argument
2160 spin_lock(&domain->lock); in __attach_device()
2170 if (head->domain != NULL) in __attach_device()
2174 do_attach(head, domain); in __attach_device()
2178 do_attach(entry, domain); in __attach_device()
2185 spin_unlock(&domain->lock); in __attach_device()
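
do_attach() and do_detach() (lines 2109-2141) maintain the bookkeeping the flush paths depend on: the device is linked into domain->dev_list, its DTE is rewritten, and both the per-IOMMU counter dev_iommu[iommu->index] and the total dev_cnt are adjusted. A minimal sketch of that reference counting, with mock types and the list/DTE handling elided:

    /* Reference counting performed by do_attach()/do_detach(); the list
     * handling and DTE update are elided, types are mock-ups. */
    #define MAX_IOMMUS 4

    struct dom_refs {
        unsigned int dev_iommu[MAX_IOMMUS];  /* per-IOMMU device count */
        unsigned int dev_cnt;                /* total attached devices */
    };

    struct dev_ref {
        int iommu_index;          /* which IOMMU the device sits behind */
        struct dom_refs *domain;  /* NULL while detached */
    };

    void do_attach_sketch(struct dev_ref *dev, struct dom_refs *dom)
    {
        dev->domain = dom;
        /* ...link into dom->dev_list and call set_dte_entry() here... */
        dom->dev_iommu[dev->iommu_index] += 1;
        dom->dev_cnt += 1;
    }

    void do_detach_sketch(struct dev_ref *dev)
    {
        struct dom_refs *dom = dev->domain;

        dom->dev_iommu[dev->iommu_index] -= 1;
        dom->dev_cnt -= 1;
        /* ...clear the DTE and unlink from dom->dev_list here... */
        dev->domain = NULL;
    }
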
2282 struct protection_domain *domain) in attach_device() argument
2291 if (domain->flags & PD_IOMMUV2_MASK) { in attach_device()
2308 ret = __attach_device(dev_data, domain); in attach_device()
2316 domain_flush_tlb_pde(domain); in attach_device()
2327 struct protection_domain *domain; in __detach_device() local
2330 BUG_ON(!dev_data->domain); in __detach_device()
2332 domain = dev_data->domain; in __detach_device()
2334 spin_lock_irqsave(&domain->lock, flags); in __detach_device()
2345 spin_unlock_irqrestore(&domain->lock, flags); in __detach_device()
2353 (dev_data->domain == NULL && domain != pt_domain)) in __detach_device()
2362 struct protection_domain *domain; in detach_device() local
2367 domain = dev_data->domain; in detach_device()
2374 if (domain->flags & PD_IOMMUV2_MASK) in detach_device()
2394 if (dev_data->domain) in domain_for_device()
2395 return dev_data->domain; in domain_for_device()
2401 if (alias_data->domain != NULL) { in domain_for_device()
2402 __attach_device(dev_data, alias_data->domain); in domain_for_device()
2403 dom = alias_data->domain; in domain_for_device()
2415 struct protection_domain *domain; in device_change_notifier() local
2447 domain = domain_for_device(dev); in device_change_notifier()
2503 struct protection_domain *domain; in get_domain() local
2510 domain = domain_for_device(dev); in get_domain()
2511 if (domain != NULL && !dma_ops_domain(domain)) in get_domain()
2514 if (domain != NULL) in get_domain()
2515 return domain; in get_domain()
2521 attach_device(dev, &dma_dom->domain); in get_domain()
2523 dma_dom->domain.id, dev_name(dev)); in get_domain()
2525 return &dma_dom->domain; in get_domain()
2528 static void update_device_table(struct protection_domain *domain) in update_device_table() argument
2532 list_for_each_entry(dev_data, &domain->dev_list, list) in update_device_table()
2533 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); in update_device_table()
2536 static void update_domain(struct protection_domain *domain) in update_domain() argument
2538 if (!domain->updated) in update_domain()
2541 update_device_table(domain); in update_domain()
2543 domain_flush_devices(domain); in update_domain()
2544 domain_flush_tlb_pde(domain); in update_domain()
2546 domain->updated = false; in update_domain()
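
update_device_table() and update_domain() (lines 2528-2546) implement a lazy sync: code that changes the page-table layout only sets domain->updated, and update_domain() later rewrites the DTE of every attached device, flushes those device-table entries, flushes the TLB including PDEs, and clears the flag. A schematic version with stand-in helpers:

    /* The "mark dirty, sync later" pattern behind update_domain();
     * the rewrite/flush helpers are stand-ins. */
    #include <stdbool.h>

    struct upd_domain {
        bool updated;
    };

    static void rewrite_device_table(struct upd_domain *d) { /* per-device DTE update */ }
    static void flush_devices(struct upd_domain *d)        { /* device-table flush    */ }
    static void flush_tlb_pde(struct upd_domain *d)        { /* TLB flush incl. PDEs  */ }

    void update_domain_sketch(struct upd_domain *d)
    {
        if (!d->updated)
            return;             /* nothing changed since the last sync */

        rewrite_device_table(d);
        flush_devices(d);
        flush_tlb_pde(d);

        d->updated = false;
    }

Callers such as increase_address_space() (line 1277) and amd_iommu_domain_enable_v2() (line 3540) only set the flag; the expensive DTE rewrite and flush happen once, in update_domain().
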
2564 pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page, in dma_ops_get_pte()
2570 update_domain(&dom->domain); in dma_ops_get_pte()
2703 domain_flush_tlb(&dma_dom->domain); in __map_single()
2706 domain_flush_pages(&dma_dom->domain, address, size); in __map_single()
2755 domain_flush_pages(&dma_dom->domain, flush_addr, size); in __unmap_single()
2769 struct protection_domain *domain; in map_page() local
2776 domain = get_domain(dev); in map_page()
2777 if (PTR_ERR(domain) == -EINVAL) in map_page()
2779 else if (IS_ERR(domain)) in map_page()
2784 spin_lock_irqsave(&domain->lock, flags); in map_page()
2786 addr = __map_single(dev, domain->priv, paddr, size, dir, false, in map_page()
2791 domain_flush_complete(domain); in map_page()
2794 spin_unlock_irqrestore(&domain->lock, flags); in map_page()
2806 struct protection_domain *domain; in unmap_page() local
2810 domain = get_domain(dev); in unmap_page()
2811 if (IS_ERR(domain)) in unmap_page()
2814 spin_lock_irqsave(&domain->lock, flags); in unmap_page()
2816 __unmap_single(domain->priv, dma_addr, size, dir); in unmap_page()
2818 domain_flush_complete(domain); in unmap_page()
2820 spin_unlock_irqrestore(&domain->lock, flags); in unmap_page()
2832 struct protection_domain *domain; in map_sg() local
2841 domain = get_domain(dev); in map_sg()
2842 if (IS_ERR(domain)) in map_sg()
2847 spin_lock_irqsave(&domain->lock, flags); in map_sg()
2852 s->dma_address = __map_single(dev, domain->priv, in map_sg()
2863 domain_flush_complete(domain); in map_sg()
2866 spin_unlock_irqrestore(&domain->lock, flags); in map_sg()
2872 __unmap_single(domain->priv, s->dma_address, in map_sg()
2891 struct protection_domain *domain; in unmap_sg() local
2897 domain = get_domain(dev); in unmap_sg()
2898 if (IS_ERR(domain)) in unmap_sg()
2901 spin_lock_irqsave(&domain->lock, flags); in unmap_sg()
2904 __unmap_single(domain->priv, s->dma_address, in unmap_sg()
2909 domain_flush_complete(domain); in unmap_sg()
2911 spin_unlock_irqrestore(&domain->lock, flags); in unmap_sg()
2922 struct protection_domain *domain; in alloc_coherent() local
2928 domain = get_domain(dev); in alloc_coherent()
2929 if (PTR_ERR(domain) == -EINVAL) { in alloc_coherent()
2933 } else if (IS_ERR(domain)) in alloc_coherent()
2955 spin_lock_irqsave(&domain->lock, flags); in alloc_coherent()
2957 *dma_addr = __map_single(dev, domain->priv, page_to_phys(page), in alloc_coherent()
2961 spin_unlock_irqrestore(&domain->lock, flags); in alloc_coherent()
2965 domain_flush_complete(domain); in alloc_coherent()
2967 spin_unlock_irqrestore(&domain->lock, flags); in alloc_coherent()
2986 struct protection_domain *domain; in free_coherent() local
2995 domain = get_domain(dev); in free_coherent()
2996 if (IS_ERR(domain)) in free_coherent()
2999 spin_lock_irqsave(&domain->lock, flags); in free_coherent()
3001 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); in free_coherent()
3003 domain_flush_complete(domain); in free_coherent()
3005 spin_unlock_irqrestore(&domain->lock, flags); in free_coherent()
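
map_page(), unmap_page(), map_sg(), unmap_sg(), alloc_coherent() and free_coherent() (lines 2769-3005) all wrap the same skeleton: resolve the device's domain with get_domain(), take domain->lock with interrupts disabled, call __map_single()/__unmap_single() on domain->priv, wait for command completion, and unlock. The sketch below condenses that skeleton; every helper is a stub standing in for the real one.

    /* Common skeleton of the dma_ops callbacks (map_page() and friends);
     * every helper below is a stub standing in for the real one. */
    #include <stddef.h>
    #include <stdio.h>

    #define SKETCH_MAPPING_ERROR ((unsigned long)-1)

    struct dom_sketch { int id; };

    static void lock_domain(struct dom_sketch *d)    { /* spin_lock_irqsave()      */ }
    static void unlock_domain(struct dom_sketch *d)  { /* spin_unlock_irqrestore() */ }
    static void flush_complete(struct dom_sketch *d) { /* wait for the IOMMU       */ }

    static unsigned long map_single_stub(struct dom_sketch *d,
                                         unsigned long paddr, size_t size)
    {
        return paddr;                       /* pretend identity mapping */
    }

    unsigned long map_page_sketch(struct dom_sketch *domain,
                                  unsigned long paddr, size_t size)
    {
        unsigned long addr;

        lock_domain(domain);
        addr = map_single_stub(domain, paddr, size);  /* __map_single()     */
        if (addr != SKETCH_MAPPING_ERROR)
            flush_complete(domain);                   /* sync before return */
        unlock_domain(domain);

        return addr;
    }

    int main(void)
    {
        struct dom_sketch dom = { .id = 1 };
        printf("dma addr = %#lx\n", map_page_sketch(&dom, 0x1000, 4096));
        return 0;
    }
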
3063 attach_device(&dev->dev, &dma_dom->domain); in prealloc_protection_domains()
3128 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; in amd_iommu_init_dma_ops()
3177 static void cleanup_domain(struct protection_domain *domain) in cleanup_domain() argument
3184 while (!list_empty(&domain->dev_list)) { in cleanup_domain()
3185 entry = list_first_entry(&domain->dev_list, in cleanup_domain()
3193 static void protection_domain_free(struct protection_domain *domain) in protection_domain_free() argument
3195 if (!domain) in protection_domain_free()
3198 del_domain_from_list(domain); in protection_domain_free()
3200 if (domain->id) in protection_domain_free()
3201 domain_id_free(domain->id); in protection_domain_free()
3203 kfree(domain); in protection_domain_free()
3208 struct protection_domain *domain; in protection_domain_alloc() local
3210 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in protection_domain_alloc()
3211 if (!domain) in protection_domain_alloc()
3214 spin_lock_init(&domain->lock); in protection_domain_alloc()
3215 mutex_init(&domain->api_lock); in protection_domain_alloc()
3216 domain->id = domain_id_alloc(); in protection_domain_alloc()
3217 if (!domain->id) in protection_domain_alloc()
3219 INIT_LIST_HEAD(&domain->dev_list); in protection_domain_alloc()
3221 add_domain_to_list(domain); in protection_domain_alloc()
3223 return domain; in protection_domain_alloc()
3226 kfree(domain); in protection_domain_alloc()
3263 pdomain->domain.geometry.aperture_start = 0; in amd_iommu_domain_alloc()
3264 pdomain->domain.geometry.aperture_end = ~0ULL; in amd_iommu_domain_alloc()
3265 pdomain->domain.geometry.force_aperture = true; in amd_iommu_domain_alloc()
3267 return &pdomain->domain; in amd_iommu_domain_alloc()
3277 struct protection_domain *domain; in amd_iommu_domain_free() local
3282 domain = to_pdomain(dom); in amd_iommu_domain_free()
3284 if (domain->dev_cnt > 0) in amd_iommu_domain_free()
3285 cleanup_domain(domain); in amd_iommu_domain_free()
3287 BUG_ON(domain->dev_cnt != 0); in amd_iommu_domain_free()
3289 if (domain->mode != PAGE_MODE_NONE) in amd_iommu_domain_free()
3290 free_pagetable(domain); in amd_iommu_domain_free()
3292 if (domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_free()
3293 free_gcr3_table(domain); in amd_iommu_domain_free()
3295 protection_domain_free(domain); in amd_iommu_domain_free()
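
amd_iommu_domain_free() (lines 3277-3295) tears a domain down in a fixed order: detach any remaining devices through cleanup_domain(), free the page table only if a page-table mode is in use, free the GCR3 table only if the domain had been switched to IOMMUv2 mode, then release the id and the structure through protection_domain_free(). A sketch of that ordering; the helpers and flag bit are stand-ins.

    /* Teardown order of amd_iommu_domain_free(); helpers are stand-ins. */
    #include <assert.h>

    enum { SK_PAGE_MODE_NONE = 0 };
    #define SK_PD_IOMMUV2 (1u << 1)          /* placeholder flag bit */

    struct pd_sketch {
        unsigned int dev_cnt;
        int          mode;
        unsigned int flags;
    };

    static void cleanup_domain_sk(struct pd_sketch *d)         { d->dev_cnt = 0; }
    static void free_pagetable_sk(struct pd_sketch *d)         { /* walk and free levels */ }
    static void free_gcr3_table_sk(struct pd_sketch *d)        { /* free PASID tables    */ }
    static void protection_domain_free_sk(struct pd_sketch *d) { /* free id + structure  */ }

    void domain_free_sketch(struct pd_sketch *domain)
    {
        if (domain->dev_cnt > 0)
            cleanup_domain_sk(domain);       /* detach leftover devices  */
        assert(domain->dev_cnt == 0);        /* BUG_ON() in the original */

        if (domain->mode != SK_PAGE_MODE_NONE)
            free_pagetable_sk(domain);       /* only if translation used */

        if (domain->flags & SK_PD_IOMMUV2)
            free_gcr3_table_sk(domain);      /* only for IOMMUv2 domains */

        protection_domain_free_sk(domain);
    }
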
3310 if (dev_data->domain != NULL) in amd_iommu_detach_device()
3323 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_attach_device() local
3337 if (dev_data->domain) in amd_iommu_attach_device()
3340 ret = attach_device(dev, domain); in amd_iommu_attach_device()
3350 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_map() local
3354 if (domain->mode == PAGE_MODE_NONE) in amd_iommu_map()
3362 mutex_lock(&domain->api_lock); in amd_iommu_map()
3363 ret = iommu_map_page(domain, iova, paddr, prot, page_size); in amd_iommu_map()
3364 mutex_unlock(&domain->api_lock); in amd_iommu_map()
3372 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_unmap() local
3375 if (domain->mode == PAGE_MODE_NONE) in amd_iommu_unmap()
3378 mutex_lock(&domain->api_lock); in amd_iommu_unmap()
3379 unmap_size = iommu_unmap_page(domain, iova, page_size); in amd_iommu_unmap()
3380 mutex_unlock(&domain->api_lock); in amd_iommu_unmap()
3382 domain_flush_tlb_pde(domain); in amd_iommu_unmap()
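
The iommu_ops paths amd_iommu_map()/amd_iommu_unmap() (lines 3350-3382) differ from the dma_ops paths above in two ways: they serialise on domain->api_lock, a mutex rather than the spinlock, and only the unmap side triggers a TLB flush. A short sketch of that asymmetry; the page-table helpers are placeholders.

    /* Mutex-serialised map/unmap as in amd_iommu_map()/amd_iommu_unmap();
     * only the unmap side flushes.  The page-table helpers are stubs. */
    #include <pthread.h>
    #include <stddef.h>

    struct api_dom {
        pthread_mutex_t api_lock;
        /* page-table state elided */
    };

    static int map_page_sk(struct api_dom *d, unsigned long iova,
                           unsigned long paddr, size_t size) { return 0; }
    static size_t unmap_page_sk(struct api_dom *d, unsigned long iova,
                                size_t size)                 { return size; }
    static void flush_tlb_pde_sk(struct api_dom *d)          { /* invalidate */ }

    int iommu_map_sketch(struct api_dom *d, unsigned long iova,
                         unsigned long paddr, size_t size)
    {
        int ret;

        pthread_mutex_lock(&d->api_lock);
        ret = map_page_sk(d, iova, paddr, size);
        pthread_mutex_unlock(&d->api_lock);

        return ret;                          /* new PTEs, no flush issued here */
    }

    size_t iommu_unmap_sketch(struct api_dom *d, unsigned long iova, size_t size)
    {
        size_t unmapped;

        pthread_mutex_lock(&d->api_lock);
        unmapped = unmap_page_sk(d, iova, size);
        pthread_mutex_unlock(&d->api_lock);

        flush_tlb_pde_sk(d);                 /* stale translations must go */
        return unmapped;
    }
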
3390 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_iova_to_phys() local
3394 if (domain->mode == PAGE_MODE_NONE) in amd_iommu_iova_to_phys()
3397 pte = fetch_pte(domain, iova, &pte_pgsize); in amd_iommu_iova_to_phys()
3487 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_direct_map() local
3490 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_direct_map()
3493 domain->mode = PAGE_MODE_NONE; in amd_iommu_domain_direct_map()
3494 domain->updated = true; in amd_iommu_domain_direct_map()
3497 update_domain(domain); in amd_iommu_domain_direct_map()
3500 free_pagetable(domain); in amd_iommu_domain_direct_map()
3502 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_direct_map()
3508 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_enable_v2() local
3522 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_enable_v2()
3530 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_enable_v2()
3534 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); in amd_iommu_domain_enable_v2()
3535 if (domain->gcr3_tbl == NULL) in amd_iommu_domain_enable_v2()
3538 domain->glx = levels; in amd_iommu_domain_enable_v2()
3539 domain->flags |= PD_IOMMUV2_MASK; in amd_iommu_domain_enable_v2()
3540 domain->updated = true; in amd_iommu_domain_enable_v2()
3542 update_domain(domain); in amd_iommu_domain_enable_v2()
3547 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_enable_v2()
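
amd_iommu_domain_enable_v2() (lines 3508-3547) converts only an empty domain: under domain->lock it checks that no devices are attached and that PD_IOMMUV2_MASK is not already set, allocates the GCR3 root page, records the number of GCR3 levels in glx, sets the flag and domain->updated, and relies on update_domain() to propagate the change. Condensed into a sketch; the flag bit and error codes are placeholders.

    /* Precondition checks of amd_iommu_domain_enable_v2(), condensed.
     * Flag bits and error codes are placeholders. */
    #include <errno.h>
    #include <stdlib.h>

    #define SK_PD_IOMMUV2 (1u << 1)          /* placeholder flag bit */

    struct v2_dom {
        unsigned int dev_cnt;
        unsigned int flags;
        int          glx;
        void        *gcr3_tbl;
        int          updated;
    };

    int enable_v2_sketch(struct v2_dom *d, int gcr3_levels)
    {
        if (d->dev_cnt > 0 || (d->flags & SK_PD_IOMMUV2))
            return -EBUSY;               /* must be empty and not yet converted */

        d->gcr3_tbl = calloc(1, 4096);   /* get_zeroed_page()                   */
        if (!d->gcr3_tbl)
            return -ENOMEM;

        d->glx      = gcr3_levels;       /* depth of the GCR3 table             */
        d->flags   |= SK_PD_IOMMUV2;
        d->updated  = 1;                 /* update_domain() syncs the DTEs      */
        return 0;
    }
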
3553 static int __flush_pasid(struct protection_domain *domain, int pasid, in __flush_pasid() argument
3560 if (!(domain->flags & PD_IOMMUV2_MASK)) in __flush_pasid()
3563 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); in __flush_pasid()
3570 if (domain->dev_iommu[i] == 0) in __flush_pasid()
3579 domain_flush_complete(domain); in __flush_pasid()
3582 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
3600 domain_flush_complete(domain); in __flush_pasid()
3609 static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid, in __amd_iommu_flush_page() argument
3614 return __flush_pasid(domain, pasid, address, false); in __amd_iommu_flush_page()
3620 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_flush_page() local
3624 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_page()
3625 ret = __amd_iommu_flush_page(domain, pasid, address); in amd_iommu_flush_page()
3626 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_page()
3632 static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid) in __amd_iommu_flush_tlb() argument
3636 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, in __amd_iommu_flush_tlb()
3642 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_flush_tlb() local
3646 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_tlb()
3647 ret = __amd_iommu_flush_tlb(domain, pasid); in amd_iommu_flush_tlb()
3648 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_tlb()
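
__flush_pasid() (lines 3553-3600) works in two phases: it first queues a PASID invalidation on every IOMMU that hosts devices of the domain (again skipping units with dev_iommu[i] == 0) and waits for completion, then walks domain->dev_list to queue per-device IOTLB invalidations and waits a second time. A schematic of that two-phase ordering; the helpers are stand-ins.

    /* Two-phase flush ordering of __flush_pasid(); helpers are stand-ins. */
    #include <stdio.h>

    #define N_IOMMUS  2
    #define N_DEVICES 3

    static void queue_iommu_flush(int iommu, int pasid)
    {
        printf("phase 1: IOMMU %d, flush PASID %d\n", iommu, pasid);
    }

    static void queue_device_iotlb_flush(int dev, int pasid)
    {
        printf("phase 2: device %d IOTLB, flush PASID %d\n", dev, pasid);
    }

    static void wait_for_completion_sk(void)
    {
        printf("wait for completion\n");
    }

    void flush_pasid_sketch(int pasid)
    {
        for (int i = 0; i < N_IOMMUS; i++)
            queue_iommu_flush(i, pasid);         /* IOMMU TLBs first   */
        wait_for_completion_sk();

        for (int d = 0; d < N_DEVICES; d++)
            queue_device_iotlb_flush(d, pasid);  /* then device IOTLBs */
        wait_for_completion_sk();
    }

    int main(void)
    {
        flush_pasid_sketch(7);
        return 0;
    }
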
3686 static int __set_gcr3(struct protection_domain *domain, int pasid, in __set_gcr3() argument
3691 if (domain->mode != PAGE_MODE_NONE) in __set_gcr3()
3694 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); in __set_gcr3()
3700 return __amd_iommu_flush_tlb(domain, pasid); in __set_gcr3()
3703 static int __clear_gcr3(struct protection_domain *domain, int pasid) in __clear_gcr3() argument
3707 if (domain->mode != PAGE_MODE_NONE) in __clear_gcr3()
3710 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); in __clear_gcr3()
3716 return __amd_iommu_flush_tlb(domain, pasid); in __clear_gcr3()
3722 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_set_gcr3() local
3726 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3727 ret = __set_gcr3(domain, pasid, cr3); in amd_iommu_domain_set_gcr3()
3728 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3736 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_clear_gcr3() local
3740 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3741 ret = __clear_gcr3(domain, pasid); in amd_iommu_domain_clear_gcr3()
3742 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
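
__set_gcr3() and __clear_gcr3() (lines 3686-3716) only operate on domains without a page table (domain->mode must be PAGE_MODE_NONE), locate the PASID's slot via __get_gcr3_pte() using domain->glx as the table depth, write or clear it, and finish with a PASID TLB flush. The sketch below shows one way such a depth-driven walk can be indexed, assuming 9 PASID bits per level; treat the widths and the entry encoding as assumptions, not the driver's exact layout.

    /* One possible indexing scheme for a multi-level PASID (GCR3) table
     * walk, assuming 9 PASID bits per level.  Illustration only; entries
     * here hold plain pointers instead of physical addresses. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define SK_ENTRIES 512

    uint64_t *get_gcr3_pte_sketch(uint64_t *root, int glx, int pasid, bool alloc)
    {
        uint64_t *table = root;

        for (int level = glx; level > 0; level--) {
            int idx = (pasid >> (9 * level)) & 0x1ff;

            if (!table[idx]) {
                uint64_t *next;

                if (!alloc)
                    return NULL;         /* nothing installed for this PASID */
                next = calloc(SK_ENTRIES, sizeof(uint64_t));
                if (!next)
                    return NULL;
                table[idx] = (uint64_t)(uintptr_t)next;
            }
            table = (uint64_t *)(uintptr_t)table[idx];
        }
        return &table[pasid & 0x1ff];    /* leaf slot for this PASID */
    }

With alloc=true this mirrors the __set_gcr3() side, which may have to populate intermediate levels; __clear_gcr3() passes false and simply gives up when no entry exists.
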
3779 return &pdomain->domain; in amd_iommu_get_v2_domain()
4151 err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest); in set_affinity()