Lines Matching refs:dma_dom
1488 static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, in dma_ops_unity_map() argument
1496 ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, in dma_ops_unity_map()
1504 if (addr < dma_dom->aperture_size) in dma_ops_unity_map()
1506 dma_dom->aperture[0]->bitmap); in dma_ops_unity_map()
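
The three matches above are the unity-mapping path. As a hedged reconstruction from these fragments (the loop bounds over e->address_start/e->address_end and the __set_bit() call are assumptions based on the surrounding driver of this era, not shown in the listing), dma_ops_unity_map() walks a unity-map entry page by page, installs an identity mapping in the DMA-ops domain, and marks any page that falls inside the first aperture range as already allocated:

	/* Sketch reconstructed from the matches above; helper signatures
	 * are assumptions matching the driver version of this listing. */
	static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
				     struct unity_map_entry *e)
	{
		u64 addr;
		int ret;

		for (addr = e->address_start; addr < e->address_end;
		     addr += PAGE_SIZE) {
			/* identity-map (addr -> addr) into the domain */
			ret = iommu_map_page(&dma_dom->domain, addr, addr,
					     e->prot, PAGE_SIZE);
			if (ret)
				return ret;

			/*
			 * If the unity mapping lies inside the aperture,
			 * mark the page as allocated so the address
			 * allocator never hands it out for regular DMA.
			 */
			if (addr < dma_dom->aperture_size)
				__set_bit(addr >> PAGE_SHIFT,
					  dma_dom->aperture[0]->bitmap);
		}

		return 0;
	}
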
1537 static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, in init_unity_mappings_for_device() argument
1546 ret = dma_ops_unity_map(dma_dom, e); in init_unity_mappings_for_device()
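
Only one reference is visible in init_unity_mappings_for_device(); the rest of the function is an assumption. A minimal sketch under that assumption (list name amd_iommu_unity_map and the device-ID range check are not shown in the listing): it walks the ACPI-provided unity-map list and applies every entry whose device-ID range covers the device:

	static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
						  u16 devid)
	{
		struct unity_map_entry *e;
		int ret;

		list_for_each_entry(e, &amd_iommu_unity_map, list) {
			/* skip entries that do not cover this device id */
			if (!(devid >= e->devid_start && devid <= e->devid_end))
				continue;
			ret = dma_ops_unity_map(dma_dom, e);
			if (ret)
				return ret;
		}

		return 0;
	}
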
1595 static int alloc_new_range(struct dma_ops_domain *dma_dom, in alloc_new_range() argument
1598 int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; in alloc_new_range()
1609 dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp); in alloc_new_range()
1610 if (!dma_dom->aperture[index]) in alloc_new_range()
1613 dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp); in alloc_new_range()
1614 if (!dma_dom->aperture[index]->bitmap) in alloc_new_range()
1617 dma_dom->aperture[index]->offset = dma_dom->aperture_size; in alloc_new_range()
1620 unsigned long address = dma_dom->aperture_size; in alloc_new_range()
1625 pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, in alloc_new_range()
1630 dma_dom->aperture[index]->pte_pages[i] = pte_page; in alloc_new_range()
1636 old_size = dma_dom->aperture_size; in alloc_new_range()
1637 dma_dom->aperture_size += APERTURE_RANGE_SIZE; in alloc_new_range()
1641 dma_dom->aperture_size > MSI_ADDR_BASE_LO) { in alloc_new_range()
1648 dma_ops_reserve_addresses(dma_dom, spage, pages); in alloc_new_range()
1654 iommu->exclusion_start >= dma_dom->aperture[index]->offset in alloc_new_range()
1655 && iommu->exclusion_start < dma_dom->aperture_size) { in alloc_new_range()
1661 dma_ops_reserve_addresses(dma_dom, startpage, pages); in alloc_new_range()
1671 for (i = dma_dom->aperture[index]->offset; in alloc_new_range()
1672 i < dma_dom->aperture_size; in alloc_new_range()
1674 u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize); in alloc_new_range()
1678 dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, in alloc_new_range()
1682 update_domain(&dma_dom->domain); in alloc_new_range()
1687 update_domain(&dma_dom->domain); in alloc_new_range()
1689 free_page((unsigned long)dma_dom->aperture[index]->bitmap); in alloc_new_range()
1691 kfree(dma_dom->aperture[index]); in alloc_new_range()
1692 dma_dom->aperture[index] = NULL; in alloc_new_range()
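
alloc_new_range() grows the DMA aperture of a dma_ops_domain by one APERTURE_RANGE_SIZE slice. A condensed sketch of the flow visible above, as a hedged reconstruction: the per-IOMMU exclusion-range loop and the kdump fetch_pte() rescan (matches 1654-1678) are summarized in a comment, the out_free label name is an assumption, and the constants follow the driver version this listing was taken from:

	static int alloc_new_range(struct dma_ops_domain *dma_dom,
				   bool populate, gfp_t gfp)
	{
		int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
		unsigned long old_size;

		if (index >= APERTURE_MAX_RANGES)
			return -ENOMEM;

		/* bookkeeping structure plus its allocation bitmap */
		dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
		if (!dma_dom->aperture[index])
			return -ENOMEM;

		dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
		if (!dma_dom->aperture[index]->bitmap)
			goto out_free;

		dma_dom->aperture[index]->offset = dma_dom->aperture_size;

		if (populate) {
			/* pre-allocate page-table pages for the new range */
			unsigned long address = dma_dom->aperture_size;
			u64 *pte, *pte_page;
			int i;

			for (i = 0; i < APERTURE_RANGE_PAGES / 512; ++i) {
				pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
						&pte_page, gfp);
				if (!pte)
					goto out_free;
				dma_dom->aperture[index]->pte_pages[i] = pte_page;
				address += APERTURE_RANGE_SIZE / 64;
			}
		}

		old_size                = dma_dom->aperture_size;
		dma_dom->aperture_size += APERTURE_RANGE_SIZE;

		/* keep the MSI address window out of the allocator */
		if (old_size < MSI_ADDR_BASE_LO &&
		    dma_dom->aperture_size > MSI_ADDR_BASE_LO)
			dma_ops_reserve_addresses(dma_dom,
						  MSI_ADDR_BASE_LO >> PAGE_SHIFT,
						  iommu_num_pages(MSI_ADDR_BASE_LO,
								  0x10000, PAGE_SIZE));

		/*
		 * The full function also reserves any per-IOMMU exclusion
		 * range falling into the new slice, and walks the slice with
		 * fetch_pte() to reserve addresses already mapped by a
		 * previous (kdump) kernel; both paths call
		 * dma_ops_reserve_addresses() as shown in the matches above.
		 */

		update_domain(&dma_dom->domain);

		return 0;

	out_free:
		update_domain(&dma_dom->domain);
		free_page((unsigned long)dma_dom->aperture[index]->bitmap);
		kfree(dma_dom->aperture[index]);
		dma_dom->aperture[index] = NULL;

		return -ENOMEM;
	}
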
1997 struct dma_ops_domain *dma_dom; in dma_ops_domain_alloc() local
1999 dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL); in dma_ops_domain_alloc()
2000 if (!dma_dom) in dma_ops_domain_alloc()
2003 spin_lock_init(&dma_dom->domain.lock); in dma_ops_domain_alloc()
2005 dma_dom->domain.id = domain_id_alloc(); in dma_ops_domain_alloc()
2006 if (dma_dom->domain.id == 0) in dma_ops_domain_alloc()
2008 INIT_LIST_HEAD(&dma_dom->domain.dev_list); in dma_ops_domain_alloc()
2009 dma_dom->domain.mode = PAGE_MODE_2_LEVEL; in dma_ops_domain_alloc()
2010 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); in dma_ops_domain_alloc()
2011 dma_dom->domain.flags = PD_DMA_OPS_MASK; in dma_ops_domain_alloc()
2012 dma_dom->domain.priv = dma_dom; in dma_ops_domain_alloc()
2013 if (!dma_dom->domain.pt_root) in dma_ops_domain_alloc()
2016 dma_dom->need_flush = false; in dma_ops_domain_alloc()
2017 dma_dom->target_dev = 0xffff; in dma_ops_domain_alloc()
2019 add_domain_to_list(&dma_dom->domain); in dma_ops_domain_alloc()
2021 if (alloc_new_range(dma_dom, true, GFP_KERNEL)) in dma_ops_domain_alloc()
2028 dma_dom->aperture[0]->bitmap[0] = 1; in dma_ops_domain_alloc()
2029 dma_dom->next_address = 0; in dma_ops_domain_alloc()
2032 return dma_dom; in dma_ops_domain_alloc()
2035 dma_ops_domain_free(dma_dom); in dma_ops_domain_alloc()
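
The matches above cover nearly all of dma_ops_domain_alloc(): it initializes the embedded protection domain, allocates the page-table root, registers the domain, grows the first aperture range, and reserves IOVA 0 so it can double as the error value. Hedged reconstruction (the error-label name is an assumption):

	static struct dma_ops_domain *dma_ops_domain_alloc(void)
	{
		struct dma_ops_domain *dma_dom;

		dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
		if (!dma_dom)
			return NULL;

		spin_lock_init(&dma_dom->domain.lock);

		dma_dom->domain.id = domain_id_alloc();
		if (dma_dom->domain.id == 0)
			goto free_dma_dom;
		INIT_LIST_HEAD(&dma_dom->domain.dev_list);
		dma_dom->domain.mode    = PAGE_MODE_2_LEVEL;
		dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
		dma_dom->domain.flags   = PD_DMA_OPS_MASK;
		dma_dom->domain.priv    = dma_dom;
		if (!dma_dom->domain.pt_root)
			goto free_dma_dom;

		dma_dom->need_flush = false;
		dma_dom->target_dev = 0xffff;

		add_domain_to_list(&dma_dom->domain);

		if (alloc_new_range(dma_dom, true, GFP_KERNEL))
			goto free_dma_dom;

		/*
		 * Mark the first page as allocated so 0 is never handed out
		 * as a valid DMA address and can serve as the error value.
		 */
		dma_dom->aperture[0]->bitmap[0] = 1;
		dma_dom->next_address = 0;

		return dma_dom;

	free_dma_dom:
		dma_ops_domain_free(dma_dom);
		return NULL;
	}
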
2504 struct dma_ops_domain *dma_dom; in get_domain() local
2518 dma_dom = find_protection_domain(devid); in get_domain()
2519 if (!dma_dom) in get_domain()
2520 dma_dom = amd_iommu_rlookup_table[devid]->default_dom; in get_domain()
2521 attach_device(dev, &dma_dom->domain); in get_domain()
2523 dma_dom->domain.id, dev_name(dev)); in get_domain()
2525 return &dma_dom->domain; in get_domain()
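
get_domain() resolves the protection domain to use for a device; the matches above are the fallback path taken when the device is not yet bound. A hedged sketch of the whole function (the early checks before the fragment shown are assumptions based on the driver of this era):

	static struct protection_domain *get_domain(struct device *dev)
	{
		struct protection_domain *domain;
		struct dma_ops_domain *dma_dom;
		u16 devid = get_device_id(dev);

		if (!check_device(dev))
			return ERR_PTR(-EINVAL);

		domain = domain_for_device(dev);
		if (domain != NULL && !dma_ops_domain(domain))
			return ERR_PTR(-EBUSY);
		if (domain != NULL)
			return domain;

		/*
		 * Device not bound yet - use a pre-allocated domain from
		 * iommu_pd_list or fall back to the per-IOMMU default
		 * DMA-ops domain, then attach the device to it.
		 */
		dma_dom = find_protection_domain(devid);
		if (!dma_dom)
			dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
		attach_device(dev, &dma_dom->domain);

		DUMP_printk("Using protection domain %d for device %s\n",
			    dma_dom->domain.id, dev_name(dev));

		return &dma_dom->domain;
	}
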
2644 struct dma_ops_domain *dma_dom, in __map_single() argument
2669 address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, in __map_single()
2677 dma_dom->next_address = dma_dom->aperture_size; in __map_single()
2679 if (alloc_new_range(dma_dom, false, GFP_ATOMIC)) in __map_single()
2691 ret = dma_ops_domain_map(dma_dom, start, paddr, dir); in __map_single()
2702 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { in __map_single()
2703 domain_flush_tlb(&dma_dom->domain); in __map_single()
2704 dma_dom->need_flush = false; in __map_single()
2706 domain_flush_pages(&dma_dom->domain, address, size); in __map_single()
2715 dma_ops_domain_unmap(dma_dom, start); in __map_single()
2718 dma_ops_free_addresses(dma_dom, address, pages); in __map_single()
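
__map_single() is the central mapping helper: it allocates a contiguous IOVA range from the aperture bitmap, maps it page by page with dma_ops_domain_map(), and flushes the IOTLB where required; if the allocator runs dry it enlarges the aperture with alloc_new_range() and retries. Condensed, hedged sketch of the flow visible above (statistics counters dropped, label names assumed):

	static dma_addr_t __map_single(struct device *dev,
				       struct dma_ops_domain *dma_dom,
				       phys_addr_t paddr, size_t size, int dir,
				       bool align, u64 dma_mask)
	{
		dma_addr_t offset = paddr & ~PAGE_MASK;
		dma_addr_t address, start, ret;
		unsigned long align_mask = 0;
		unsigned int pages;
		int i;

		pages = iommu_num_pages(paddr, size, PAGE_SIZE);
		paddr &= PAGE_MASK;

		if (align)
			align_mask = (1UL << get_order(size)) - 1;

	retry:
		address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
						  dma_mask);
		if (unlikely(address == DMA_ERROR_CODE)) {
			/* let the next search start in the new range only */
			dma_dom->next_address = dma_dom->aperture_size;

			if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
				goto out;

			/* aperture was enlarged, try the allocation again */
			goto retry;
		}

		start = address;
		for (i = 0; i < pages; ++i) {
			ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
			if (ret == DMA_ERROR_CODE)
				goto out_unmap;

			paddr += PAGE_SIZE;
			start += PAGE_SIZE;
		}
		address += offset;

		if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
			domain_flush_tlb(&dma_dom->domain);
			dma_dom->need_flush = false;
		} else if (unlikely(amd_iommu_np_cache))
			domain_flush_pages(&dma_dom->domain, address, size);

	out:
		return address;

	out_unmap:
		/* roll back the pages mapped so far, then free the IOVAs */
		for (--i; i >= 0; --i) {
			start -= PAGE_SIZE;
			dma_ops_domain_unmap(dma_dom, start);
		}

		dma_ops_free_addresses(dma_dom, address, pages);

		return DMA_ERROR_CODE;
	}
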
2727 static void __unmap_single(struct dma_ops_domain *dma_dom, in __unmap_single() argument
2737 (dma_addr + size > dma_dom->aperture_size)) in __unmap_single()
2746 dma_ops_domain_unmap(dma_dom, start); in __unmap_single()
2752 dma_ops_free_addresses(dma_dom, dma_addr, pages); in __unmap_single()
2754 if (amd_iommu_unmap_flush || dma_dom->need_flush) { in __unmap_single()
2755 domain_flush_pages(&dma_dom->domain, flush_addr, size); in __unmap_single()
2756 dma_dom->need_flush = false; in __unmap_single()
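
__unmap_single() is the inverse: after a sanity check against the aperture size it tears down the per-page mappings, returns the addresses to the bitmap allocator, and flushes the IOTLB if unmap-flushing is forced or the lazy-flush path left it dirty. Sketch reconstructed from the matches above (stats counters omitted):

	static void __unmap_single(struct dma_ops_domain *dma_dom,
				   dma_addr_t dma_addr, size_t size, int dir)
	{
		dma_addr_t flush_addr;
		dma_addr_t i, start;
		unsigned int pages;

		if ((dma_addr == DMA_ERROR_CODE) ||
		    (dma_addr + size > dma_dom->aperture_size))
			return;

		flush_addr = dma_addr;
		pages      = iommu_num_pages(dma_addr, size, PAGE_SIZE);
		dma_addr  &= PAGE_MASK;
		start      = dma_addr;

		for (i = 0; i < pages; ++i) {
			dma_ops_domain_unmap(dma_dom, start);
			start += PAGE_SIZE;
		}

		dma_ops_free_addresses(dma_dom, dma_addr, pages);

		if (amd_iommu_unmap_flush || dma_dom->need_flush) {
			domain_flush_pages(&dma_dom->domain, flush_addr, size);
			dma_dom->need_flush = false;
		}
	}
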
3031 struct dma_ops_domain *dma_dom; in prealloc_protection_domains() local
3057 dma_dom = dma_ops_domain_alloc(); in prealloc_protection_domains()
3058 if (!dma_dom) in prealloc_protection_domains()
3060 init_unity_mappings_for_device(dma_dom, devid); in prealloc_protection_domains()
3061 dma_dom->target_dev = devid; in prealloc_protection_domains()
3063 attach_device(&dev->dev, &dma_dom->domain); in prealloc_protection_domains()
3065 list_add_tail(&dma_dom->list, &iommu_pd_list); in prealloc_protection_domains()
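
prealloc_protection_domains() runs at init time and gives every PCI device handled by the driver a pre-allocated dma_ops_domain: it applies the device's unity mappings, records the target device ID, attaches the device, and queues the domain on iommu_pd_list (where find_protection_domain() later finds it). Condensed sketch; the checks that skip devices which already have a domain are summarized in a comment:

	static void __init prealloc_protection_domains(void)
	{
		struct pci_dev *dev = NULL;
		struct dma_ops_domain *dma_dom;
		u16 devid;

		for_each_pci_dev(dev) {

			/* Do we handle this device at all? */
			if (!check_device(&dev->dev))
				continue;

			/* (devices already bound to a domain are skipped) */

			devid = get_device_id(&dev->dev);

			dma_dom = dma_ops_domain_alloc();
			if (!dma_dom)
				continue;

			init_unity_mappings_for_device(dma_dom, devid);
			dma_dom->target_dev = devid;

			attach_device(&dev->dev, &dma_dom->domain);

			list_add_tail(&dma_dom->list, &iommu_pd_list);
		}
	}
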