Lines matching references to 'domain'
55 struct iommu_domain *domain; member
82 static int __iommu_attach_device(struct iommu_domain *domain,
84 static int __iommu_attach_group(struct iommu_domain *domain,
86 static void __iommu_detach_group(struct iommu_domain *domain,
331 struct iommu_domain *domain = group->default_domain; in iommu_group_create_direct_mappings() local
337 if (!domain || domain->type != IOMMU_DOMAIN_DMA) in iommu_group_create_direct_mappings()
340 BUG_ON(!domain->ops->pgsize_bitmap); in iommu_group_create_direct_mappings()
342 pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap); in iommu_group_create_direct_mappings()
357 phys_addr = iommu_iova_to_phys(domain, addr); in iommu_group_create_direct_mappings()
361 ret = iommu_map(domain, addr, addr, pg_size, entry->prot); in iommu_group_create_direct_mappings()
434 if (group->domain) in iommu_group_add_device()
435 __iommu_attach_device(group->domain, dev); in iommu_group_add_device()
851 if (!group->domain) in iommu_group_get_for_dev()
852 group->domain = group->default_domain; in iommu_group_get_for_dev()
1051 void iommu_set_fault_handler(struct iommu_domain *domain, in iommu_set_fault_handler() argument
1055 BUG_ON(!domain); in iommu_set_fault_handler()
1057 domain->handler = handler; in iommu_set_fault_handler()
1058 domain->handler_token = token; in iommu_set_fault_handler()
1065 struct iommu_domain *domain; in __iommu_domain_alloc() local
1070 domain = bus->iommu_ops->domain_alloc(type); in __iommu_domain_alloc()
1071 if (!domain) in __iommu_domain_alloc()
1074 domain->ops = bus->iommu_ops; in __iommu_domain_alloc()
1075 domain->type = type; in __iommu_domain_alloc()
1077 return domain; in __iommu_domain_alloc()
1086 void iommu_domain_free(struct iommu_domain *domain) in iommu_domain_free() argument
1088 domain->ops->domain_free(domain); in iommu_domain_free()
1092 static int __iommu_attach_device(struct iommu_domain *domain, in __iommu_attach_device() argument
1096 if (unlikely(domain->ops->attach_dev == NULL)) in __iommu_attach_device()
1099 ret = domain->ops->attach_dev(domain, dev); in __iommu_attach_device()
1105 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) in iommu_attach_device() argument
1113 return __iommu_attach_device(domain, dev); in iommu_attach_device()
1124 ret = __iommu_attach_group(domain, group); in iommu_attach_device()
1134 static void __iommu_detach_device(struct iommu_domain *domain, in __iommu_detach_device() argument
1137 if (unlikely(domain->ops->detach_dev == NULL)) in __iommu_detach_device()
1140 domain->ops->detach_dev(domain, dev); in __iommu_detach_device()
1144 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) in iommu_detach_device() argument
1151 return __iommu_detach_device(domain, dev); in iommu_detach_device()
1159 __iommu_detach_group(domain, group); in iommu_detach_device()
1169 struct iommu_domain *domain; in iommu_get_domain_for_dev() local
1177 domain = group->domain; in iommu_get_domain_for_dev()
1181 return domain; in iommu_get_domain_for_dev()
1197 struct iommu_domain *domain = data; in iommu_group_do_attach_device() local
1199 return __iommu_attach_device(domain, dev); in iommu_group_do_attach_device()
1202 static int __iommu_attach_group(struct iommu_domain *domain, in __iommu_attach_group() argument
1207 if (group->default_domain && group->domain != group->default_domain) in __iommu_attach_group()
1210 ret = __iommu_group_for_each_dev(group, domain, in __iommu_attach_group()
1213 group->domain = domain; in __iommu_attach_group()
1218 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_attach_group() argument
1223 ret = __iommu_attach_group(domain, group); in iommu_attach_group()
1232 struct iommu_domain *domain = data; in iommu_group_do_detach_device() local
1234 __iommu_detach_device(domain, dev); in iommu_group_do_detach_device()
1239 static void __iommu_detach_group(struct iommu_domain *domain, in __iommu_detach_group() argument
1245 __iommu_group_for_each_dev(group, domain, in __iommu_detach_group()
1247 group->domain = NULL; in __iommu_detach_group()
1251 if (group->domain == group->default_domain) in __iommu_detach_group()
1260 group->domain = group->default_domain; in __iommu_detach_group()
1263 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_detach_group() argument
1266 __iommu_detach_group(domain, group); in iommu_detach_group()
1271 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) in iommu_iova_to_phys() argument
1273 if (unlikely(domain->ops->iova_to_phys == NULL)) in iommu_iova_to_phys()
1276 return domain->ops->iova_to_phys(domain, iova); in iommu_iova_to_phys()
1280 static size_t iommu_pgsize(struct iommu_domain *domain, in iommu_pgsize() argument
1300 pgsize &= domain->ops->pgsize_bitmap; in iommu_pgsize()
1312 int iommu_map(struct iommu_domain *domain, unsigned long iova, in iommu_map() argument
1320 if (unlikely(domain->ops->map == NULL || in iommu_map()
1321 domain->ops->pgsize_bitmap == 0UL)) in iommu_map()
1324 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in iommu_map()
1328 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); in iommu_map()
1344 size_t pgsize = iommu_pgsize(domain, iova | paddr, size); in iommu_map()
1349 ret = domain->ops->map(domain, iova, paddr, pgsize, prot); in iommu_map()
1360 iommu_unmap(domain, orig_iova, orig_size - size); in iommu_map()
1368 size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) in iommu_unmap() argument
1374 if (unlikely(domain->ops->unmap == NULL || in iommu_unmap()
1375 domain->ops->pgsize_bitmap == 0UL)) in iommu_unmap()
1378 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in iommu_unmap()
1382 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); in iommu_unmap()
1402 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); in iommu_unmap()
1404 unmapped_page = domain->ops->unmap(domain, iova, pgsize); in iommu_unmap()
1420 size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, in default_iommu_map_sg() argument
1428 if (unlikely(domain->ops->pgsize_bitmap == 0UL)) in default_iommu_map_sg()
1431 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); in default_iommu_map_sg()
1445 ret = iommu_map(domain, iova + mapped, phys, s->length, prot); in default_iommu_map_sg()
1456 iommu_unmap(domain, iova, mapped); in default_iommu_map_sg()
1463 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, in iommu_domain_window_enable() argument
1466 if (unlikely(domain->ops->domain_window_enable == NULL)) in iommu_domain_window_enable()
1469 return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size, in iommu_domain_window_enable()
1474 void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr) in iommu_domain_window_disable() argument
1476 if (unlikely(domain->ops->domain_window_disable == NULL)) in iommu_domain_window_disable()
1479 return domain->ops->domain_window_disable(domain, wnd_nr); in iommu_domain_window_disable()
1496 int iommu_domain_get_attr(struct iommu_domain *domain, in iommu_domain_get_attr() argument
1507 *geometry = domain->geometry; in iommu_domain_get_attr()
1512 *paging = (domain->ops->pgsize_bitmap != 0UL); in iommu_domain_get_attr()
1517 if (domain->ops->domain_get_windows != NULL) in iommu_domain_get_attr()
1518 *count = domain->ops->domain_get_windows(domain); in iommu_domain_get_attr()
1524 if (!domain->ops->domain_get_attr) in iommu_domain_get_attr()
1527 ret = domain->ops->domain_get_attr(domain, attr, data); in iommu_domain_get_attr()
1534 int iommu_domain_set_attr(struct iommu_domain *domain, in iommu_domain_set_attr() argument
1544 if (domain->ops->domain_set_windows != NULL) in iommu_domain_set_attr()
1545 ret = domain->ops->domain_set_windows(domain, *count); in iommu_domain_set_attr()
1551 if (domain->ops->domain_set_attr == NULL) in iommu_domain_set_attr()
1554 ret = domain->ops->domain_set_attr(domain, attr, data); in iommu_domain_set_attr()
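
The references above all funnel through a small public API. As an orientation aid, here is a minimal caller-side sketch, assuming a hypothetical device pointer my_dev and placeholder IOVA/physical addresses; it simply strings together the entry points that appear in the listing (iommu_domain_alloc(), iommu_attach_device(), iommu_map(), iommu_iova_to_phys(), iommu_unmap(), iommu_detach_device(), iommu_domain_free()). It is not taken from the file itself.

/*
 * Usage sketch for the iommu_domain API referenced above.
 * example_map_one_page(), my_dev, paddr and iova are illustrative
 * placeholders, not values taken from the listing.
 */
#include <linux/iommu.h>
#include <linux/device.h>
#include <linux/sizes.h>

static int example_map_one_page(struct device *my_dev,
				phys_addr_t paddr, unsigned long iova)
{
	struct iommu_domain *domain;
	int ret;

	/*
	 * Allocate a domain on the device's bus; this ends up in
	 * __iommu_domain_alloc() (line 1065 in the listing).
	 */
	domain = iommu_domain_alloc(my_dev->bus);
	if (!domain)
		return -ENOMEM;

	/*
	 * Attach the device; for grouped devices iommu_attach_device()
	 * (lines 1105-1124) falls through to __iommu_attach_group().
	 */
	ret = iommu_attach_device(domain, my_dev);
	if (ret)
		goto free_domain;

	/*
	 * Create one read/write mapping; iommu_map() (lines 1312-1360)
	 * splits the request according to ops->pgsize_bitmap.
	 */
	ret = iommu_map(domain, iova, paddr, SZ_4K,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto detach;

	/* Walk the translation back for a sanity check (line 1271). */
	WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

	/* Tear everything down in reverse order. */
	iommu_unmap(domain, iova, SZ_4K);
detach:
	iommu_detach_device(domain, my_dev);
free_domain:
	iommu_domain_free(domain);
	return ret;
}

Note that iommu_detach_device() (lines 1144-1159) does not leave the group without a translation: as the listing shows at lines 1251-1260, __iommu_detach_group() re-attaches the group's default_domain when one exists.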