Lines matching refs: sh_domain
(Each entry is a source line from the shmobile IOMMU driver that references sh_domain, prefixed with its line number in the file and followed by the enclosing function; "local" and "argument" mark the lines where sh_domain is declared as a local variable or taken as a parameter.)
93 struct shmobile_iommu_domain *sh_domain; in shmobile_iommu_domain_alloc() local
99 sh_domain = kzalloc(sizeof(*sh_domain), GFP_KERNEL); in shmobile_iommu_domain_alloc()
100 if (!sh_domain) in shmobile_iommu_domain_alloc()
102 ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE); in shmobile_iommu_domain_alloc()
104 kfree(sh_domain); in shmobile_iommu_domain_alloc()
108 sh_domain->l2[i].pgtable = NULL; in shmobile_iommu_domain_alloc()
109 spin_lock_init(&sh_domain->map_lock); in shmobile_iommu_domain_alloc()
110 spin_lock_init(&sh_domain->attached_list_lock); in shmobile_iommu_domain_alloc()
111 INIT_LIST_HEAD(&sh_domain->attached_list); in shmobile_iommu_domain_alloc()
112 return &sh_domain->domain; in shmobile_iommu_domain_alloc()
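
The allocation hits above follow the usual allocate-then-unwind pattern: the domain is zeroed, the L1 page table is allocated, and on failure the domain struct is freed again before returning; the L2 slots start out empty and are filled lazily. A reconstruction sketch of how these lines plausibly fit together; the error returns, the loop bound (called L1_LEN here) and the function signature are assumptions, not part of the matched lines:

    static struct iommu_domain *shmobile_iommu_domain_alloc(unsigned type)
    {
        struct shmobile_iommu_domain *sh_domain;
        int i, ret;

        sh_domain = kzalloc(sizeof(*sh_domain), GFP_KERNEL);
        if (!sh_domain)
            return NULL;                /* assumed failure path */
        ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE);
        if (ret < 0) {                  /* assumed check between lines 102 and 104 */
            kfree(sh_domain);           /* unwind: nothing else allocated yet */
            return NULL;
        }
        for (i = 0; i < L1_LEN; i++)    /* L1_LEN is an assumed bound */
            sh_domain->l2[i].pgtable = NULL;   /* L2 tables are allocated lazily */
        spin_lock_init(&sh_domain->map_lock);
        spin_lock_init(&sh_domain->attached_list_lock);
        INIT_LIST_HEAD(&sh_domain->attached_list);
        return &sh_domain->domain;      /* struct iommu_domain embedded in sh_domain */
    }
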
117 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); in shmobile_iommu_domain_free() local
121 if (sh_domain->l2[i].pgtable) in shmobile_iommu_domain_free()
122 pgtable_free(&sh_domain->l2[i], l2cache, L2_SIZE); in shmobile_iommu_domain_free()
124 pgtable_free(&sh_domain->l1, l1cache, L1_SIZE); in shmobile_iommu_domain_free()
125 kfree(sh_domain); in shmobile_iommu_domain_free()
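
The free path at lines 121 to 125 is the mirror image: every lazily allocated L2 table goes back to l2cache, then the L1 table to l1cache, then the struct itself. A sketch with the loop structure assumed:

    static void shmobile_iommu_domain_free(struct iommu_domain *domain)
    {
        struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
        int i;

        for (i = 0; i < L1_LEN; i++) {          /* assumed bound, mirroring alloc */
            if (sh_domain->l2[i].pgtable)       /* only slots that were populated */
                pgtable_free(&sh_domain->l2[i], l2cache, L2_SIZE);
        }
        pgtable_free(&sh_domain->l1, l1cache, L1_SIZE);
        kfree(sh_domain);
    }
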
132 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); in shmobile_iommu_attach_device() local
137 spin_lock(&sh_domain->attached_list_lock); in shmobile_iommu_attach_device()
139 if (archdata->attached != sh_domain) { in shmobile_iommu_attach_device()
142 ipmmu_tlb_set(archdata->ipmmu, sh_domain->l1.handle, L1_SIZE, in shmobile_iommu_attach_device()
145 archdata->attached = sh_domain; in shmobile_iommu_attach_device()
147 list_add(&archdata->attached_list, &sh_domain->attached_list); in shmobile_iommu_attach_device()
153 spin_unlock(&sh_domain->attached_list_lock); in shmobile_iommu_attach_device()
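
Attach works under attached_list_lock, and only when the device is not already attached to this domain does it program the device's IPMMU with the L1 table's DMA handle (ipmmu_tlb_set at line 142) and put the device's archdata on the domain's attached_list. A sketch under assumptions: the archdata lookup, the busy check for a device attached to some other domain, and the truncated trailing argument of ipmmu_tlb_set are all filled in, and any per-device reference counting between the matched lines is omitted:

    static int shmobile_iommu_attach_device(struct iommu_domain *domain,
                                            struct device *dev)
    {
        struct shmobile_iommu_archdata *archdata = dev->archdata.iommu; /* assumed lookup */
        struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
        int ret = 0;

        spin_lock(&sh_domain->attached_list_lock);
        if (archdata->attached != sh_domain) {
            if (archdata->attached) {           /* assumed: busy on another domain */
                ret = -EBUSY;
                goto out;
            }
            ipmmu_tlb_set(archdata->ipmmu, sh_domain->l1.handle, L1_SIZE,
                          0);                   /* final argument assumed */
            archdata->attached = sh_domain;
            list_add(&archdata->attached_list, &sh_domain->attached_list);
        }
    out:
        spin_unlock(&sh_domain->attached_list_lock);
        return ret;
    }
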
161 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); in shmobile_iommu_detach_device() local
165 spin_lock(&sh_domain->attached_list_lock); in shmobile_iommu_detach_device()
175 spin_unlock(&sh_domain->attached_list_lock); in shmobile_iommu_detach_device()
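
Detach takes the same lock, but lines 166 to 174 do not appear above because they touch archdata rather than sh_domain. Judging from the attach path they presumably undo the TLB programming and drop the device from attached_list; a minimal sketch under exactly that assumption:

    static void shmobile_iommu_detach_device(struct iommu_domain *domain,
                                             struct device *dev)
    {
        struct shmobile_iommu_archdata *archdata = dev->archdata.iommu; /* assumed */
        struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);

        spin_lock(&sh_domain->attached_list_lock);
        ipmmu_tlb_set(archdata->ipmmu, 0, 0, 0);   /* assumed: disable translation */
        archdata->attached = NULL;                 /* assumed bookkeeping */
        list_del(&archdata->attached_list);
        spin_unlock(&sh_domain->attached_list_lock);
    }
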
178 static void domain_tlb_flush(struct shmobile_iommu_domain *sh_domain) in domain_tlb_flush() argument
182 spin_lock(&sh_domain->attached_list_lock); in domain_tlb_flush()
183 list_for_each_entry(archdata, &sh_domain->attached_list, attached_list) in domain_tlb_flush()
185 spin_unlock(&sh_domain->attached_list_lock); in domain_tlb_flush()
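
domain_tlb_flush is the reason the domain keeps attached_list at all: after a mapping change, every IPMMU currently using this domain's page tables must flush its TLB, and the walk happens under attached_list_lock so devices cannot attach or detach mid-flush. The loop body at line 184 is not matched (it touches archdata, not sh_domain); ipmmu_tlb_flush is an assumed helper name paralleling ipmmu_tlb_set:

    static void domain_tlb_flush(struct shmobile_iommu_domain *sh_domain)
    {
        struct shmobile_iommu_archdata *archdata;

        spin_lock(&sh_domain->attached_list_lock);
        list_for_each_entry(archdata, &sh_domain->attached_list, attached_list)
            ipmmu_tlb_flush(archdata->ipmmu);      /* assumed per-IPMMU flush */
        spin_unlock(&sh_domain->attached_list_lock);
    }
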
188 static int l2alloc(struct shmobile_iommu_domain *sh_domain, in l2alloc() argument
193 if (!sh_domain->l2[l1index].pgtable) { in l2alloc()
194 ret = pgtable_alloc(&sh_domain->l2[l1index], l2cache, L2_SIZE); in l2alloc()
198 pgtable_write(&sh_domain->l1, l1index, 1, in l2alloc()
199 sh_domain->l2[l1index].handle | 0x1); in l2alloc()
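
l2alloc implements the lazy second level: the L2 table covering a 1 MB region is allocated on first use, and its DMA handle is written into the L1 slot with bit 0 set (handle | 0x1 at line 199, which looks like an ARM short-descriptor page-table entry, though the listing does not show the descriptor format). The L1 entry is rewritten even when the table already existed, which keeps the function idempotent. Sketch with the error return assumed:

    static int l2alloc(struct shmobile_iommu_domain *sh_domain,
                       unsigned int l1index)
    {
        int ret;

        if (!sh_domain->l2[l1index].pgtable) {
            ret = pgtable_alloc(&sh_domain->l2[l1index], l2cache, L2_SIZE);
            if (ret < 0)                /* assumed early return on failure */
                return ret;
        }
        /* point the L1 slot at the (possibly just created) L2 table */
        pgtable_write(&sh_domain->l1, l1index, 1,
                      sh_domain->l2[l1index].handle | 0x1);
        return 0;
    }
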
209 static void l2free(struct shmobile_iommu_domain *sh_domain, in l2free() argument
213 pgtable_write(&sh_domain->l1, l1index, 1, 0); in l2free()
214 if (sh_domain->l2[l1index].pgtable) { in l2free()
215 *l2 = sh_domain->l2[l1index]; in l2free()
216 sh_domain->l2[l1index].pgtable = NULL; in l2free()
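
l2free is the other half: it clears the L1 slot first (line 213), then hands the detached L2 table back through the out-parameter instead of freeing it inline, presumably so the caller can drop map_lock before calling pgtable_free. Sketch; the name of the pgtable/handle pair type is assumed:

    static void l2free(struct shmobile_iommu_domain *sh_domain,
                       unsigned int l1index,
                       struct shmobile_iommu_domain_pgtable *l2) /* type name assumed */
    {
        pgtable_write(&sh_domain->l1, l1index, 1, 0);  /* unhook from L1 first */
        if (sh_domain->l2[l1index].pgtable) {
            *l2 = sh_domain->l2[l1index];   /* caller frees this outside the lock */
            sh_domain->l2[l1index].pgtable = NULL;
        }
    }
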
224 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); in shmobile_iommu_map() local
232 spin_lock(&sh_domain->map_lock); in shmobile_iommu_map()
233 ret = l2alloc(sh_domain, l1index); in shmobile_iommu_map()
235 pgtable_write(&sh_domain->l2[l1index], l2index, 1, in shmobile_iommu_map()
237 spin_unlock(&sh_domain->map_lock); in shmobile_iommu_map()
241 spin_lock(&sh_domain->map_lock); in shmobile_iommu_map()
242 ret = l2alloc(sh_domain, l1index); in shmobile_iommu_map()
244 pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, in shmobile_iommu_map()
246 spin_unlock(&sh_domain->map_lock); in shmobile_iommu_map()
249 spin_lock(&sh_domain->map_lock); in shmobile_iommu_map()
250 l2free(sh_domain, l1index, &l2); in shmobile_iommu_map()
251 pgtable_write(&sh_domain->l1, l1index, 1, paddr | 0xc02); in shmobile_iommu_map()
252 spin_unlock(&sh_domain->map_lock); in shmobile_iommu_map()
259 domain_tlb_flush(sh_domain); in shmobile_iommu_map()
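
The map path handles three page sizes. A 4 KB map allocates the L2 table if needed and writes one L2 entry (count 1, line 235); a 64 KB map writes sixteen consecutive L2 entries (count 0x10, line 244), matching the ARM convention of replicating a large-page descriptor; a 1 MB map bypasses the second level entirely, detaching any existing L2 table with l2free and writing a section descriptor (paddr | 0xc02, line 251) straight into L1. Each branch runs under map_lock and a successful map ends with domain_tlb_flush (line 259). A skeleton sketch; the dispatch on size, the index math, and the 4 KB / 64 KB descriptor bits (truncated out of the matched lines, so SMALL_PAGE_BITS and LARGE_PAGE_BITS below are hypothetical placeholders) are assumptions:

    static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova,
                                  phys_addr_t paddr, size_t size, int prot)
    {
        struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL }; /* type name assumed */
        struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
        unsigned int l1index = iova >> 20;          /* 1 MB per L1 slot (assumed) */
        unsigned int l2index = (iova >> 12) & 0xff; /* 256 x 4 KB per L2 table (assumed) */
        int ret = -EINVAL;

        switch (size) {
        case SZ_4K:
            spin_lock(&sh_domain->map_lock);
            ret = l2alloc(sh_domain, l1index);
            if (!ret)
                pgtable_write(&sh_domain->l2[l1index], l2index, 1,
                              paddr | SMALL_PAGE_BITS);  /* bits not in the listing */
            spin_unlock(&sh_domain->map_lock);
            break;
        case SZ_64K:
            /* presumably requires a 16-aligned l2index; alignment check assumed */
            spin_lock(&sh_domain->map_lock);
            ret = l2alloc(sh_domain, l1index);
            if (!ret)
                pgtable_write(&sh_domain->l2[l1index], l2index, 0x10,
                              paddr | LARGE_PAGE_BITS);  /* bits not in the listing */
            spin_unlock(&sh_domain->map_lock);
            break;
        case SZ_1M:
            spin_lock(&sh_domain->map_lock);
            l2free(sh_domain, l1index, &l2);  /* drop any L2 table for this slot */
            pgtable_write(&sh_domain->l1, l1index, 1, paddr | 0xc02);
            spin_unlock(&sh_domain->map_lock);
            ret = 0;
            break;
        }
        if (!ret)
            domain_tlb_flush(sh_domain);
        if (l2.pgtable)                       /* free detached table outside the lock */
            pgtable_free(&l2, l2cache, L2_SIZE);
        return ret;
    }
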
268 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); in shmobile_iommu_unmap() local
275 spin_lock(&sh_domain->map_lock); in shmobile_iommu_unmap()
276 l2free(sh_domain, l1index, &l2); in shmobile_iommu_unmap()
277 spin_unlock(&sh_domain->map_lock); in shmobile_iommu_unmap()
282 spin_lock(&sh_domain->map_lock); in shmobile_iommu_unmap()
283 if (sh_domain->l2[l1index].pgtable) in shmobile_iommu_unmap()
284 l2entry = pgtable_read(&sh_domain->l2[l1index], l2index); in shmobile_iommu_unmap()
289 pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, 0); in shmobile_iommu_unmap()
293 pgtable_write(&sh_domain->l2[l1index], l2index, 1, 0); in shmobile_iommu_unmap()
297 spin_unlock(&sh_domain->map_lock); in shmobile_iommu_unmap()
300 domain_tlb_flush(sh_domain); in shmobile_iommu_unmap()
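
Unmap reverses those cases. A 1 MB unmap simply detaches the whole L2 table via l2free (line 276), which also clears the L1 entry; smaller unmaps first read the existing L2 entry to learn what is mapped there, then zero sixteen entries (64 KB, line 289) or one (4 KB, line 293). As in map, everything runs under map_lock with a domain_tlb_flush afterwards (line 300). Sketch; the descriptor decoding on the low two bits follows the ARM short-descriptor format and is an assumption:

    static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
                                       unsigned long iova, size_t size)
    {
        struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL }; /* type name assumed */
        struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
        unsigned int l1index = iova >> 20;
        unsigned int l2index = (iova >> 12) & 0xff;
        uint32_t l2entry = 0;
        size_t ret = 0;

        if (size >= SZ_1M) {                  /* assumed guard for the section case */
            spin_lock(&sh_domain->map_lock);
            l2free(sh_domain, l1index, &l2);
            spin_unlock(&sh_domain->map_lock);
            ret = SZ_1M;
            goto done;
        }
        spin_lock(&sh_domain->map_lock);
        if (sh_domain->l2[l1index].pgtable)
            l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
        switch (l2entry & 3) {    /* assumed: low bits encode the descriptor type */
        case 1:                   /* assumed: 64 KB large page, 16 replicated entries */
            pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, 0);
            ret = SZ_64K;
            break;
        case 2:                   /* assumed: 4 KB small page */
            pgtable_write(&sh_domain->l2[l1index], l2index, 1, 0);
            ret = SZ_4K;
            break;
        }
        spin_unlock(&sh_domain->map_lock);
    done:
        if (ret)
            domain_tlb_flush(sh_domain);
        if (l2.pgtable)           /* free the detached L2 table outside the lock */
            pgtable_free(&l2, l2cache, L2_SIZE);
        return ret;
    }
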
308 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); in shmobile_iommu_iova_to_phys() local
314 spin_lock(&sh_domain->map_lock); in shmobile_iommu_iova_to_phys()
315 if (sh_domain->l2[l1index].pgtable) in shmobile_iommu_iova_to_phys()
316 l2entry = pgtable_read(&sh_domain->l2[l1index], l2index); in shmobile_iommu_iova_to_phys()
318 l1entry = pgtable_read(&sh_domain->l1, l1index); in shmobile_iommu_iova_to_phys()
319 spin_unlock(&sh_domain->map_lock); in shmobile_iommu_iova_to_phys()
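
iova_to_phys is a software walk of the same structures: under map_lock it reads the L2 entry when an L2 table exists for the slot, otherwise the L1 entry (a section mapping or an empty slot), and only after dropping the lock combines the entry with the low bits of the IOVA. That post-unlock reconstruction is not among the matched lines, so the masks below are assumptions based on the 4 KB and 1 MB granularities seen in map:

    static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
                                                   dma_addr_t iova)
    {
        struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
        uint32_t l1entry = 0, l2entry = 0;
        unsigned int l1index = iova >> 20;
        unsigned int l2index = (iova >> 12) & 0xff;

        spin_lock(&sh_domain->map_lock);
        if (sh_domain->l2[l1index].pgtable)
            l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
        else
            l1entry = pgtable_read(&sh_domain->l1, l1index);
        spin_unlock(&sh_domain->map_lock);

        if (l2entry & 3)          /* assumed: valid L2 descriptor */
            return (l2entry & ~0xfff) | (iova & 0xfff);
        if (l1entry & 3)          /* assumed: valid 1 MB section */
            return (l1entry & ~0xfffff) | (iova & 0xfffff);
        return 0;                 /* unmapped */
    }
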