Lines matching refs:domain in drivers/iommu/ipmmu-vmsa.c (Renesas VMSA-compatible IPMMU driver), grouped by containing function. Each line keeps its source line number, and each heading notes whether "domain" is bound as a function argument or a local variable there; short sketches after each group fill in the elided context.
In ipmmu_ctx_read() / ipmmu_ctx_write() (domain is an argument):

  199  static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
  201          return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
  204  static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
  207          ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
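These two accessors address a context register as context_id * IM_CTX_SIZE + reg, i.e. each hardware translation context occupies a fixed-size window of registers. For reference, a minimal sketch of the plain MMIO helpers they build on, assuming struct ipmmu_vmsa_device carries a void __iomem *base member as this driver vintage does:

        static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
        {
                /* Plain 32-bit MMIO read from the IPMMU register block. */
                return ioread32(mmu->base + offset);
        }

        static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
                                u32 data)
        {
                iowrite32(data, mmu->base + offset);
        }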
In ipmmu_tlb_sync() / ipmmu_tlb_invalidate() (domain is an argument):

  215  static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
  219          while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
  222                  dev_err_ratelimited(domain->mmu->dev,
  230  static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
  234          reg = ipmmu_ctx_read(domain, IMCTR);
  236          ipmmu_ctx_write(domain, IMCTR, reg);
  238          ipmmu_tlb_sync(domain);
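Filling in the elided lines, a sketch of how the pair plausibly fits together: invalidation sets IMCTR_FLUSH and the sync helper polls until the hardware clears it again, giving up after a bounded number of iterations (TLB_LOOP_TIMEOUT is a driver-local constant; the exact timeout handling shown here is an assumption):

        static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
        {
                unsigned int count = 0;

                /* The hardware clears IMCTR_FLUSH once the TLB flush completes. */
                while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
                        cpu_relax();
                        if (++count == TLB_LOOP_TIMEOUT) {
                                dev_err_ratelimited(domain->mmu->dev,
                                        "TLB sync timed out -- MMU may be deadlocked\n");
                                return;
                        }
                        udelay(1);
                }
        }

        static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
        {
                u32 reg;

                /* Kick a full TLB flush for this context and wait for it. */
                reg = ipmmu_ctx_read(domain, IMCTR);
                reg |= IMCTR_FLUSH;
                ipmmu_ctx_write(domain, IMCTR, reg);

                ipmmu_tlb_sync(domain);
        }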
In ipmmu_utlb_enable() / ipmmu_utlb_disable() (domain is an argument):

  244  static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
  247          struct ipmmu_vmsa_device *mmu = domain->mmu;
  258                      IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
  265  static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
  268          struct ipmmu_vmsa_device *mmu = domain->mmu;
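Enabling a micro-TLB routes it to this domain's translation context via IMUCTR_TTSEL_MMU() and turns on translation; disabling simply clears the per-uTLB control register. A sketch of the full pair, assuming IMUASID()/IMUCTR() compute per-uTLB register offsets as elsewhere in the driver and that the ASID is left at zero:

        static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
                                      unsigned int utlb)
        {
                struct ipmmu_vmsa_device *mmu = domain->mmu;

                /* ASID stays 0; point the uTLB at this context and enable it. */
                ipmmu_write(mmu, IMUASID(utlb), 0);
                ipmmu_write(mmu, IMUCTR(utlb),
                            IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
                            IMUCTR_MMUEN);
        }

        static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
                                       unsigned int utlb)
        {
                struct ipmmu_vmsa_device *mmu = domain->mmu;

                /* Clearing IMUCTR detaches the uTLB from the context. */
                ipmmu_write(mmu, IMUCTR(utlb), 0);
        }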
In ipmmu_tlb_flush_all() (domain is a local):

  275          struct ipmmu_vmsa_domain *domain = cookie;
  277          ipmmu_tlb_invalidate(domain);

In ipmmu_flush_pgtable() (domain is a local):

  289          struct ipmmu_vmsa_domain *domain = cookie;
  295          dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
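Both functions are io-pgtable callbacks that get the domain back as the opaque cookie passed to alloc_io_pgtable_ops(). A sketch, assuming this pre-coherent-walk vintage where CPU-written page-table memory is pushed out to the device with a streaming DMA mapping:

        static void ipmmu_tlb_flush_all(void *cookie)
        {
                struct ipmmu_vmsa_domain *domain = cookie;

                ipmmu_tlb_invalidate(domain);
        }

        static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
        {
                struct ipmmu_vmsa_domain *domain = cookie;
                unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;

                /* Make CPU-written page-table entries visible to the IPMMU. */
                dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
                             DMA_TO_DEVICE);
        }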
In ipmmu_domain_init_context() (domain is an argument):

  310  static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
  325          domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
  326          domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
  327          domain->cfg.ias = 32;
  328          domain->cfg.oas = 40;
  329          domain->cfg.tlb = &ipmmu_gather_ops;
  331          domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
  332                                             domain);
  333          if (!domain->iop)
  340          domain->context_id = 0;
  343          ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
  344          ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
  345          ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
  352          ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
  357          ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
  360          ipmmu_ctx_write(domain, IMBUSCR,
  361                          ipmmu_ctx_read(domain, IMBUSCR) &
  368          ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));
  377          ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
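Read top to bottom, the fragments trace the whole context setup: configure the io-pgtable (32-bit input addresses, 40-bit output addresses, 4K/2M/1G pages, non-secure ARM 32-bit LPAE stage-1 descriptors), allocate the page-table ops with the domain as cookie, claim context 0, then program the context registers and enable translation. A condensed sketch of the register-programming tail, with the gaps between fragments filled in under the assumption that the elided bit masks follow the driver's IMTTBCR_*/IMBUSCR_* naming:

        u64 ttbr;
        ...
        /* Claim context 0 (only a single context is supported here). */
        domain->context_id = 0;

        /* TTBR0: split the 64-bit table address across two 32-bit registers. */
        ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
        ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
        ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);

        /* TTBCR: LPAE, inner-shareable write-back write-allocate table walks. */
        ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
                        IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
                        IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);

        /* MAIR0: memory attributes computed by the io-pgtable code. */
        ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);

        /* IMBUSCR: keep defaults but clear the DVM and bus-select fields. */
        ipmmu_ctx_write(domain, IMBUSCR,
                        ipmmu_ctx_read(domain, IMBUSCR) &
                        ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

        /* IMSTR: clear pending fault flags by writing the status back. */
        ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));

        /* IMCTR: enable translation and interrupts, flushing stale TLB state. */
        ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);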
In ipmmu_domain_destroy_context() (domain is an argument):

  382  static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
  390          ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
  391          ipmmu_tlb_sync(domain);
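Teardown is the mirror image of setup: a sketch assuming, as the fragment suggests, that writing IMCTR with only IMCTR_FLUSH set disables translation (IMCTR_MMUEN cleared) while flushing the TLB, and that the flush must complete before the context can be reused:

        static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
        {
                /*
                 * Disable the context and flush the TLB, as required when
                 * modifying the context registers.
                 */
                ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
                ipmmu_tlb_sync(domain);
        }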
In ipmmu_domain_irq() (domain is an argument):

  398  static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
  401          struct ipmmu_vmsa_device *mmu = domain->mmu;
  405          status = ipmmu_ctx_read(domain, IMSTR);
  409          iova = ipmmu_ctx_read(domain, IMEAR);
  417          ipmmu_ctx_write(domain, IMSTR, 0);
  436          if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))

In ipmmu_irq() (domain is a local):

  450          struct ipmmu_vmsa_domain *domain;
  455          io_domain = mmu->mapping->domain;
  456          domain = to_vmsa_domain(io_domain);
  458          return ipmmu_domain_irq(domain);
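The fault path reads the status from IMSTR, latches the faulting address from IMEAR (which must be read before the status is cleared, or it reads back as zero), acknowledges by writing 0, and only then reports upward. A condensed sketch; the err_mask bits and the per-error logging that the fragments elide are assumptions based on this driver vintage:

        static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
        {
                const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
                struct ipmmu_vmsa_device *mmu = domain->mmu;
                u32 status;
                u32 iova;

                status = ipmmu_ctx_read(domain, IMSTR);
                if (!(status & err_mask))
                        return IRQ_NONE;

                /* Read IMEAR before clearing IMSTR, or the address is lost. */
                iova = ipmmu_ctx_read(domain, IMEAR);
                ipmmu_ctx_write(domain, IMSTR, 0);

                /* Give the client driver a chance to handle the fault first. */
                if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
                        return IRQ_HANDLED;

                dev_err_ratelimited(mmu->dev,
                                    "Unhandled fault: status 0x%08x iova 0x%08x\n",
                                    status, iova);
                return IRQ_HANDLED;
        }

The top-level ipmmu_irq() handler, as the second group shows, resolves the single active domain through mmu->mapping->domain and forwards to ipmmu_domain_irq(), reflecting the one-context-per-device limitation noted in ipmmu_domain_init_context().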
In ipmmu_domain_alloc() (domain is a local):

  467          struct ipmmu_vmsa_domain *domain;
  472          domain = kzalloc(sizeof(*domain), GFP_KERNEL);
  473          if (!domain)
  476          spin_lock_init(&domain->lock);
  478          return &domain->io_domain;

In ipmmu_domain_free() (domain is a local):

  483          struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
  489          ipmmu_domain_destroy_context(domain);
  490          free_io_pgtable_ops(domain->iop);
  491          kfree(domain);
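Allocation and free are plain lifetime bookkeeping around the embedded struct iommu_domain. A sketch, assuming the domain_alloc(type) prototype of this era and that to_vmsa_domain() is a container_of() wrapper recovering the driver domain from the generic one:

        static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
        {
                struct ipmmu_vmsa_domain *domain;

                if (type != IOMMU_DOMAIN_UNMANAGED)
                        return NULL;

                domain = kzalloc(sizeof(*domain), GFP_KERNEL);
                if (!domain)
                        return NULL;

                spin_lock_init(&domain->lock);

                /* Hand the embedded generic domain back to the IOMMU core. */
                return &domain->io_domain;
        }

        static void ipmmu_domain_free(struct iommu_domain *io_domain)
        {
                struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

                /* Assumes all devices have already been detached. */
                ipmmu_domain_destroy_context(domain);
                free_io_pgtable_ops(domain->iop);
                kfree(domain);
        }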
In ipmmu_attach_device() (domain is a local):

  499          struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
  509          spin_lock_irqsave(&domain->lock, flags);
  511          if (!domain->mmu) {
  513                  domain->mmu = mmu;
  514                  ret = ipmmu_domain_init_context(domain);
  515          } else if (domain->mmu != mmu) {
  521                          dev_name(mmu->dev), dev_name(domain->mmu->dev));
  525          spin_unlock_irqrestore(&domain->lock, flags);
  531                  ipmmu_utlb_enable(domain, archdata->utlbs[i]);

In ipmmu_detach_device() (domain is a local):

  540          struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
  544                  ipmmu_utlb_disable(domain, archdata->utlbs[i]);
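Attach lazily initializes the hardware context under domain->lock on first use, rejects mixing devices behind different IPMMU instances in one domain, and then enables each of the device's micro-TLBs; detach walks the same list to disable them. A condensed sketch of the locked section (the flags, ret, i, dev, mmu and archdata variables, and archdata->num_utlbs, are filled in from context and should be treated as assumptions):

        spin_lock_irqsave(&domain->lock, flags);

        if (!domain->mmu) {
                /* First attach: bind the domain to this IPMMU and set it up. */
                domain->mmu = mmu;
                ret = ipmmu_domain_init_context(domain);
        } else if (domain->mmu != mmu) {
                /* A single domain cannot span two different IPMMU instances. */
                dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
                        dev_name(mmu->dev), dev_name(domain->mmu->dev));
                ret = -EINVAL;
        }

        spin_unlock_irqrestore(&domain->lock, flags);

        if (ret < 0)
                return ret;

        /* Route every micro-TLB used by this device to the domain's context. */
        for (i = 0; i < archdata->num_utlbs; ++i)
                ipmmu_utlb_enable(domain, archdata->utlbs[i]);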
In ipmmu_map() (domain is a local):

  554          struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
  556          if (!domain)
  559          return domain->iop->map(domain->iop, iova, paddr, size, prot);

In ipmmu_unmap() (domain is a local):

  565          struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
  567          return domain->iop->unmap(domain->iop, iova, size);

In ipmmu_iova_to_phys() (domain is a local):

  573          struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
  577          return domain->iop->iova_to_phys(domain->iop, iova);
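These entry points are thin wrappers: all page-table manipulation is delegated to the io-pgtable ops allocated in ipmmu_domain_init_context(), which in turn invokes the TLB callbacks above as needed. A sketch of the wrapper shape for ipmmu_map(); ipmmu_unmap() and ipmmu_iova_to_phys() follow the same pattern with their respective ops (the exact prototypes are assumptions matching the iommu_ops of this era):

        static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
                             phys_addr_t paddr, size_t size, int prot)
        {
                struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

                if (!domain)
                        return -ENODEV;

                /* All page-table work happens in the ARM LPAE io-pgtable code. */
                return domain->iop->map(domain->iop, iova, paddr, size, prot);
        }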