Lines matching refs:iommu — a cross-reference of the identifier iommu in the Intel interrupt-remapping driver (drivers/iommu/intel_irq_remapping.c). Each entry gives the source line number, the matching line, and its context: a struct member, a function argument, a local, or a use inside the named function.
21 struct intel_iommu *iommu; member
28 struct intel_iommu *iommu; member
73 if (unlikely(!irq_iommu->iommu)) { in get_irte()
79 *entry = *(irq_iommu->iommu->ir_table->base + index); in get_irte()
85 static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) in alloc_irte() argument
87 struct ir_table *table = iommu->ir_table; in alloc_irte()
102 if (mask > ecap_max_handle_mask(iommu->ecap)) { in alloc_irte()
106 ecap_max_handle_mask(iommu->ecap)); in alloc_irte()
114 pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id); in alloc_irte()
117 irq_iommu->iommu = iommu; in alloc_irte()
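The alloc_irte() entries above (lines 85-117) are the IRTE allocator. Below is a minimal sketch of the full function, reconstructed around those lines; irq_2_iommu(), irq_2_ir_lock and INTR_REMAP_TABLE_ENTRIES do not appear in this listing and are assumed from the surrounding v3.x-era driver.

static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);	/* per-IRQ bookkeeping */
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	/* Multi-vector requests round up to a power-of-two block. */
	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	/* Hardware caps the subhandle mask (the check on line 102 above). */
	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		pr_err("Requested mask %x exceeds the max invalidation handle mask value %Lx\n",
		       mask, ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	/* Reserve a naturally aligned 2^mask run of table slots. */
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		/* Record which IOMMU and table slot this IRQ maps through. */
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}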
127 static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) in qi_flush_iec() argument
135 return qi_submit_sync(&desc, iommu); in qi_flush_iec()
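qi_flush_iec() (lines 127-135) is a thin wrapper that invalidates the hardware's interrupt entry cache (IEC) through the queued-invalidation interface. A sketch follows; struct qi_desc and the QI_IEC_* descriptor macros come from the driver's headers, not from this listing.

static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	/* Selective IEC invalidation: 2^mask entries starting at index. */
	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	/* Queue the descriptor and wait for the hardware to retire it. */
	return qi_submit_sync(&desc, iommu);
}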
154 static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) in set_irte_irq() argument
166 irq_iommu->iommu = iommu; in set_irte_irq()
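set_irte_irq() (lines 154-166) binds an additional IRQ to an already allocated IRTE block via a sub-handle; the MSI path further down (lines 1142-1179) is its main user. A sketch under the same assumptions as above:

static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;	/* sub-handles own no region of their own */

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}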
179 struct intel_iommu *iommu; in modify_irte() local
189 iommu = irq_iommu->iommu; in modify_irte()
192 irte = &iommu->ir_table->base[index]; in modify_irte()
196 __iommu_flush_cache(iommu, irte, sizeof(*irte)); in modify_irte()
198 rc = qi_flush_iec(iommu, index, 0); in modify_irte()
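modify_irte() (lines 179-198) rewrites an IRTE in place and then makes the change visible in two steps: a cache flush for non-coherent hardware, and an IEC invalidation. A sketch of the whole function; set_64bit() and the locals are filled in from the real driver, not this listing.

static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	/* Publish the new entry 64 bits at a time. */
	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);

	/* Push the update past the CPU cache for non-coherent IOMMUs... */
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	/* ...and drop any stale copy from the interrupt entry cache. */
	rc = qi_flush_iec(iommu, index, 0);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}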
209 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu) in map_hpet_to_ir()
210 return ir_hpet[i].iommu; in map_hpet_to_ir()
219 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu) in map_ioapic_to_ir()
220 return ir_ioapic[i].iommu; in map_ioapic_to_ir()
232 return drhd->iommu; in map_dev_to_ir()
238 struct intel_iommu *iommu; in clear_entries() local
244 iommu = irq_iommu->iommu; in clear_entries()
247 start = iommu->ir_table->base + index; in clear_entries()
254 bitmap_release_region(iommu->ir_table->bitmap, index, in clear_entries()
257 return qi_flush_iec(iommu, index, irq_iommu->irte_mask); in clear_entries()
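clear_entries() (lines 238-257) is the teardown counterpart of alloc_irte(): zero the owned IRTEs, return the slots to the bitmap, flush the IEC. Nearly every key line already appears above; this sketch only fills in the clearing loop.

static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;	/* only the base handle owns the region */

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	/* Clearing the low word drops the 'present' bit, disabling the entry. */
	for (entry = start; entry < end; entry++)
		set_64bit(&entry->low, 0);

	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}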
273 irq_iommu->iommu = NULL; in free_irte()
328 if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) { in set_ioapic_sid()
355 if (ir_hpet[i].iommu && ir_hpet[i].id == id) { in set_hpet_sid()
427 static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) in iommu_set_irq_remapping() argument
433 addr = virt_to_phys((void *)iommu->ir_table->base); in iommu_set_irq_remapping()
435 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_set_irq_remapping()
437 dmar_writeq(iommu->reg + DMAR_IRTA_REG, in iommu_set_irq_remapping()
441 writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_irq_remapping()
443 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_irq_remapping()
445 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_set_irq_remapping()
451 qi_global_iec(iommu); in iommu_set_irq_remapping()
453 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_set_irq_remapping()
456 iommu->gcmd |= DMA_GCMD_IRE; in iommu_set_irq_remapping()
457 iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */ in iommu_set_irq_remapping()
458 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_set_irq_remapping()
460 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_irq_remapping()
473 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_set_irq_remapping()
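The iommu_set_irq_remapping() entries (lines 427-473) capture the hardware enable sequence: program DMAR_IRTA_REG with the table address, size and EIM mode, latch it with a SIRTP command, globally invalidate the IEC, then set IRE and clear CFI so compatibility-format MSIs are blocked. A sketch of that register dance; IOMMU_WAIT_OP is the driver's poll-until macro and is assumed here.

static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Where the table lives, how big it is, and x2APIC (EIM) mode. */
	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    addr | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Latch the table pointer (SIRTP) and wait for IRTPS to assert. */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/* Flush the interrupt entry cache before enabling remapping. */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable remapping (IRE); block compatibility-format MSIs (CFI). */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}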
476 static int intel_setup_irq_remapping(struct intel_iommu *iommu) in intel_setup_irq_remapping() argument
482 if (iommu->ir_table) in intel_setup_irq_remapping()
489 pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, in intel_setup_irq_remapping()
494 iommu->seq_id, INTR_REMAP_PAGE_ORDER); in intel_setup_irq_remapping()
501 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); in intel_setup_irq_remapping()
507 iommu->ir_table = ir_table; in intel_setup_irq_remapping()
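intel_setup_irq_remapping() (lines 476-507) allocates the per-IOMMU remapping table and its allocation bitmap. A sketch with the error unwinding filled in; INTR_REMAP_PAGE_ORDER and INTR_REMAP_TABLE_ENTRIES are driver constants not shown in this listing.

static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct page *pages;
	unsigned long *bitmap;

	if (iommu->ir_table)
		return 0;	/* already set up, e.g. across a hotplug cycle */

	ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
	if (!ir_table)
		return -ENOMEM;

	/* The table itself: 2^INTR_REMAP_PAGE_ORDER zeroed pages on the
	 * IOMMU's home NUMA node. */
	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	/* One bit per IRTE for the region allocator used by alloc_irte(). */
	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (!bitmap) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;
	return 0;

out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);
	return -ENOMEM;
}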
517 static void intel_teardown_irq_remapping(struct intel_iommu *iommu) in intel_teardown_irq_remapping() argument
519 if (iommu && iommu->ir_table) { in intel_teardown_irq_remapping()
520 free_pages((unsigned long)iommu->ir_table->base, in intel_teardown_irq_remapping()
522 kfree(iommu->ir_table->bitmap); in intel_teardown_irq_remapping()
523 kfree(iommu->ir_table); in intel_teardown_irq_remapping()
524 iommu->ir_table = NULL; in intel_teardown_irq_remapping()
531 static void iommu_disable_irq_remapping(struct intel_iommu *iommu) in iommu_disable_irq_remapping() argument
536 if (!ecap_ir_support(iommu->ecap)) in iommu_disable_irq_remapping()
543 qi_global_iec(iommu); in iommu_disable_irq_remapping()
545 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_irq_remapping()
547 sts = readl(iommu->reg + DMAR_GSTS_REG); in iommu_disable_irq_remapping()
551 iommu->gcmd &= ~DMA_GCMD_IRE; in iommu_disable_irq_remapping()
552 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_irq_remapping()
554 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_irq_remapping()
558 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_irq_remapping()
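iommu_disable_irq_remapping() (lines 531-558) mirrors the enable path: flush the IEC first, then clear IRE and poll DMAR_GSTS_REG until the IRES status bit drops. Sketched under the same assumptions:

static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/* Flush the interrupt entry cache before turning remapping off. */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;	/* already disabled */

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}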
573 struct intel_iommu *iommu; in intel_cleanup_irq_remapping() local
575 for_each_iommu(iommu, drhd) { in intel_cleanup_irq_remapping()
576 if (ecap_ir_support(iommu->ecap)) { in intel_cleanup_irq_remapping()
577 iommu_disable_irq_remapping(iommu); in intel_cleanup_irq_remapping()
578 intel_teardown_irq_remapping(iommu); in intel_cleanup_irq_remapping()
589 struct intel_iommu *iommu; in intel_prepare_irq_remapping() local
614 for_each_iommu(iommu, drhd) in intel_prepare_irq_remapping()
615 if (!ecap_ir_support(iommu->ecap)) in intel_prepare_irq_remapping()
619 for_each_iommu(iommu, drhd) in intel_prepare_irq_remapping()
620 if (intel_setup_irq_remapping(iommu)) in intel_prepare_irq_remapping()
633 struct intel_iommu *iommu; in intel_enable_irq_remapping() local
643 for_each_iommu(iommu, drhd) { in intel_enable_irq_remapping()
648 if (iommu->qi) in intel_enable_irq_remapping()
654 dmar_fault(-1, iommu); in intel_enable_irq_remapping()
660 iommu_disable_irq_remapping(iommu); in intel_enable_irq_remapping()
662 dmar_disable_qi(iommu); in intel_enable_irq_remapping()
668 for_each_iommu(iommu, drhd) in intel_enable_irq_remapping()
669 if (eim && !ecap_eim_support(iommu->ecap)) { in intel_enable_irq_remapping()
671 " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap); in intel_enable_irq_remapping()
681 for_each_iommu(iommu, drhd) { in intel_enable_irq_remapping()
682 int ret = dmar_enable_qi(iommu); in intel_enable_irq_remapping()
687 drhd->reg_base_addr, iommu->ecap, ret); in intel_enable_irq_remapping()
695 for_each_iommu(iommu, drhd) { in intel_enable_irq_remapping()
696 iommu_set_irq_remapping(iommu, eim); in intel_enable_irq_remapping()
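intel_enable_irq_remapping() (lines 633-696) orders boot-time bring-up across every DRHD unit. The following is a heavily condensed sketch of the loop structure only; the error labels, eim bookkeeping and log messages of the real function are elided.

	/* Quiesce units the firmware may have left active. */
	for_each_iommu(iommu, drhd) {
		if (iommu->qi)
			continue;	/* QI already initialized; leave it be */
		dmar_fault(-1, iommu);	/* clear stale faults */
		iommu_disable_irq_remapping(iommu);
		dmar_disable_qi(iommu);
	}

	/* x2APIC requires EIM support on every unit. */
	for_each_iommu(iommu, drhd)
		if (eim && !ecap_eim_support(iommu->ecap))
			eim = 0;

	/* Queued invalidation is mandatory: IEC flushes go through it. */
	for_each_iommu(iommu, drhd)
		if (dmar_enable_qi(iommu))
			goto error;

	/* Finally program and enable remapping on each unit. */
	for_each_iommu(iommu, drhd)
		iommu_set_irq_remapping(iommu, eim);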
722 struct intel_iommu *iommu, in ir_parse_one_hpet_scope() argument
745 if (ir_hpet[count].iommu == iommu && in ir_parse_one_hpet_scope()
748 else if (ir_hpet[count].iommu == NULL && free == -1) in ir_parse_one_hpet_scope()
756 ir_hpet[free].iommu = iommu; in ir_parse_one_hpet_scope()
767 struct intel_iommu *iommu, in ir_parse_one_ioapic_scope() argument
790 if (ir_ioapic[count].iommu == iommu && in ir_parse_one_ioapic_scope()
793 else if (ir_ioapic[count].iommu == NULL && free == -1) in ir_parse_one_ioapic_scope()
803 ir_ioapic[free].iommu = iommu; in ir_parse_one_ioapic_scope()
806 scope->enumeration_id, drhd->address, iommu->seq_id); in ir_parse_one_ioapic_scope()
812 struct intel_iommu *iommu) in ir_parse_ioapic_hpet_scope() argument
826 ret = ir_parse_one_ioapic_scope(scope, iommu, drhd); in ir_parse_ioapic_hpet_scope()
828 ret = ir_parse_one_hpet_scope(scope, iommu, drhd); in ir_parse_ioapic_hpet_scope()
835 static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu) in ir_remove_ioapic_hpet_scope() argument
840 if (ir_hpet[i].iommu == iommu) in ir_remove_ioapic_hpet_scope()
841 ir_hpet[i].iommu = NULL; in ir_remove_ioapic_hpet_scope()
844 if (ir_ioapic[i].iommu == iommu) in ir_remove_ioapic_hpet_scope()
845 ir_ioapic[i].iommu = NULL; in ir_remove_ioapic_hpet_scope()
855 struct intel_iommu *iommu; in parse_ioapics_under_ir() local
859 for_each_iommu(iommu, drhd) in parse_ioapics_under_ir()
860 if (ecap_ir_support(iommu->ecap)) { in parse_ioapics_under_ir()
861 if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu)) in parse_ioapics_under_ir()
901 struct intel_iommu *iommu = NULL; in disable_irq_remapping() local
906 for_each_iommu(iommu, drhd) { in disable_irq_remapping()
907 if (!ecap_ir_support(iommu->ecap)) in disable_irq_remapping()
910 iommu_disable_irq_remapping(iommu); in disable_irq_remapping()
918 struct intel_iommu *iommu = NULL; in reenable_irq_remapping() local
920 for_each_iommu(iommu, drhd) in reenable_irq_remapping()
921 if (iommu->qi) in reenable_irq_remapping()
922 dmar_reenable_qi(iommu); in reenable_irq_remapping()
927 for_each_iommu(iommu, drhd) { in reenable_irq_remapping()
928 if (!ecap_ir_support(iommu->ecap)) in reenable_irq_remapping()
932 iommu_set_irq_remapping(iommu, eim); in reenable_irq_remapping()
975 struct intel_iommu *iommu; in intel_setup_ioapic_entry() local
981 iommu = map_ioapic_to_ir(ioapic_id); in intel_setup_ioapic_entry()
982 if (!iommu) { in intel_setup_ioapic_entry()
986 index = alloc_irte(iommu, irq, 1); in intel_setup_ioapic_entry()
1142 struct intel_iommu *iommu; in intel_msi_alloc_irq() local
1146 iommu = map_dev_to_ir(dev); in intel_msi_alloc_irq()
1147 if (!iommu) { in intel_msi_alloc_irq()
1152 index = alloc_irte(iommu, irq, nvec); in intel_msi_alloc_irq()
1168 struct intel_iommu *iommu; in intel_msi_setup_irq() local
1172 iommu = map_dev_to_ir(pdev); in intel_msi_setup_irq()
1173 if (iommu) { in intel_msi_setup_irq()
1179 set_irte_irq(irq, iommu, index, sub_handle); in intel_msi_setup_irq()
1190 struct intel_iommu *iommu; in intel_alloc_hpet_msi() local
1194 iommu = map_hpet_to_ir(id); in intel_alloc_hpet_msi()
1195 if (iommu) { in intel_alloc_hpet_msi()
1196 index = alloc_irte(iommu, irq, 1); in intel_alloc_hpet_msi()
1223 static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu) in dmar_ir_add() argument
1228 if (eim && !ecap_eim_support(iommu->ecap)) { in dmar_ir_add()
1230 iommu->reg_phys, iommu->ecap); in dmar_ir_add()
1234 if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) { in dmar_ir_add()
1236 iommu->reg_phys); in dmar_ir_add()
1243 ret = intel_setup_irq_remapping(iommu); in dmar_ir_add()
1246 iommu->reg_phys); in dmar_ir_add()
1247 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_add()
1251 if (!iommu->qi) { in dmar_ir_add()
1253 dmar_fault(-1, iommu); in dmar_ir_add()
1254 iommu_disable_irq_remapping(iommu); in dmar_ir_add()
1255 dmar_disable_qi(iommu); in dmar_ir_add()
1259 ret = dmar_enable_qi(iommu); in dmar_ir_add()
1261 iommu_set_irq_remapping(iommu, eim); in dmar_ir_add()
1264 iommu->reg_phys, iommu->ecap, ret); in dmar_ir_add()
1265 intel_teardown_irq_remapping(iommu); in dmar_ir_add()
1266 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_add()
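dmar_ir_add() (lines 1223-1266) is the IOMMU hotplug-add path: it repeats the boot-time sequence for a single new unit and unwinds on failure. A condensed sketch, with log messages dropped:

static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
	int eim = x2apic_enabled();
	int ret;

	/* A hot-added unit must match the mode the system booted with. */
	if (eim && !ecap_eim_support(iommu->ecap))
		return -ENODEV;

	/* Register any IOAPIC/HPET devices scoped under this unit. */
	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu))
		return -ENODEV;

	ret = intel_setup_irq_remapping(iommu);
	if (ret) {
		ir_remove_ioapic_hpet_scope(iommu);
		return ret;
	}

	if (!iommu->qi) {
		/* Clean up firmware state before bringing up QI. */
		dmar_fault(-1, iommu);
		iommu_disable_irq_remapping(iommu);
		dmar_disable_qi(iommu);
	}

	ret = dmar_enable_qi(iommu);
	if (!ret) {
		iommu_set_irq_remapping(iommu, eim);
	} else {
		intel_teardown_irq_remapping(iommu);
		ir_remove_ioapic_hpet_scope(iommu);
	}

	return ret;
}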
1275 struct intel_iommu *iommu = dmaru->iommu; in dmar_ir_hotplug() local
1279 if (iommu == NULL) in dmar_ir_hotplug()
1281 if (!ecap_ir_support(iommu->ecap)) in dmar_ir_hotplug()
1285 if (!iommu->ir_table) in dmar_ir_hotplug()
1286 ret = dmar_ir_add(dmaru, iommu); in dmar_ir_hotplug()
1288 if (iommu->ir_table) { in dmar_ir_hotplug()
1289 if (!bitmap_empty(iommu->ir_table->bitmap, in dmar_ir_hotplug()
1293 iommu_disable_irq_remapping(iommu); in dmar_ir_hotplug()
1294 intel_teardown_irq_remapping(iommu); in dmar_ir_hotplug()
1295 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_hotplug()
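dmar_ir_hotplug() (lines 1275-1295) closes the listing. Insertion defers to dmar_ir_add(); removal refuses to tear down a unit whose table still has live entries. This sketch is assembled almost entirely from the lines above; only the signature and return bookkeeping are assumed.

int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	struct intel_iommu *iommu = dmaru->iommu;
	int ret = 0;

	if (iommu == NULL)
		return -EINVAL;
	if (!ecap_ir_support(iommu->ecap))
		return 0;

	if (insert) {
		if (!iommu->ir_table)
			ret = dmar_ir_add(dmaru, iommu);
	} else if (iommu->ir_table) {
		/* Refuse removal while any IRQ still routes through here. */
		if (!bitmap_empty(iommu->ir_table->bitmap,
				  INTR_REMAP_TABLE_ENTRIES)) {
			ret = -EBUSY;
		} else {
			iommu_disable_irq_remapping(iommu);
			intel_teardown_irq_remapping(iommu);
			ir_remove_ioapic_hpet_scope(iommu);
		}
	}

	return ret;
}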