Lines Matching refs:iommu
206 return dev->archdata.iommu; in get_dev_data()
368 if (dev->archdata.iommu) in iommu_init_device()
394 struct amd_iommu *iommu; in iommu_init_device() local
396 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
397 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
400 dev->archdata.iommu = dev_data; in iommu_init_device()
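The matches at 206 and 368-400 show the per-device bookkeeping used throughout this driver (the functions all appear to belong to the AMD IOMMU driver): iommu_init_device() looks up the owning IOMMU in amd_iommu_rlookup_table[] by device ID, records whether it supports IOMMUv2, and caches the iommu_dev_data pointer in dev->archdata.iommu, where get_dev_data() later retrieves it. A minimal sketch reconstructed from those lines; find_dev_data() and get_device_id() are helpers assumed from the surrounding file, and the alias/PCI handling around lines 368-400 is omitted:

static struct iommu_dev_data *get_dev_data(struct device *dev)
{
        return dev->archdata.iommu;
}

static int iommu_init_device(struct device *dev)
{
        struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu;

        if (dev->archdata.iommu)
                return 0;                               /* already initialized */

        dev_data = find_dev_data(get_device_id(dev));   /* assumed helper; allocates dev_data */
        if (!dev_data)
                return -ENOMEM;

        iommu = amd_iommu_rlookup_table[dev_data->devid];
        dev_data->iommu_v2 = iommu->is_iommu_v2;

        dev->archdata.iommu = dev_data;
        return 0;
}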
583 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
662 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
666 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
667 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
670 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
671 head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; in iommu_poll_events()
674 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
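Lines 662-674 outline the event-log consumer: head and tail pointers live in MMIO registers, every entry between them is printed, and the head is advanced modulo the buffer size before being written back so the hardware can reuse the slots. A sketch of that loop assembled from the matched lines (only the loop construct itself is assumed):

static void iommu_poll_events(struct amd_iommu *iommu)
{
        u32 head, tail;

        head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        while (head != tail) {
                iommu_print_event(iommu, iommu->evt_buf + head);
                head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
        }

        /* tell the hardware how far the log has been consumed */
        writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}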
677 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) in iommu_handle_ppr_entry() argument
697 static void iommu_poll_ppr_log(struct amd_iommu *iommu) in iommu_poll_ppr_log() argument
701 if (iommu->ppr_log == NULL) in iommu_poll_ppr_log()
704 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
705 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
712 raw = (u64 *)(iommu->ppr_log + head); in iommu_poll_ppr_log()
737 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
740 iommu_handle_ppr_entry(iommu, entry); in iommu_poll_ppr_log()
743 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
744 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
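Lines 697-744 show the same head/tail scheme for the PPR (peripheral page request) log, with two details visible in the matches: each raw two-word entry is copied out of the ring and the head register is updated (737) before the entry is handled (740), so the slot goes back to the hardware early, and head/tail are re-read at the bottom (743-744) to pick up requests that arrived in the meantime. A sketch under those assumptions; PPR_ENTRY_SIZE and PPR_LOG_SIZE are constants from the driver headers that the matches do not show, and the valid-bit polling on the raw entry is omitted:

static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
        u32 head, tail;

        if (iommu->ppr_log == NULL)
                return;

        head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

        while (head != tail) {
                volatile u64 *raw;
                u64 entry[2];

                raw = (u64 *)(iommu->ppr_log + head);

                entry[0] = raw[0];              /* copy the entry out of the ring */
                entry[1] = raw[1];

                /* release the slot before handling the request */
                head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
                writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

                iommu_handle_ppr_entry(iommu, entry);

                /* refresh pointers in case new requests arrived meanwhile */
                head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
                tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
        }
}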
750 struct amd_iommu *iommu = (struct amd_iommu *) data; in amd_iommu_int_thread() local
751 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
756 iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
760 iommu_poll_events(iommu); in amd_iommu_int_thread()
765 iommu_poll_ppr_log(iommu); in amd_iommu_int_thread()
781 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
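Lines 750-781 are the threaded interrupt handler tying the two poll routines together: read the status register, acknowledge the pending log interrupts by writing the bits back, poll whichever log signalled, and re-read the status (781) so an interrupt raised during polling is not lost. Sketch; the MMIO_STATUS_*_INT_MASK bit names are taken from the driver headers, not from the matches:

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
        struct amd_iommu *iommu = (struct amd_iommu *) data;
        u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

        while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
                /* acknowledge the pending log interrupts */
                writel(status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
                       iommu->mmio_base + MMIO_STATUS_OFFSET);

                if (status & MMIO_STATUS_EVT_INT_MASK)
                        iommu_poll_events(iommu);

                if (status & MMIO_STATUS_PPR_INT_MASK)
                        iommu_poll_ppr_log(iommu);

                /* the hardware may have set the bits again while we were polling */
                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
        }

        return IRQ_HANDLED;
}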
814 static void copy_cmd_to_buffer(struct amd_iommu *iommu, in copy_cmd_to_buffer() argument
820 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
821 tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; in copy_cmd_to_buffer()
827 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
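Lines 814-827 are the producer side of the command ring: the command is copied to the current tail slot, the tail is advanced modulo cmd_buf_size, and the new tail is written to the MMIO tail register so the IOMMU starts fetching. The only part not visible in the matches is the copy of the command itself:

static void copy_cmd_to_buffer(struct amd_iommu *iommu,
                               struct iommu_cmd *cmd,
                               u32 tail)
{
        u8 *target;

        target = iommu->cmd_buf + tail;
        tail   = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;

        /* copy the command into the ring, then publish the new tail */
        memcpy(target, cmd, sizeof(*cmd));
        writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}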
981 static int iommu_queue_command_sync(struct amd_iommu *iommu, in iommu_queue_command_sync() argument
988 WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); in iommu_queue_command_sync()
991 spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
993 head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); in iommu_queue_command_sync()
994 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in iommu_queue_command_sync()
995 next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; in iommu_queue_command_sync()
996 left = (head - next_tail) % iommu->cmd_buf_size; in iommu_queue_command_sync()
1004 copy_cmd_to_buffer(iommu, &sync_cmd, tail); in iommu_queue_command_sync()
1006 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1014 copy_cmd_to_buffer(iommu, cmd, tail); in iommu_queue_command_sync()
1017 iommu->need_sync = sync; in iommu_queue_command_sync()
1019 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1024 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) in iommu_queue_command() argument
1026 return iommu_queue_command_sync(iommu, cmd, true); in iommu_queue_command()
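Lines 981-1026 show how commands are queued under iommu->lock: head and tail are read back from MMIO, the free space is computed as (head - next_tail) % cmd_buf_size, and when the ring is nearly full a COMPLETION_WAIT is slipped in at the current tail (1004) so the caller can wait for the hardware to drain before retrying. A sketch under those assumptions; build_completion_wait(), wait_on_sem() and the "left <= 2" threshold are taken from the surrounding file, not from the matches:

static int iommu_queue_command_sync(struct amd_iommu *iommu,
                                    struct iommu_cmd *cmd,
                                    bool sync)
{
        u32 left, tail, head, next_tail;
        unsigned long flags;

        WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);

again:
        spin_lock_irqsave(&iommu->lock, flags);

        head      = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        tail      = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
        left      = (head - next_tail) % iommu->cmd_buf_size;

        if (left <= 2) {
                /* ring nearly full: queue a completion-wait and drain first */
                struct iommu_cmd sync_cmd;
                volatile u64 sem = 0;
                int ret;

                build_completion_wait(&sync_cmd, (u64)&sem);
                copy_cmd_to_buffer(iommu, &sync_cmd, tail);

                spin_unlock_irqrestore(&iommu->lock, flags);

                if ((ret = wait_on_sem(&sem)) != 0)
                        return ret;

                goto again;
        }

        copy_cmd_to_buffer(iommu, cmd, tail);

        /* remember that a completion-wait is needed at the next sync point */
        iommu->need_sync = sync;

        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
        return iommu_queue_command_sync(iommu, cmd, true);
}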
1033 static int iommu_completion_wait(struct amd_iommu *iommu) in iommu_completion_wait() argument
1039 if (!iommu->need_sync) in iommu_completion_wait()
1044 ret = iommu_queue_command_sync(iommu, &cmd, false); in iommu_completion_wait()
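Lines 1033-1044 show the synchronization primitive built on top of the queue: iommu_completion_wait() does nothing unless need_sync was set by an earlier command, and it queues its COMPLETION_WAIT with sync=false so the wait does not re-trigger itself. Sketch, again assuming build_completion_wait()/wait_on_sem() from the surrounding file:

static int iommu_completion_wait(struct amd_iommu *iommu)
{
        struct iommu_cmd cmd;
        volatile u64 sem = 0;
        int ret;

        if (!iommu->need_sync)
                return 0;

        build_completion_wait(&cmd, (u64)&sem);

        ret = iommu_queue_command_sync(iommu, &cmd, false);
        if (ret)
                return ret;

        return wait_on_sem(&sem);
}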
1051 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
1057 return iommu_queue_command(iommu, &cmd); in iommu_flush_dte()
1060 static void iommu_flush_dte_all(struct amd_iommu *iommu) in iommu_flush_dte_all() argument
1065 iommu_flush_dte(iommu, devid); in iommu_flush_dte_all()
1067 iommu_completion_wait(iommu); in iommu_flush_dte_all()
1074 static void iommu_flush_tlb_all(struct amd_iommu *iommu) in iommu_flush_tlb_all() argument
1082 iommu_queue_command(iommu, &cmd); in iommu_flush_tlb_all()
1085 iommu_completion_wait(iommu); in iommu_flush_tlb_all()
1088 static void iommu_flush_all(struct amd_iommu *iommu) in iommu_flush_all() argument
1094 iommu_queue_command(iommu, &cmd); in iommu_flush_all()
1095 iommu_completion_wait(iommu); in iommu_flush_all()
1098 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1104 iommu_queue_command(iommu, &cmd); in iommu_flush_irt()
1107 static void iommu_flush_irt_all(struct amd_iommu *iommu) in iommu_flush_irt_all() argument
1112 iommu_flush_irt(iommu, devid); in iommu_flush_irt_all()
1114 iommu_completion_wait(iommu); in iommu_flush_irt_all()
1117 void iommu_flush_all_caches(struct amd_iommu *iommu) in iommu_flush_all_caches() argument
1119 if (iommu_feature(iommu, FEATURE_IA)) { in iommu_flush_all_caches()
1120 iommu_flush_all(iommu); in iommu_flush_all_caches()
1122 iommu_flush_dte_all(iommu); in iommu_flush_all_caches()
1123 iommu_flush_irt_all(iommu); in iommu_flush_all_caches()
1124 iommu_flush_tlb_all(iommu); in iommu_flush_all_caches()
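Lines 1117-1124 tie the flush helpers together: when the IOMMU advertises FEATURE_IA it understands the INVALIDATE_ALL command and a single iommu_flush_all() is enough; otherwise the device-table entries, interrupt remapping tables and the TLB are flushed one class at a time, each helper ending in its own iommu_completion_wait() as the matches above show. The structure implied by those lines:

void iommu_flush_all_caches(struct amd_iommu *iommu)
{
        if (iommu_feature(iommu, FEATURE_IA)) {
                /* one INVALIDATE_ALL command flushes every cache */
                iommu_flush_all(iommu);
        } else {
                iommu_flush_dte_all(iommu);
                iommu_flush_irt_all(iommu);
                iommu_flush_tlb_all(iommu);
        }
}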
1134 struct amd_iommu *iommu; in device_flush_iotlb() local
1139 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1143 return iommu_queue_command(iommu, &cmd); in device_flush_iotlb()
1151 struct amd_iommu *iommu; in device_flush_dte() local
1154 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1156 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1470 static int iommu_for_unity_map(struct amd_iommu *iommu, in iommu_for_unity_map() argument
1477 if (amd_iommu_rlookup_table[bdf] == iommu) in iommu_for_unity_map()
1518 static int iommu_init_unity_mappings(struct amd_iommu *iommu) in iommu_init_unity_mappings() argument
1524 if (!iommu_for_unity_map(iommu, entry)) in iommu_init_unity_mappings()
1526 ret = dma_ops_unity_map(iommu->default_dom, entry); in iommu_init_unity_mappings()
1599 struct amd_iommu *iommu; in alloc_new_range() local
1652 for_each_iommu(iommu) { in alloc_new_range()
1653 if (iommu->exclusion_start && in alloc_new_range()
1654 iommu->exclusion_start >= dma_dom->aperture[index]->offset in alloc_new_range()
1655 && iommu->exclusion_start < dma_dom->aperture_size) { in alloc_new_range()
1657 int pages = iommu_num_pages(iommu->exclusion_start, in alloc_new_range()
1658 iommu->exclusion_length, in alloc_new_range()
1660 startpage = iommu->exclusion_start >> PAGE_SHIFT; in alloc_new_range()
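Lines 1652-1660 show how a newly allocated aperture range respects the hardware exclusion range: for every IOMMU whose exclusion window starts inside the new aperture, the number of overlapping pages is computed with iommu_num_pages() and those pages are reserved in the address allocator so they are never handed out as DMA addresses. A sketch of that loop; dma_ops_reserve_addresses() is the assumed reservation helper, which the matches do not show:

        for_each_iommu(iommu) {
                if (iommu->exclusion_start &&
                    iommu->exclusion_start >= dma_dom->aperture[index]->offset
                    && iommu->exclusion_start < dma_dom->aperture_size) {
                        unsigned long startpage;
                        int pages = iommu_num_pages(iommu->exclusion_start,
                                                    iommu->exclusion_length,
                                                    PAGE_SIZE);
                        startpage = iommu->exclusion_start >> PAGE_SHIFT;
                        /* keep the exclusion range out of the DMA allocator */
                        dma_ops_reserve_addresses(dma_dom, startpage, pages);
                }
        }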
2111 struct amd_iommu *iommu; in do_attach() local
2114 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
2123 domain->dev_iommu[iommu->index] += 1; in do_attach()
2132 struct amd_iommu *iommu; in do_detach() local
2134 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
2137 dev_data->domain->dev_iommu[iommu->index] -= 1; in do_detach()
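Lines 2111-2137 show the attach/detach bookkeeping: each protection domain keeps a per-IOMMU reference count in domain->dev_iommu[], incremented when a device behind that IOMMU is attached and decremented on detach; the flushing code uses these counters to know which IOMMUs a domain is live on. A sketch of just that bookkeeping, with the device-table updates and list handling deliberately left as comments:

static void do_attach(struct iommu_dev_data *dev_data,
                      struct protection_domain *domain)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[dev_data->devid];

        /* ... link dev_data into the domain and write its device table entry ... */

        domain->dev_iommu[iommu->index] += 1;
}

static void do_detach(struct iommu_dev_data *dev_data)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[dev_data->devid];

        dev_data->domain->dev_iommu[iommu->index] -= 1;

        /* ... unlink dev_data and clear its device table entry ... */
}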
2418 struct amd_iommu *iommu; in device_change_notifier() local
2426 iommu = amd_iommu_rlookup_table[devid]; in device_change_notifier()
2473 iommu_completion_wait(iommu); in device_change_notifier()
3116 struct amd_iommu *iommu; in amd_iommu_init_dma_ops() local
3124 for_each_iommu(iommu) { in amd_iommu_init_dma_ops()
3125 iommu->default_dom = dma_ops_domain_alloc(); in amd_iommu_init_dma_ops()
3126 if (iommu->default_dom == NULL) in amd_iommu_init_dma_ops()
3128 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; in amd_iommu_init_dma_ops()
3129 ret = iommu_init_unity_mappings(iommu); in amd_iommu_init_dma_ops()
3160 for_each_iommu(iommu) { in amd_iommu_init_dma_ops()
3161 dma_ops_domain_free(iommu->default_dom); in amd_iommu_init_dma_ops()
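Lines 3116-3161 show the dma_ops initialization: every IOMMU gets a default dma_ops domain marked PD_DEFAULT_MASK plus its unity mappings, and the error path walks the same for_each_iommu() loop to free whatever default domains were already allocated. A condensed sketch of that loop and its unwind (the code installing the dma_ops between the two loops is omitted):

        for_each_iommu(iommu) {
                iommu->default_dom = dma_ops_domain_alloc();
                if (iommu->default_dom == NULL) {
                        ret = -ENOMEM;
                        goto free_domains;
                }
                iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
                ret = iommu_init_unity_mappings(iommu);
                if (ret)
                        goto free_domains;
        }

        /* ... install the dma_ops and return 0 on success ... */

free_domains:
        for_each_iommu(iommu)
                dma_ops_domain_free(iommu->default_dom);

        return ret;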
3301 struct iommu_dev_data *dev_data = dev->archdata.iommu; in amd_iommu_detach_device()
3302 struct amd_iommu *iommu; in amd_iommu_detach_device() local
3313 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_detach_device()
3314 if (!iommu) in amd_iommu_detach_device()
3317 iommu_completion_wait(iommu); in amd_iommu_detach_device()
3325 struct amd_iommu *iommu; in amd_iommu_attach_device() local
3331 dev_data = dev->archdata.iommu; in amd_iommu_attach_device()
3333 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
3334 if (!iommu) in amd_iommu_attach_device()
3342 iommu_completion_wait(iommu); in amd_iommu_attach_device()
3583 struct amd_iommu *iommu; in __flush_pasid() local
3589 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
3594 ret = iommu_queue_command(iommu, &cmd); in __flush_pasid()
3752 struct amd_iommu *iommu; in amd_iommu_complete_ppr() local
3758 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3763 return iommu_queue_command(iommu, &cmd); in amd_iommu_complete_ppr()
3885 struct amd_iommu *iommu; in get_irq_table() local
3891 iommu = amd_iommu_rlookup_table[devid]; in get_irq_table()
3892 if (!iommu) in get_irq_table()
3904 iommu_flush_dte(iommu, devid); in get_irq_table()
3938 iommu_flush_dte(iommu, devid); in get_irq_table()
3942 iommu_flush_dte(iommu, alias); in get_irq_table()
3946 iommu_completion_wait(iommu); in get_irq_table()
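Lines 3885-3946 show the rule followed when an interrupt remapping table is installed for a device: after the device table entry (and, when the device has a PCI alias, the alias's entry) is pointed at the table, iommu_flush_dte() forces the hardware to re-read it, and a single iommu_completion_wait() at the end makes the whole update visible before the table is used. A sketch of just that publish-and-flush step; set_dte_irq_entry() is the assumed helper that writes the device table entry:

        /* after allocating (or reusing) the table for this device ... */
        set_dte_irq_entry(devid, table);
        iommu_flush_dte(iommu, devid);

        if (alias != devid) {
                /* the PCI alias shares the same table; flush its entry too */
                set_dte_irq_entry(alias, table);
                iommu_flush_dte(iommu, alias);
        }

        iommu_completion_wait(iommu);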
4019 struct amd_iommu *iommu; in modify_irte() local
4022 iommu = amd_iommu_rlookup_table[devid]; in modify_irte()
4023 if (iommu == NULL) in modify_irte()
4034 iommu_flush_irt(iommu, devid); in modify_irte()
4035 iommu_completion_wait(iommu); in modify_irte()
4043 struct amd_iommu *iommu; in free_irte() local
4046 iommu = amd_iommu_rlookup_table[devid]; in free_irte()
4047 if (iommu == NULL) in free_irte()
4058 iommu_flush_irt(iommu, devid); in free_irte()
4059 iommu_completion_wait(iommu); in free_irte()
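Lines 4019-4059 show the per-entry update pattern for interrupt remapping: look up the IOMMU by devid, modify or clear the entry in the device's remapping table under the table lock, then issue iommu_flush_irt() followed by iommu_completion_wait() so the hardware sees the change before the function returns. A sketch of modify_irte() along those lines; the irq_remap_table lookup, its lock and union irte come from the surrounding driver, not from the matches:

static int modify_irte(u16 devid, int index, union irte irte)
{
        struct irq_remap_table *table;
        struct amd_iommu *iommu;
        unsigned long flags;

        iommu = amd_iommu_rlookup_table[devid];
        if (iommu == NULL)
                return -EINVAL;

        table = get_irq_table(devid, false);
        if (!table)
                return -ENOMEM;

        spin_lock_irqsave(&table->lock, flags);
        table->table[index] = irte.val;
        spin_unlock_irqrestore(&table->lock, flags);

        iommu_flush_irt(iommu, devid);
        iommu_completion_wait(iommu);

        return 0;
}

free_irte() follows the same shape, clearing the entry instead of rewriting it.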