Lines Matching refs:iommu

276 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in find_dev_data() local
285 if (translation_pre_enabled(iommu)) in find_dev_data()
397 struct amd_iommu *iommu; in iommu_init_device() local
399 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
400 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
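The lookup above recurs throughout this listing: a 16-bit PCI requester ID indexes amd_iommu_rlookup_table to find the IOMMU that serves the device. Below is a minimal userspace model of that lookup; the table size is the full 16-bit devid space, and make_devid() is a hypothetical helper standing in for the bus/devfn packing the driver does at init time.

#include <stdint.h>
#include <stdio.h>

struct amd_iommu { int index; };

#define RLOOKUP_ENTRIES (1 << 16)       /* one slot per possible devid */

static struct amd_iommu *rlookup_table[RLOOKUP_ENTRIES];

/* hypothetical helper: PCI requester ID is bus in the high byte,
 * device/function in the low byte */
static uint16_t make_devid(uint8_t bus, uint8_t devfn)
{
	return ((uint16_t)bus << 8) | devfn;
}

int main(void)
{
	static struct amd_iommu iommu0 = { .index = 0 };
	uint16_t devid = make_devid(0x00, 0x18);

	rlookup_table[devid] = &iommu0;     /* filled in at init time */

	struct amd_iommu *iommu = rlookup_table[devid];
	printf("devid %#06x -> iommu %d\n", (unsigned)devid,
	       iommu ? iommu->index : -1);
	return 0;
}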
573 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
575 struct device *dev = iommu->iommu.dev; in iommu_print_event()
662 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
666 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
667 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
670 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
674 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
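iommu_poll_events() drains a producer/consumer ring: hardware appends entries and advances the tail register, software consumes from head to tail and writes the new head back. A self-contained sketch of that discipline follows, with the MMIO register reads replaced by plain variables and illustrative entry/buffer sizes rather than the driver's constants. The PPR and GA log pollers below follow the same head/tail pattern.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EVT_ENTRY_SIZE   16
#define EVT_BUFFER_SIZE  (EVT_ENTRY_SIZE * 8)

static uint8_t evt_buf[EVT_BUFFER_SIZE];
static uint32_t evt_head, evt_tail;     /* stand-ins for the MMIO registers */

static void print_event(const uint8_t *evt)
{
	printf("event: %02x %02x ...\n", evt[0], evt[1]);
}

static void poll_events(void)
{
	uint32_t head = evt_head;           /* readl(MMIO_EVT_HEAD_OFFSET) */
	uint32_t tail = evt_tail;           /* readl(MMIO_EVT_TAIL_OFFSET) */

	while (head != tail) {
		print_event(evt_buf + head);
		head = (head + EVT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
	}

	evt_head = head;                    /* writel(head, MMIO_EVT_HEAD_OFFSET) */
}

int main(void)
{
	/* hardware would append two events and bump the tail pointer */
	memset(evt_buf, 0xab, 2 * EVT_ENTRY_SIZE);
	evt_tail = 2 * EVT_ENTRY_SIZE;
	poll_events();
	return 0;
}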
677 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) in iommu_handle_ppr_entry() argument
695 static void iommu_poll_ppr_log(struct amd_iommu *iommu) in iommu_poll_ppr_log() argument
699 if (iommu->ppr_log == NULL) in iommu_poll_ppr_log()
702 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
703 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
710 raw = (u64 *)(iommu->ppr_log + head); in iommu_poll_ppr_log()
735 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
738 iommu_handle_ppr_entry(iommu, entry); in iommu_poll_ppr_log()
741 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
742 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
757 static void iommu_poll_ga_log(struct amd_iommu *iommu) in iommu_poll_ga_log() argument
761 if (iommu->ga_log == NULL) in iommu_poll_ga_log()
764 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
765 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_poll_ga_log()
771 raw = (u64 *)(iommu->ga_log + head); in iommu_poll_ga_log()
779 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
801 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) in amd_iommu_set_pci_msi_domain() argument
807 dev_set_msi_domain(dev, iommu->msi_domain); in amd_iommu_set_pci_msi_domain()
812 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } in amd_iommu_set_pci_msi_domain() argument
823 struct amd_iommu *iommu = (struct amd_iommu *) data; in amd_iommu_int_thread() local
824 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
829 iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
833 iommu_poll_events(iommu); in amd_iommu_int_thread()
838 iommu_poll_ppr_log(iommu); in amd_iommu_int_thread()
844 iommu_poll_ga_log(iommu); in amd_iommu_int_thread()
850 amd_iommu_restart_event_logging(iommu); in amd_iommu_int_thread()
866 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
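The interrupt thread re-reads the status register after each pass because new log entries can arrive while earlier ones are being drained. Here is a hedged model of that loop; the status bit positions and handler bodies are illustrative, not the hardware's actual layout, and the write-1-to-clear acknowledge is modeled as a plain store.

#include <stdint.h>
#include <stdio.h>

#define STATUS_EVT_PENDING  (1u << 1)   /* illustrative bit positions, not */
#define STATUS_PPR_PENDING  (1u << 6)   /* the hardware's register layout  */
#define STATUS_GA_PENDING   (1u << 7)

static uint32_t mmio_status = STATUS_EVT_PENDING | STATUS_PPR_PENDING;

static void poll_events(void)  { puts("drained event log"); }
static void poll_ppr_log(void) { puts("drained PPR log");   }
static void poll_ga_log(void)  { puts("drained GA log");    }

static void int_thread(void)
{
	uint32_t status = mmio_status;      /* readl(MMIO_STATUS_OFFSET) */

	while (status & (STATUS_EVT_PENDING | STATUS_PPR_PENDING |
			 STATUS_GA_PENDING)) {
		/* acknowledge the bits we saw (write-1-to-clear in hardware) */
		mmio_status &= ~status;

		if (status & STATUS_EVT_PENDING)
			poll_events();
		if (status & STATUS_PPR_PENDING)
			poll_ppr_log();
		if (status & STATUS_GA_PENDING)
			poll_ga_log();

		/* re-read: new work may have arrived while we were draining */
		status = mmio_status;
	}
}

int main(void) { int_thread(); return 0; }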
882 static int wait_on_sem(struct amd_iommu *iommu, u64 data) in wait_on_sem() argument
886 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) { in wait_on_sem()
899 static void copy_cmd_to_buffer(struct amd_iommu *iommu, in copy_cmd_to_buffer() argument
906 tail = iommu->cmd_buf_tail; in copy_cmd_to_buffer()
907 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
911 iommu->cmd_buf_tail = tail; in copy_cmd_to_buffer()
914 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
918 struct amd_iommu *iommu, in build_completion_wait() argument
921 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in build_completion_wait()
1071 static int __iommu_queue_command_sync(struct amd_iommu *iommu, in __iommu_queue_command_sync() argument
1078 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1080 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1094 iommu->cmd_buf_head = readl(iommu->mmio_base + in __iommu_queue_command_sync()
1100 copy_cmd_to_buffer(iommu, cmd); in __iommu_queue_command_sync()
1103 iommu->need_sync = sync; in __iommu_queue_command_sync()
1108 static int iommu_queue_command_sync(struct amd_iommu *iommu, in iommu_queue_command_sync() argument
1115 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
1116 ret = __iommu_queue_command_sync(iommu, cmd, sync); in iommu_queue_command_sync()
1117 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1122 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) in iommu_queue_command() argument
1124 return iommu_queue_command_sync(iommu, cmd, true); in iommu_queue_command()
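__iommu_queue_command_sync() reserves space in the command ring with unsigned-wraparound arithmetic: (head - next_tail) % CMD_BUFFER_SIZE yields the free byte count when the buffer size is a power of two. A compilable sketch of that check, assuming illustrative sizes and safety margin; on failure the real driver re-reads the hardware head register and retries.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMD_BUFFER_SIZE 8192            /* illustrative power-of-two size */
#define CMD_SIZE        16

static uint8_t cmd_buf[CMD_BUFFER_SIZE];
static uint32_t cmd_head, cmd_tail;     /* head owned by hardware, tail by us */

static int queue_command(const uint8_t *cmd)
{
	uint32_t next_tail = (cmd_tail + CMD_SIZE) % CMD_BUFFER_SIZE;
	/* unsigned wraparound makes this the free-space count in bytes */
	uint32_t left = (cmd_head - next_tail) % CMD_BUFFER_SIZE;

	if (left <= 0x20)                   /* keep a small safety margin */
		return -1;                  /* caller re-reads head and retries */

	memcpy(cmd_buf + cmd_tail, cmd, CMD_SIZE);
	cmd_tail = next_tail;               /* writel(tail, MMIO_CMD_TAIL_OFFSET) */
	return 0;
}

int main(void)
{
	uint8_t cmd[CMD_SIZE] = { 0x01 };
	printf("queued: %d\n", queue_command(cmd));
	return 0;
}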
1131 static int iommu_completion_wait(struct amd_iommu *iommu) in iommu_completion_wait() argument
1138 if (!iommu->need_sync) in iommu_completion_wait()
1141 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_completion_wait()
1143 data = ++iommu->cmd_sem_val; in iommu_completion_wait()
1144 build_completion_wait(&cmd, iommu, data); in iommu_completion_wait()
1146 ret = __iommu_queue_command_sync(iommu, &cmd, false); in iommu_completion_wait()
1150 ret = wait_on_sem(iommu, data); in iommu_completion_wait()
1153 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_completion_wait()
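iommu_completion_wait() pairs a COMPLETION_WAIT command with a memory semaphore: each wait takes a fresh cmd_sem_val cookie and spins until the IOMMU stores that value to cmd_sem, proving all earlier commands have finished. A minimal model follows in which the "hardware" completes immediately; LOOP_TIMEOUT is an illustrative bound, not the driver's value.

#include <stdint.h>
#include <stdio.h>

#define LOOP_TIMEOUT 100000

static volatile uint64_t cmd_sem;       /* memory the IOMMU writes to */
static uint64_t cmd_sem_val;            /* monotonically increasing cookie */

/* stand-in: the real build_completion_wait() embeds the semaphore address
 * and data in a COMPLETION_WAIT command; here "hardware" completes at once */
static void issue_completion_wait(uint64_t data)
{
	cmd_sem = data;
}

static int wait_on_sem(uint64_t data)
{
	unsigned long i = 0;

	while (cmd_sem != data && i < LOOP_TIMEOUT)
		++i;
	return (i == LOOP_TIMEOUT) ? -1 : 0;
}

static int completion_wait(void)
{
	uint64_t data = ++cmd_sem_val;      /* unique value per wait */

	issue_completion_wait(data);
	return wait_on_sem(data);
}

int main(void)
{
	printf("completion wait: %d\n", completion_wait());
	return 0;
}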
1158 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
1164 return iommu_queue_command(iommu, &cmd); in iommu_flush_dte()
1167 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu) in amd_iommu_flush_dte_all() argument
1172 iommu_flush_dte(iommu, devid); in amd_iommu_flush_dte_all()
1174 iommu_completion_wait(iommu); in amd_iommu_flush_dte_all()
1181 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) in amd_iommu_flush_tlb_all() argument
1189 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_all()
1192 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_all()
1195 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) in amd_iommu_flush_tlb_domid() argument
1201 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_domid()
1203 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_domid()
1206 static void amd_iommu_flush_all(struct amd_iommu *iommu) in amd_iommu_flush_all() argument
1212 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_all()
1213 iommu_completion_wait(iommu); in amd_iommu_flush_all()
1216 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1222 iommu_queue_command(iommu, &cmd); in iommu_flush_irt()
1225 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu) in amd_iommu_flush_irt_all() argument
1230 iommu_flush_irt(iommu, devid); in amd_iommu_flush_irt_all()
1232 iommu_completion_wait(iommu); in amd_iommu_flush_irt_all()
1235 void iommu_flush_all_caches(struct amd_iommu *iommu) in iommu_flush_all_caches() argument
1237 if (iommu_feature(iommu, FEATURE_IA)) { in iommu_flush_all_caches()
1238 amd_iommu_flush_all(iommu); in iommu_flush_all_caches()
1240 amd_iommu_flush_dte_all(iommu); in iommu_flush_all_caches()
1241 amd_iommu_flush_irt_all(iommu); in iommu_flush_all_caches()
1242 amd_iommu_flush_tlb_all(iommu); in iommu_flush_all_caches()
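iommu_flush_all_caches() chooses between one INVALIDATE_ALL command and three targeted flushes depending on whether the hardware advertises FEATURE_IA. A sketch of that policy; the feature bit value and the helper bodies are stand-ins for the driver's command builders.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURE_IA (1ull << 6)          /* illustrative bit, check the spec */

static void flush_all(void)     { puts("INVALIDATE_ALL"); }
static void flush_dte_all(void) { puts("flush all device table entries"); }
static void flush_irt_all(void) { puts("flush all interrupt remap tables"); }
static void flush_tlb_all(void) { puts("flush all domain TLBs"); }

static bool iommu_feature(uint64_t features, uint64_t mask)
{
	return (features & mask) != 0;
}

static void flush_all_caches(uint64_t features)
{
	if (iommu_feature(features, FEATURE_IA)) {
		flush_all();                /* one command covers everything */
	} else {
		flush_dte_all();
		flush_irt_all();
		flush_tlb_all();
	}
}

int main(void)
{
	flush_all_caches(0);            /* legacy, per-cache path */
	flush_all_caches(FEATURE_IA);   /* single-command path */
	return 0;
}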
1252 struct amd_iommu *iommu; in device_flush_iotlb() local
1257 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1261 return iommu_queue_command(iommu, &cmd); in device_flush_iotlb()
1266 struct amd_iommu *iommu = data; in device_flush_dte_alias() local
1268 return iommu_flush_dte(iommu, alias); in device_flush_dte_alias()
1276 struct amd_iommu *iommu; in device_flush_dte() local
1280 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1284 device_flush_dte_alias, iommu); in device_flush_dte()
1286 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1292 ret = iommu_flush_dte(iommu, alias); in device_flush_dte()
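device_flush_dte() must invalidate the device table entry for every requester ID a device can present, which is why it walks the DMA aliases through a callback. A simulated version of that walk, with pci_for_each_dma_alias() replaced by a plain loop over hypothetical alias IDs; only the callback shape mirrors the kernel interface.

#include <stdint.h>
#include <stdio.h>

static int flush_dte(uint16_t devid)
{
	printf("INVALIDATE_DEVTAB_ENTRY devid=%#06x\n", (unsigned)devid);
	return 0;
}

/* callback invoked once per alias, matching the (pdev, alias, data) shape */
static int flush_dte_alias(void *pdev, uint16_t alias, void *data)
{
	(void)pdev; (void)data;
	return flush_dte(alias);
}

/* simulated walk over a device's DMA aliases */
static int for_each_dma_alias(const uint16_t *aliases, int n,
			      int (*fn)(void *, uint16_t, void *), void *data)
{
	for (int i = 0; i < n; ++i) {
		int ret = fn(NULL, aliases[i], data);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	/* a bridge aliasing its secondary-bus traffic; illustrative IDs */
	uint16_t aliases[] = { 0x0810, 0x0800 };

	return for_each_dma_alias(aliases, 2, flush_dte_alias, NULL);
}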
1929 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in set_dte_entry() local
1931 if (iommu_feature(iommu, FEATURE_EPHSUP)) in set_dte_entry()
1974 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in set_dte_entry() local
1976 amd_iommu_flush_tlb_domid(iommu, old_domid); in set_dte_entry()
1993 struct amd_iommu *iommu; in do_attach() local
1996 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
2004 domain->dev_iommu[iommu->index] += 1; in do_attach()
2019 struct amd_iommu *iommu; in do_detach() local
2021 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
2039 domain->dev_iommu[iommu->index] -= 1; in do_detach()
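do_attach()/do_detach() keep a per-IOMMU device count in the protection domain (domain->dev_iommu[iommu->index]) so later flushes can skip IOMMUs with no attached devices. A trivial model of that bookkeeping, assuming an illustrative MAX_IOMMUS bound.

#include <stdio.h>

#define MAX_IOMMUS 32                   /* illustrative bound */

struct protection_domain {
	unsigned int dev_iommu[MAX_IOMMUS]; /* devices behind each IOMMU */
};

static void do_attach(struct protection_domain *d, unsigned int iommu_index)
{
	d->dev_iommu[iommu_index] += 1;
}

static void do_detach(struct protection_domain *d, unsigned int iommu_index)
{
	d->dev_iommu[iommu_index] -= 1;
}

int main(void)
{
	struct protection_domain dom = { 0 };

	do_attach(&dom, 3);
	do_attach(&dom, 3);
	do_detach(&dom, 3);
	printf("devices on iommu 3: %u\n", dom.dev_iommu[3]);
	return 0;
}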
2227 struct amd_iommu *iommu; in amd_iommu_probe_device() local
2237 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_probe_device()
2240 return &iommu->iommu; in amd_iommu_probe_device()
2249 amd_iommu_set_pci_msi_domain(dev, iommu); in amd_iommu_probe_device()
2250 iommu_dev = &iommu->iommu; in amd_iommu_probe_device()
2253 iommu_completion_wait(iommu); in amd_iommu_probe_device()
2271 struct amd_iommu *iommu; in amd_iommu_release_device() local
2276 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_release_device()
2279 iommu_completion_wait(iommu); in amd_iommu_release_device()
2525 struct amd_iommu *iommu; in amd_iommu_detach_device() local
2538 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_detach_device()
2539 if (!iommu) in amd_iommu_detach_device()
2548 iommu_completion_wait(iommu); in amd_iommu_detach_device()
2556 struct amd_iommu *iommu; in amd_iommu_attach_device() local
2565 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
2566 if (!iommu) in amd_iommu_attach_device()
2583 iommu_completion_wait(iommu); in amd_iommu_attach_device()
2909 struct amd_iommu *iommu; in __flush_pasid() local
2920 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
2925 ret = iommu_queue_command(iommu, &cmd); in __flush_pasid()
3083 struct amd_iommu *iommu; in amd_iommu_complete_ppr() local
3087 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3092 return iommu_queue_command(iommu, &cmd); in amd_iommu_complete_ppr()
3248 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, in set_remap_table_entry() argument
3253 iommu_flush_dte(iommu, devid); in set_remap_table_entry()
3273 struct amd_iommu *iommu; in alloc_irq_table() local
3279 iommu = amd_iommu_rlookup_table[devid]; in alloc_irq_table()
3280 if (!iommu) in alloc_irq_table()
3290 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3308 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3319 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3322 set_remap_table_entry(iommu, alias, table); in alloc_irq_table()
3325 iommu_completion_wait(iommu); in alloc_irq_table()
3343 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in alloc_irq_index() local
3345 if (!iommu) in alloc_irq_index()
3360 if (!iommu->irte_ops->is_allocated(table, index)) { in alloc_irq_index()
3370 iommu->irte_ops->set_allocated(table, index - c + 1); in alloc_irq_index()
3392 struct amd_iommu *iommu; in modify_irte_ga() local
3396 iommu = amd_iommu_rlookup_table[devid]; in modify_irte_ga()
3397 if (iommu == NULL) in modify_irte_ga()
3425 iommu_flush_irt(iommu, devid); in modify_irte_ga()
3426 iommu_completion_wait(iommu); in modify_irte_ga()
3434 struct amd_iommu *iommu; in modify_irte() local
3437 iommu = amd_iommu_rlookup_table[devid]; in modify_irte()
3438 if (iommu == NULL) in modify_irte()
3449 iommu_flush_irt(iommu, devid); in modify_irte()
3450 iommu_completion_wait(iommu); in modify_irte()
3458 struct amd_iommu *iommu; in free_irte() local
3461 iommu = amd_iommu_rlookup_table[devid]; in free_irte()
3462 if (iommu == NULL) in free_irte()
3470 iommu->irte_ops->clear_allocated(table, index); in free_irte()
3473 iommu_flush_irt(iommu, devid); in free_irte()
3474 iommu_completion_wait(iommu); in free_irte()
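alloc_irq_index() scans the per-device remap table for a run of free slots and marks them allocated; free_irte() clears a slot and flushes the interrupt table so hardware drops any cached copy of the stale entry. A compilable sketch of that bookkeeping, with an illustrative table size and the flush modeled as a print.

#include <stdbool.h>
#include <stdio.h>

#define MAX_IRTE 256                    /* illustrative table size */

struct irq_remap_table {
	bool allocated[MAX_IRTE];
};

static void flush_irt(unsigned int devid)
{
	printf("INVALIDATE_INTERRUPT_TABLE devid=%#x\n", devid);
}

/* find 'count' consecutive free slots and mark them allocated */
static int alloc_irq_index(struct irq_remap_table *t, int count)
{
	int index, c;

	for (index = 0, c = 0; index < MAX_IRTE; ++index) {
		if (t->allocated[index])
			c = 0;              /* run broken, start over */
		else if (++c == count) {
			for (; c != 0; --c)
				t->allocated[index - c + 1] = true;
			return index - count + 1;
		}
	}
	return -1;
}

static void free_irte(struct irq_remap_table *t, unsigned int devid, int index)
{
	t->allocated[index] = false;
	flush_irt(devid);   /* hardware may have cached the stale entry */
}

int main(void)
{
	struct irq_remap_table table = { 0 };
	int idx = alloc_irq_index(&table, 2);

	printf("allocated index %d\n", idx);
	free_irte(&table, 0x10, idx);
	return 0;
}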
3631 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in get_irq_domain_for_devid() local
3633 if (!iommu) in get_irq_domain_for_devid()
3639 return iommu->ir_domain; in get_irq_domain_for_devid()
3676 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in irq_remapping_prepare_irte() local
3678 if (!iommu) in irq_remapping_prepare_irte()
3683 iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode, in irq_remapping_prepare_irte()
3769 struct amd_iommu *iommu; in irq_remapping_alloc() local
3779 iommu = amd_iommu_rlookup_table[devid]; in irq_remapping_alloc()
3781 iommu->irte_ops->set_allocated(table, i); in irq_remapping_alloc()
3870 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3880 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in irq_remapping_activate() local
3883 if (!iommu) in irq_remapping_activate()
3886 iommu->irte_ops->activate(data->entry, irte_info->devid, in irq_remapping_activate()
3888 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg); in irq_remapping_activate()
3897 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in irq_remapping_deactivate() local
3899 if (iommu) in irq_remapping_deactivate()
3900 iommu->irte_ops->deactivate(data->entry, irte_info->devid, in irq_remapping_deactivate()
3971 struct amd_iommu *iommu; in amd_ir_set_vcpu_affinity() local
3998 iommu = amd_iommu_rlookup_table[irte_info->devid]; in amd_ir_set_vcpu_affinity()
3999 if (iommu == NULL) in amd_ir_set_vcpu_affinity()
4025 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, in amd_ir_update_irte() argument
4035 iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid, in amd_ir_update_irte()
4047 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in amd_ir_set_affinity() local
4050 if (!iommu) in amd_ir_set_affinity()
4057 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg); in amd_ir_set_affinity()
4083 int amd_iommu_create_irq_domain(struct amd_iommu *iommu) in amd_iommu_create_irq_domain() argument
4087 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); in amd_iommu_create_irq_domain()
4090 iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); in amd_iommu_create_irq_domain()
4091 if (!iommu->ir_domain) { in amd_iommu_create_irq_domain()
4096 iommu->ir_domain->parent = arch_get_ir_parent_domain(); in amd_iommu_create_irq_domain()
4097 iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, in amd_iommu_create_irq_domain()
4099 iommu->index); in amd_iommu_create_irq_domain()
4106 struct amd_iommu *iommu; in amd_iommu_update_ga() local
4117 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_update_ga()
4118 if (!iommu) in amd_iommu_update_ga()
4140 iommu_flush_irt(iommu, devid); in amd_iommu_update_ga()
4141 iommu_completion_wait(iommu); in amd_iommu_update_ga()