/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
 */

#ifndef __DMAR_H__
#define __DMAR_H__

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/msi.h>
#include <linux/irqreturn.h>
#include <linux/rwsem.h>
#include <linux/rculist.h>

struct acpi_dmar_header;

#ifdef CONFIG_X86
# define DMAR_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define DMAR_UNITS_SUPPORTED	64
#endif

/* DMAR Flags */
#define DMAR_INTR_REMAP		0x1
#define DMAR_X2APIC_OPT_OUT	0x2
#define DMAR_PLATFORM_OPT_IN	0x4

struct intel_iommu;

struct dmar_dev_scope {
	struct device __rcu *dev;
	u8 bus;
	u8 devfn;
};

#ifdef CONFIG_DMAR_TABLE
extern struct acpi_table_header *dmar_tbl;
struct dmar_drhd_unit {
	struct list_head list;		/* list of drhd units		*/
	struct acpi_dmar_header *hdr;	/* ACPI header			*/
	u64 reg_base_addr;		/* register base address	*/
	struct dmar_dev_scope *devices;	/* target device array		*/
	int devices_cnt;		/* target device count		*/
	u16 segment;			/* PCI domain			*/
	u8 ignored:1;			/* ignore drhd			*/
	u8 include_all:1;
	u8 gfx_dedicated:1;		/* graphics dedicated		*/
	struct intel_iommu *iommu;
};

struct dmar_pci_path {
	u8 bus;
	u8 device;
	u8 function;
};

struct dmar_pci_notify_info {
	struct pci_dev *dev;
	unsigned long event;
	int bus;
	u16 seg;
	u16 level;
	struct dmar_pci_path path[];
} __attribute__((packed));

extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;

#define for_each_drhd_unit(drhd)					\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,		\
				dmar_rcu_check())

#define for_each_active_drhd_unit(drhd)					\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,		\
				dmar_rcu_check())			\
		if (drhd->ignored) {} else

#define for_each_active_iommu(i, drhd)					\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,		\
				dmar_rcu_check())			\
		if (i=drhd->iommu, drhd->ignored) {} else

#define for_each_iommu(i, drhd)						\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,		\
				dmar_rcu_check())			\
		if (i=drhd->iommu, 0) {} else
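
/*
 * Illustrative usage sketch (editor's addition, not from the original
 * header): the iterators above perform an RCU list walk, with
 * dmar_rcu_check() supplying the extra lockdep condition, so callers
 * typically hold dmar_global_lock or an RCU read lock. A walk over the
 * active IOMMUs might look like:
 *
 *	struct dmar_drhd_unit *drhd;
 *	struct intel_iommu *iommu;
 *
 *	down_read(&dmar_global_lock);
 *	for_each_active_iommu(iommu, drhd)
 *		pr_info("DRHD at 0x%llx, PCI segment %u\n",
 *			drhd->reg_base_addr, drhd->segment);
 *	up_read(&dmar_global_lock);
 */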

static inline bool dmar_rcu_check(void)
{
	return rwsem_is_locked(&dmar_global_lock) ||
	       system_state == SYSTEM_BOOTING;
}

#define dmar_rcu_dereference(p)	rcu_dereference_check((p), dmar_rcu_check())

#define for_each_dev_scope(devs, cnt, i, tmp)				\
	for ((i) = 0; ((tmp) = (i) < (cnt) ?				\
	    dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt));	\
	    (i)++)

#define for_each_active_dev_scope(devs, cnt, i, tmp)			\
	for_each_dev_scope((devs), (cnt), (i), (tmp))			\
		if (!(tmp)) { continue; } else
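
/*
 * Illustrative usage sketch (editor's addition, an assumed caller): walking
 * a DRHD unit's device scope with the iterators above. The _active_ variant
 * skips slots whose RCU-protected device pointer is currently NULL:
 *
 *	struct device *tmp;
 *	int i;
 *
 *	for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, tmp)
 *		dev_info(tmp, "listed in this DRHD unit's scope\n");
 */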

extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern void dmar_register_bus_notifier(void);
extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
				 void *start, void *end, u16 segment,
				 struct dmar_dev_scope *devices,
				 int devices_cnt);
extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
				 u16 segment, struct dmar_dev_scope *devices,
				 int count);
/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
extern int dmar_device_add(acpi_handle handle);
extern int dmar_device_remove(acpi_handle handle);

static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
{
	return 0;
}

#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
extern void intel_iommu_shutdown(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
static inline void intel_iommu_shutdown(void) { }

#define	dmar_parse_one_rmrr	dmar_res_noop
#define	dmar_parse_one_atsr	dmar_res_noop
#define	dmar_check_one_atsr	dmar_res_noop
#define	dmar_release_one_atsr	dmar_res_noop

static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	return 0;
}

static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	return 0;
}
#endif /* CONFIG_INTEL_IOMMU */

#ifdef CONFIG_IRQ_REMAP
extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
#else /* CONFIG_IRQ_REMAP */
static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{ return 0; }
#endif /* CONFIG_IRQ_REMAP */

extern bool dmar_platform_optin(void);

#else /* CONFIG_DMAR_TABLE */

static inline int dmar_device_add(void *handle)
{
	return 0;
}

static inline int dmar_device_remove(void *handle)
{
	return 0;
}

static inline bool dmar_platform_optin(void)
{
	return false;
}

#endif /* CONFIG_DMAR_TABLE */

struct irte {
	union {
		/* Shared between remapped and posted mode */
		struct {
			__u64	present		: 1,  /*  0      */
				fpd		: 1,  /*  1      */
				__res0		: 6,  /*  2 -  7 */
				avail		: 4,  /*  8 - 11 */
				__res1		: 3,  /* 12 - 14 */
				pst		: 1,  /* 15      */
				vector		: 8,  /* 16 - 23 */
				__res2		: 40; /* 24 - 63 */
		};

		/* Remapped mode */
		struct {
			__u64	r_present	: 1,  /*  0      */
				r_fpd		: 1,  /*  1      */
				dst_mode	: 1,  /*  2      */
				redir_hint	: 1,  /*  3      */
				trigger_mode	: 1,  /*  4      */
				dlvry_mode	: 3,  /*  5 -  7 */
				r_avail		: 4,  /*  8 - 11 */
				r_res0		: 4,  /* 12 - 15 */
				r_vector	: 8,  /* 16 - 23 */
				r_res1		: 8,  /* 24 - 31 */
				dest_id		: 32; /* 32 - 63 */
		};

		/* Posted mode */
		struct {
			__u64	p_present	: 1,  /*  0      */
				p_fpd		: 1,  /*  1      */
				p_res0		: 6,  /*  2 -  7 */
				p_avail		: 4,  /*  8 - 11 */
				p_res1		: 2,  /* 12 - 13 */
				p_urgent	: 1,  /* 14      */
				p_pst		: 1,  /* 15      */
				p_vector	: 8,  /* 16 - 23 */
				p_res2		: 14, /* 24 - 37 */
				pda_l		: 26; /* 38 - 63 */
		};
		__u64 low;
	};

	union {
		/* Shared between remapped and posted mode */
		struct {
			__u64	sid		: 16, /* 64 - 79  */
				sq		: 2,  /* 80 - 81  */
				svt		: 2,  /* 82 - 83  */
				__res3		: 44; /* 84 - 127 */
		};

		/* Posted mode */
		struct {
			__u64	p_sid		: 16, /* 64 - 79  */
				p_sq		: 2,  /* 80 - 81  */
				p_svt		: 2,  /* 82 - 83  */
				p_res3		: 12, /* 84 - 95  */
				pda_h		: 32; /* 96 - 127 */
		};
		__u64 high;
	};
};

static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
{
	dst->present	= src->present;
	dst->fpd	= src->fpd;
	dst->avail	= src->avail;
	dst->pst	= src->pst;
	dst->vector	= src->vector;
	dst->sid	= src->sid;
	dst->sq		= src->sq;
	dst->svt	= src->svt;
}
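
/*
 * Illustrative sketch (editor's addition, an assumed caller): the fields
 * copied above are the ones the remapped and posted IRTE layouts share, so a
 * transition to posted mode can copy them first and then set the posted-only
 * fields, e.g.:
 *
 *	struct irte pi_irte;
 *
 *	memset(&pi_irte, 0, sizeof(pi_irte));
 *	dmar_copy_shared_irte(&pi_irte, &remapped_irte);
 *	pi_irte.p_pst = 1;
 *
 * where p_pst marks the entry as using the posted format and remapped_irte
 * is a hypothetical, previously programmed remapped-mode entry.
 */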

#define PDA_LOW_BIT	26
#define PDA_HIGH_BIT	32

/*
 * Can't use the common MSI interrupt functions
 * since DMAR is not a PCI device.
 */
struct irq_data;
extern void dmar_msi_unmask(struct irq_data *data);
extern void dmar_msi_mask(struct irq_data *data);
extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int dmar_alloc_hwirq(int id, int node, void *arg);
extern void dmar_free_hwirq(int irq);

#endif /* __DMAR_H__ */