// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt) "AMD-Vi: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/msidef.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
#define ACPI_IVMD_TYPE_ALL 0x20
#define ACPI_IVMD_TYPE 0x21
#define ACPI_IVMD_TYPE_RANGE 0x22

#define IVHD_DEV_ALL 0x01
#define IVHD_DEV_SELECT 0x02
#define IVHD_DEV_SELECT_RANGE_START 0x03
#define IVHD_DEV_RANGE_END 0x04
#define IVHD_DEV_ALIAS 0x42
#define IVHD_DEV_ALIAS_RANGE 0x43
#define IVHD_DEV_EXT_SELECT 0x46
#define IVHD_DEV_EXT_SELECT_RANGE 0x47
#define IVHD_DEV_SPECIAL 0x48
#define IVHD_DEV_ACPI_HID 0xf0

#define UID_NOT_PRESENT 0
#define UID_IS_INTEGER 1
#define UID_IS_CHARACTER 2

#define IVHD_SPECIAL_IOAPIC 1
#define IVHD_SPECIAL_HPET 2

#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
#define IVHD_FLAG_PASSPW_EN_MASK 0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
#define IVHD_FLAG_ISOC_EN_MASK 0x08

#define IVMD_FLAG_EXCL_RANGE 0x08
#define IVMD_FLAG_IW 0x04
#define IVMD_FLAG_IR 0x02
#define IVMD_FLAG_UNITY_MAP 0x01

#define ACPI_DEVFLAG_INITPASS 0x01
#define ACPI_DEVFLAG_EXTINT 0x02
#define ACPI_DEVFLAG_NMI 0x04
#define ACPI_DEVFLAG_SYSMGT1 0x10
#define ACPI_DEVFLAG_SYSMGT2 0x20
#define ACPI_DEVFLAG_LINT0 0x40
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000

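/*
 * Upper bound for the polling loops below; iommu_ga_log_enable(), for
 * example, polls with udelay(10), so this caps the wait at roughly 20
 * seconds.
 */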
#define LOOP_TIMEOUT 2000000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entry structures.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
	u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;		/* largest PCI device id we have
				   to handle */
LIST_HEAD(amd_iommu_unity_map);	/* a list of required unity mappings
				   we find in ACPI */
bool amd_iommu_unmap_flush;	/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);	/* list of all AMD IOMMUs in the
				   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table to which the contents of the old device
 * table will be copied. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

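/*
 * Initialization proceeds through the states below in order, driven by
 * iommu_go_to_state(); the last three are terminal error/exit states.
 */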
enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE 4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

static u32 amd_iommu_ivinfo __initdata;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

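/*
 * Example: with amd_iommu_last_bdf == 0xffff and the 32-byte device table
 * entry size, this yields a 2 MB table rounded up to a whole page order.
 */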
static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

#ifdef CONFIG_IRQ_REMAP
static bool check_feature_on_all_iommus(u64 mask)
{
	bool ret = false;
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		ret = iommu_feature(iommu, mask);
		if (!ret)
			return false;
	}

	return true;
}
#endif

/*
 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
 * Default to IVHD EFR since it is available sooner
 * (i.e. before PCI init).
 */
static void __init early_iommu_features_init(struct amd_iommu *iommu,
					     struct ivhd_header *h)
{
	if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
		iommu->features = h->efr_reg;
}

/* Access to l1 and l2 indexed register spaces */
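/*
 * Both spaces are reached indirectly through an address/data register
 * pair in PCI config space: 0xf8/0xfc for L1 (the L1 index lives in the
 * upper address bits) and 0xf0/0xf4 for L2; writes additionally set a
 * write-enable bit in the address register.
 */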

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 entry = start & PM_ADDR_MASK;

	if (!iommu_feature(iommu, FEATURE_SNP))
		return;

	/* Note:
	 * Re-purpose Exclusion base/limit registers for Completion wait
	 * write-back base/limit.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	/* Note:
	 * Default to 4 Kbytes, which can be specified by setting base
	 * address equal to the limit address.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
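/*
 * The low bits of the base address register encode the table size in 4K
 * pages, minus one; copy_device_table() relies on this encoding when it
 * reads the register back.
 */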
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
		       address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
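/*
 * Entries with type < 0x80 encode their size in the top two bits of the
 * type byte: types 0x00-0x3f are 4 bytes, 0x40-0x7f are 8 bytes.
 */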
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header,
 * this function checks whether a higher device id is defined in the ACPI
 * table.
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function restarts event logging in case the IOMMU experienced
 * an event log buffer overflow.
 */
void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

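/*
 * When the IOMMU supports SNP, buffers handed to the hardware must be
 * mapped with 4K pages; if set_memory_4k() fails, the allocation is
 * released rather than handing the IOMMU an unsuitably mapped buffer.
 */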
static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	if (buf &&
	    iommu_feature(iommu, FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		buf = NULL;
	}

	return buf;
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      EVT_BUFFER_SIZE);

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      PPR_LOG_SIZE);

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;
	u64 entry;

	if (!iommu->ga_log)
		return -EINVAL;

	/* Check if already running */
	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
		return 0;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);


	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
		udelay(10);
	}

	if (WARN_ON(i >= LOOP_TIMEOUT))
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}

static int iommu_init_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
#else
	return 0;
#endif /* CONFIG_IRQ_REMAP */
}

static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);

	return iommu->cmd_sem ? 0 : -ENOMEM;
}

static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
	if (iommu->cmd_sem)
		free_page((unsigned long)iommu->cmd_sem);
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
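/*
 * Example: bit 96 lands in data[(96 >> 6) & 0x03] == data[1], at bit
 * position 96 & 0x3f == 32.
 */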
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}


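/*
 * In a kdump kernel the previous kernel may have left translation
 * enabled. Copy the old device table so devices with in-flight DMA keep
 * valid DTEs, while masking out per-process (gcr3) state and verifying
 * the interrupt remapping fields before reuse.
 */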
static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
				iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
				iommu->index);
			return false;
		}
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask (sme_me_mask); we must remove the memory
	 * encryption mask to obtain the true physical address in the kdump
	 * kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = (sme_active() && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
						   get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_IRQ_TABLE_LEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}

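/*
 * Erratum 63 workaround: whenever the SYSMGT field of a DTE is set to
 * 01b, the IW bit must be set as well.
 */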
amd_iommu_apply_erratum_63(u16 devid)1033*4882a593Smuzhiyun void amd_iommu_apply_erratum_63(u16 devid)
1034*4882a593Smuzhiyun {
1035*4882a593Smuzhiyun int sysmgt;
1036*4882a593Smuzhiyun
1037*4882a593Smuzhiyun sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
1038*4882a593Smuzhiyun (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
1039*4882a593Smuzhiyun
1040*4882a593Smuzhiyun if (sysmgt == 0x01)
1041*4882a593Smuzhiyun set_dev_entry_bit(devid, DEV_ENTRY_IW);
1042*4882a593Smuzhiyun }
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun /* Writes the specific IOMMU for a device into the rlookup table */
set_iommu_for_device(struct amd_iommu * iommu,u16 devid)1045*4882a593Smuzhiyun static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
1046*4882a593Smuzhiyun {
1047*4882a593Smuzhiyun amd_iommu_rlookup_table[devid] = iommu;
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun
1050*4882a593Smuzhiyun /*
1051*4882a593Smuzhiyun * This function takes the device specific flags read from the ACPI
1052*4882a593Smuzhiyun * table and sets up the device table entry with that information
1053*4882a593Smuzhiyun */
set_dev_entry_from_acpi(struct amd_iommu * iommu,u16 devid,u32 flags,u32 ext_flags)1054*4882a593Smuzhiyun static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1055*4882a593Smuzhiyun u16 devid, u32 flags, u32 ext_flags)
1056*4882a593Smuzhiyun {
1057*4882a593Smuzhiyun if (flags & ACPI_DEVFLAG_INITPASS)
1058*4882a593Smuzhiyun set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
1059*4882a593Smuzhiyun if (flags & ACPI_DEVFLAG_EXTINT)
1060*4882a593Smuzhiyun set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
1061*4882a593Smuzhiyun if (flags & ACPI_DEVFLAG_NMI)
1062*4882a593Smuzhiyun set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
1063*4882a593Smuzhiyun if (flags & ACPI_DEVFLAG_SYSMGT1)
1064*4882a593Smuzhiyun set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
1065*4882a593Smuzhiyun if (flags & ACPI_DEVFLAG_SYSMGT2)
1066*4882a593Smuzhiyun set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
1067*4882a593Smuzhiyun if (flags & ACPI_DEVFLAG_LINT0)
1068*4882a593Smuzhiyun set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
1069*4882a593Smuzhiyun if (flags & ACPI_DEVFLAG_LINT1)
1070*4882a593Smuzhiyun set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
1071*4882a593Smuzhiyun
1072*4882a593Smuzhiyun amd_iommu_apply_erratum_63(devid);
1073*4882a593Smuzhiyun
1074*4882a593Smuzhiyun set_iommu_for_device(iommu, devid);
1075*4882a593Smuzhiyun }
1076*4882a593Smuzhiyun
add_special_device(u8 type,u8 id,u16 * devid,bool cmd_line)1077*4882a593Smuzhiyun int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
1078*4882a593Smuzhiyun {
1079*4882a593Smuzhiyun struct devid_map *entry;
1080*4882a593Smuzhiyun struct list_head *list;
1081*4882a593Smuzhiyun
1082*4882a593Smuzhiyun if (type == IVHD_SPECIAL_IOAPIC)
1083*4882a593Smuzhiyun list = &ioapic_map;
1084*4882a593Smuzhiyun else if (type == IVHD_SPECIAL_HPET)
1085*4882a593Smuzhiyun list = &hpet_map;
1086*4882a593Smuzhiyun else
1087*4882a593Smuzhiyun return -EINVAL;
1088*4882a593Smuzhiyun
1089*4882a593Smuzhiyun list_for_each_entry(entry, list, list) {
1090*4882a593Smuzhiyun if (!(entry->id == id && entry->cmd_line))
1091*4882a593Smuzhiyun continue;
1092*4882a593Smuzhiyun
1093*4882a593Smuzhiyun pr_info("Command-line override present for %s id %d - ignoring\n",
1094*4882a593Smuzhiyun type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun *devid = entry->devid;
1097*4882a593Smuzhiyun
1098*4882a593Smuzhiyun return 0;
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun
1101*4882a593Smuzhiyun entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1102*4882a593Smuzhiyun if (!entry)
1103*4882a593Smuzhiyun return -ENOMEM;
1104*4882a593Smuzhiyun
1105*4882a593Smuzhiyun entry->id = id;
1106*4882a593Smuzhiyun entry->devid = *devid;
1107*4882a593Smuzhiyun entry->cmd_line = cmd_line;
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun list_add_tail(&entry->list, list);
1110*4882a593Smuzhiyun
1111*4882a593Smuzhiyun return 0;
1112*4882a593Smuzhiyun }
1113*4882a593Smuzhiyun
add_acpi_hid_device(u8 * hid,u8 * uid,u16 * devid,bool cmd_line)1114*4882a593Smuzhiyun static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
1115*4882a593Smuzhiyun bool cmd_line)
1116*4882a593Smuzhiyun {
1117*4882a593Smuzhiyun struct acpihid_map_entry *entry;
1118*4882a593Smuzhiyun struct list_head *list = &acpihid_map;
1119*4882a593Smuzhiyun
1120*4882a593Smuzhiyun list_for_each_entry(entry, list, list) {
1121*4882a593Smuzhiyun if (strcmp(entry->hid, hid) ||
1122*4882a593Smuzhiyun (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
1123*4882a593Smuzhiyun !entry->cmd_line)
1124*4882a593Smuzhiyun continue;
1125*4882a593Smuzhiyun
1126*4882a593Smuzhiyun pr_info("Command-line override for hid:%s uid:%s\n",
1127*4882a593Smuzhiyun hid, uid);
1128*4882a593Smuzhiyun *devid = entry->devid;
1129*4882a593Smuzhiyun return 0;
1130*4882a593Smuzhiyun }
1131*4882a593Smuzhiyun
1132*4882a593Smuzhiyun entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1133*4882a593Smuzhiyun if (!entry)
1134*4882a593Smuzhiyun return -ENOMEM;
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun memcpy(entry->uid, uid, strlen(uid));
1137*4882a593Smuzhiyun memcpy(entry->hid, hid, strlen(hid));
1138*4882a593Smuzhiyun entry->devid = *devid;
1139*4882a593Smuzhiyun entry->cmd_line = cmd_line;
1140*4882a593Smuzhiyun entry->root_devid = (entry->devid & (~0x7));
1141*4882a593Smuzhiyun
1142*4882a593Smuzhiyun pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
1143*4882a593Smuzhiyun entry->cmd_line ? "cmd" : "ivrs",
1144*4882a593Smuzhiyun entry->hid, entry->uid, entry->root_devid);
1145*4882a593Smuzhiyun
1146*4882a593Smuzhiyun list_add_tail(&entry->list, list);
1147*4882a593Smuzhiyun return 0;
1148*4882a593Smuzhiyun }
static int __init add_early_maps(void)
{
        int i, ret;

        for (i = 0; i < early_ioapic_map_size; ++i) {
                ret = add_special_device(IVHD_SPECIAL_IOAPIC,
                                         early_ioapic_map[i].id,
                                         &early_ioapic_map[i].devid,
                                         early_ioapic_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        for (i = 0; i < early_hpet_map_size; ++i) {
                ret = add_special_device(IVHD_SPECIAL_HPET,
                                         early_hpet_map[i].id,
                                         &early_hpet_map[i].devid,
                                         early_hpet_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        for (i = 0; i < early_acpihid_map_size; ++i) {
                ret = add_acpi_hid_device(early_acpihid_map[i].hid,
                                          early_acpihid_map[i].uid,
                                          &early_acpihid_map[i].devid,
                                          early_acpihid_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        return 0;
}
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                       struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 devid = 0, devid_start = 0, devid_to = 0;
        u32 dev_i, ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;
        u32 ivhd_size;
        int ret;

        ret = add_early_maps();
        if (ret)
                return ret;

        amd_iommu_apply_ivrs_quirks();

        /*
         * First save the recommended feature enable bits from ACPI
         */
        iommu->acpi_flags = h->flags;

        /*
         * Done. Now parse the device entries
         */
        ivhd_size = get_ivhd_header_size(h);
        if (!ivhd_size) {
                pr_err("Unsupported IVHD type %#x\n", h->type);
                return -EINVAL;
        }

        p += ivhd_size;

        end += h->length;

        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:

                        DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

                        for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:

                        DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:

                        DUMP_printk("  DEV_SELECT_RANGE_START\t "
                                    "devid: %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:

                        DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x devid_to: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS_NUM(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:

                        DUMP_printk("  DEV_ALIAS_RANGE\t\t "
                                    "devid: %02x:%02x.%x flags: %02x "
                                    "devid_to: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS_NUM(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:

                        DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
                                    "flags: %02x ext: %08x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:

                        DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:

                        DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid));

                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias) {
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                        set_dev_entry_from_acpi(iommu,
                                                devid_to, flags, ext_flags);
                                }
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        flags, ext_flags);
                        }
                        break;
                case IVHD_DEV_SPECIAL: {
                        u8 handle, type;
                        const char *var;
                        u16 devid;
                        int ret;

                        handle = e->ext & 0xff;
                        devid  = (e->ext >> 8) & 0xffff;
                        type   = (e->ext >> 24) & 0xff;

                        if (type == IVHD_SPECIAL_IOAPIC)
                                var = "IOAPIC";
                        else if (type == IVHD_SPECIAL_HPET)
                                var = "HPET";
                        else
                                var = "UNKNOWN";

                        DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
                                    var, (int)handle,
                                    PCI_BUS_NUM(devid),
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));

                        ret = add_special_device(type, handle, &devid, false);
                        if (ret)
                                return ret;

                        /*
                         * add_special_device might update the devid in case a
                         * command-line override is present. So call
                         * set_dev_entry_from_acpi after add_special_device.
                         */
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

                        break;
                }
                case IVHD_DEV_ACPI_HID: {
                        u16 devid;
                        u8 hid[ACPIHID_HID_LEN];
                        u8 uid[ACPIHID_UID_LEN];
                        int ret;

                        if (h->type != 0x40) {
                                pr_err(FW_BUG "Invalid IVHD device type %#x\n",
                                       e->type);
                                break;
                        }

                        memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
                        hid[ACPIHID_HID_LEN - 1] = '\0';

                        if (!(*hid)) {
                                pr_err(FW_BUG "Invalid HID.\n");
                                break;
                        }

                        uid[0] = '\0';
                        switch (e->uidf) {
                        case UID_NOT_PRESENT:

                                if (e->uidl != 0)
                                        pr_warn(FW_BUG "Invalid UID length.\n");

                                break;
                        case UID_IS_INTEGER:

                                sprintf(uid, "%d", e->uid);

                                break;
                        case UID_IS_CHARACTER:

                                memcpy(uid, &e->uid, e->uidl);
                                uid[e->uidl] = '\0';

                                break;
                        default:
                                break;
                        }

                        devid = e->devid;
                        DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
                                    hid, uid,
                                    PCI_BUS_NUM(devid),
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));

                        flags = e->flags;

                        ret = add_acpi_hid_device(hid, uid, &devid, false);
                        if (ret)
                                return ret;

                        /*
                         * add_acpi_hid_device might update the devid in case a
                         * command-line override is present. So call
                         * set_dev_entry_from_acpi after add_acpi_hid_device.
                         */
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

                        break;
                }
                default:
                        break;
                }

                p += ivhd_entry_length(p);
        }

        return 0;
}
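/*
 * Worked example of the entry walk above (a sketch; sizes as this
 * driver understands the IVRS layout): for a type 0x10 IVHD,
 * get_ivhd_header_size() returns 24, so p starts at the first device
 * entry right after the fixed header. A DEV_SELECT entry
 * { .type = 0x02, .devid = 0x0010, .flags = 0 } programs exactly
 * devid 00:02.0. ivhd_entry_length() then advances p by 4 bytes for
 * entry types below 0x40, by 8 bytes for types 0x40-0x7f, and by the
 * encoded variable length for DEV_ACPI_HID entries.
 */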
static void __init free_iommu_one(struct amd_iommu *iommu)
{
        free_cwwb_sem(iommu);
        free_command_buffer(iommu);
        free_event_buffer(iommu);
        free_ppr_log(iommu);
        free_ga_log(iommu);
        iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
        struct amd_iommu *iommu, *next;

        for_each_iommu_safe(iommu, next) {
                list_del(&iommu->list);
                free_iommu_one(iommu);
                kfree(iommu);
        }
}
/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
        u32 value;

        if ((boot_cpu_data.x86 != 0x15) ||
            (boot_cpu_data.x86_model < 0x10) ||
            (boot_cpu_data.x86_model > 0x1f))
                return;

        pci_write_config_dword(iommu->dev, 0xf0, 0x90);
        pci_read_config_dword(iommu->dev, 0xf4, &value);

        if (value & BIT(2))
                return;

        /* Select NB indirect register 0x90 and enable writing */
        pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

        pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
        pci_info(iommu->dev, "Applying erratum 746 workaround\n");

        /* Clear the enable writing bit */
        pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}
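/*
 * The access pattern above is the generic northbridge indirect-register
 * sequence (a sketch of the same idiom): write the register index to
 * config offset 0xf0 - with bit 8 set when a write should be allowed -
 * then read or write the data through offset 0xf4, e.g.:
 *
 *	pci_write_config_dword(pdev, 0xf0, 0x90);	// select index 0x90
 *	pci_read_config_dword(pdev, 0xf4, &value);	// read its value
 */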
/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
        u32 value;

        if ((boot_cpu_data.x86 != 0x15) ||
            (boot_cpu_data.x86_model < 0x30) ||
            (boot_cpu_data.x86_model > 0x3f))
                return;

        /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
        value = iommu_read_l2(iommu, 0x47);

        if (value & BIT(0))
                return;

        /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
        iommu_write_l2(iommu, 0x47, value | BIT(0));

        pci_info(iommu->dev, "Applying ATS write check workaround\n");
}
/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
        int ret;

        raw_spin_lock_init(&iommu->lock);
        iommu->cmd_sem_val = 0;

        /* Add IOMMU to internal data structures */
        list_add_tail(&iommu->list, &amd_iommu_list);
        iommu->index = amd_iommus_present++;

        if (unlikely(iommu->index >= MAX_IOMMUS)) {
                WARN(1, "System has more IOMMUs than supported by this driver\n");
                return -ENOSYS;
        }

        /* Index is fine - add IOMMU to the array */
        amd_iommus[iommu->index] = iommu;

        /*
         * Copy data from ACPI table entry to the iommu struct
         */
        iommu->devid   = h->devid;
        iommu->cap_ptr = h->cap_ptr;
        iommu->pci_seg = h->pci_seg;
        iommu->mmio_phys = h->mmio_phys;

        switch (h->type) {
        case 0x10:
                /* Check if IVHD EFR contains proper max banks/counters */
                if ((h->efr_attr != 0) &&
                    ((h->efr_attr & (0xF << 13)) != 0) &&
                    ((h->efr_attr & (0x3F << 17)) != 0))
                        iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
                else
                        iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

                /*
                 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
                 * GAM also requires GA mode. Therefore, we need to
                 * check cmpxchg16b support before enabling it.
                 */
                if (!boot_cpu_has(X86_FEATURE_CX16) ||
                    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
                        amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
                break;
        case 0x11:
        case 0x40:
                if (h->efr_reg & (1 << 9))
                        iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
                else
                        iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

                /*
                 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
                 * XT and GAM also require GA mode. Therefore, we need to
                 * check cmpxchg16b support before enabling them.
                 */
                if (!boot_cpu_has(X86_FEATURE_CX16) ||
                    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
                        amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
                        break;
                }

                /*
                 * Note: Since iommu_update_intcapxt() leverages
                 * the IOMMU MMIO access to MSI capability block registers
                 * for MSI address lo/hi/data, we need to check both
                 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
                 */
                if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
                    (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
                        amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

                early_iommu_features_init(iommu, h);

                break;
        default:
                return -EINVAL;
        }

        iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
                                                iommu->mmio_phys_end);
        if (!iommu->mmio_base)
                return -ENOMEM;

        if (alloc_cwwb_sem(iommu))
                return -ENOMEM;

        if (alloc_command_buffer(iommu))
                return -ENOMEM;

        if (alloc_event_buffer(iommu))
                return -ENOMEM;

        iommu->int_enabled = false;

        init_translation_status(iommu);
        if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
                iommu_disable(iommu);
                clear_translation_pre_enabled(iommu);
                pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
                        iommu->index);
        }
        if (amd_iommu_pre_enabled)
                amd_iommu_pre_enabled = translation_pre_enabled(iommu);

        ret = init_iommu_from_acpi(iommu, h);
        if (ret)
                return ret;

        ret = amd_iommu_create_irq_domain(iommu);
        if (ret)
                return ret;

        /*
         * Make sure IOMMU is not considered to translate itself. The IVRS
         * table tells us so, but this is a lie!
         */
        amd_iommu_rlookup_table[iommu->devid] = NULL;

        return 0;
}
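/*
 * Feature gating in the switch above, spelled out (a sketch): GA mode
 * uses 128-bit IRTEs that must be updated atomically, hence the
 * cmpxchg16b (X86_FEATURE_CX16) requirement. Without CX16, or without
 * the GASup bit in the IVHD EFR snapshot, the driver falls back to
 * legacy 32-bit IRTE remapping:
 *
 *	if (!boot_cpu_has(X86_FEATURE_CX16) || !gasup)
 *		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
 *
 * where gasup abbreviates the EFR bit tests shown above.
 */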
/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * Walks all IVHD blocks in the table and returns the highest supported
 * IVHD type found for the devid of the first block.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
        u8 *base = (u8 *)ivrs;
        struct ivhd_header *ivhd = (struct ivhd_header *)
                                   (base + IVRS_HEADER_LENGTH);
        u8 last_type = ivhd->type;
        u16 devid = ivhd->devid;

        while (((u8 *)ivhd - base < ivrs->length) &&
               (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
                u8 *p = (u8 *)ivhd;

                if (ivhd->devid == devid)
                        last_type = ivhd->type;
                ivhd = (struct ivhd_header *)(p + ivhd->length);
        }

        return last_type;
}
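/*
 * Example (a sketch): firmware commonly emits one IVHD of type 0x10
 * plus a type 0x11 or 0x40 block for the same IOMMU devid. Walking the
 * blocks in table order, last_type ends up as the highest such type
 * (e.g. 0x40), and init_iommu_all() below only parses IVHDs of that
 * type.
 */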
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;
        struct amd_iommu *iommu;
        int ret;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                h = (struct ivhd_header *)p;
                if (*p == amd_iommu_target_ivhd_type) {

                        DUMP_printk("device: %02x:%02x.%01x cap: %04x "
                                    "seg: %d flags: %01x info %04x\n",
                                    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
                                    PCI_FUNC(h->devid), h->cap_ptr,
                                    h->pci_seg, h->flags, h->info);
                        DUMP_printk("       mmio-addr: %016llx\n",
                                    h->mmio_phys);

                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                        if (iommu == NULL)
                                return -ENOMEM;

                        ret = init_iommu_one(iommu, h);
                        if (ret)
                                return ret;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
        u64 val;
        struct pci_dev *pdev = iommu->dev;

        if (!iommu_feature(iommu, FEATURE_PC))
                return;

        amd_iommu_pc_present = true;

        pci_info(pdev, "IOMMU performance counters supported\n");

        val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
        iommu->max_banks = (u8) ((val >> 12) & 0x3f);
        iommu->max_counters = (u8) ((val >> 7) & 0xf);

        return;
}
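/*
 * Decoding the read above, worked through: bits [17:12] of the counter
 * configuration register give the number of banks and bits [10:7] the
 * counters per bank, so a raw value of 0x2280 decodes to max_banks = 2
 * and max_counters = 5.
 */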
static ssize_t amd_iommu_show_cap(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct amd_iommu *iommu = dev_to_amd_iommu(dev);
        return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct amd_iommu *iommu = dev_to_amd_iommu(dev);
        return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
        &dev_attr_cap.attr,
        &dev_attr_features.attr,
        NULL,
};

static struct attribute_group amd_iommu_group = {
        .name = "amd-iommu",
        .attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
        &amd_iommu_group,
        NULL,
};
/*
 * Note: IVHD 0x11 and 0x40 also contain an exact copy
 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
 */
static void __init late_iommu_features_init(struct amd_iommu *iommu)
{
        u64 features;

        if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
                return;

        /* read extended feature bits */
        features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);

        if (!iommu->features) {
                iommu->features = features;
                return;
        }

        /*
         * Sanity check and warn if EFR values from
         * IVHD and MMIO conflict.
         */
        if (features != iommu->features)
                pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
                        features, iommu->features);
}
static int __init iommu_init_pci(struct amd_iommu *iommu)
{
        int cap_ptr = iommu->cap_ptr;
        int ret;

        iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
                                                 iommu->devid & 0xff);
        if (!iommu->dev)
                return -ENODEV;

        /* Prevent binding other PCI device drivers to IOMMU devices */
        iommu->dev->match_driver = false;

        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);

        if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
                amd_iommu_iotlb_sup = false;

        late_iommu_features_init(iommu);

        if (iommu_feature(iommu, FEATURE_GT)) {
                int glxval;
                u32 max_pasid;
                u64 pasmax;

                pasmax = iommu->features & FEATURE_PASID_MASK;
                pasmax >>= FEATURE_PASID_SHIFT;
                max_pasid = (1 << (pasmax + 1)) - 1;

                amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

                BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

                glxval = iommu->features & FEATURE_GLXVAL_MASK;
                glxval >>= FEATURE_GLXVAL_SHIFT;

                if (amd_iommu_max_glx_val == -1)
                        amd_iommu_max_glx_val = glxval;
                else
                        amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
        }

        if (iommu_feature(iommu, FEATURE_GT) &&
            iommu_feature(iommu, FEATURE_PPR)) {
                iommu->is_iommu_v2   = true;
                amd_iommu_v2_present = true;
        }

        if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
                return -ENOMEM;

        ret = iommu_init_ga_log(iommu);
        if (ret)
                return ret;

        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
                amd_iommu_np_cache = true;

        init_iommu_perf_ctr(iommu);

        if (is_rd890_iommu(iommu->dev)) {
                int i, j;

                iommu->root_pdev =
                        pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
                                                    PCI_DEVFN(0, 0));

                /*
                 * Some rd890 systems may not be fully reconfigured by the
                 * BIOS, so it's necessary for us to store this information so
                 * it can be reprogrammed on resume
                 */
                pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
                                      &iommu->stored_addr_lo);
                pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
                                      &iommu->stored_addr_hi);

                /* Low bit locks writes to configuration space */
                iommu->stored_addr_lo &= ~1;

                for (i = 0; i < 6; i++)
                        for (j = 0; j < 0x12; j++)
                                iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

                for (i = 0; i < 0x83; i++)
                        iommu->stored_l2[i] = iommu_read_l2(iommu, i);
        }

        amd_iommu_erratum_746_workaround(iommu);
        amd_iommu_ats_write_check_workaround(iommu);

        iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
                               amd_iommu_groups, "ivhd%d", iommu->index);
        iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
        iommu_device_register(&iommu->iommu);

        return pci_enable_device(iommu->dev);
}
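/*
 * PASID sizing above, worked through: the PASmax field encodes the
 * supported PASID width minus one, so pasmax == 15 yields
 * max_pasid = (1 << 16) - 1 = 0xffff, i.e. full 16-bit PASIDs.
 * amd_iommu_max_pasid is clamped with min() so the global limit is the
 * smallest value supported by any IOMMU in the system.
 */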
static void print_iommu_info(void)
{
        static const char * const feat_str[] = {
                "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
                "IA", "GA", "HE", "PC"
        };
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                struct pci_dev *pdev = iommu->dev;
                int i;

                pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);

                if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
                        pr_info("Extended features (%#llx):", iommu->features);

                        for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
                                if (iommu_feature(iommu, (1ULL << i)))
                                        pr_cont(" %s", feat_str[i]);
                        }

                        if (iommu->features & FEATURE_GAM_VAPIC)
                                pr_cont(" GA_vAPIC");

                        pr_cont("\n");
                }
        }
        if (irq_remapping_enabled) {
                pr_info("Interrupt remapping enabled\n");
                if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
                        pr_info("Virtual APIC enabled\n");
                if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
                        pr_info("X2APIC enabled\n");
        }
}
static int __init amd_iommu_init_pci(void)
{
        struct amd_iommu *iommu;
        int ret = 0;

        for_each_iommu(iommu) {
                ret = iommu_init_pci(iommu);
                if (ret)
                        break;

                /* Need to setup range after PCI init */
                iommu_set_cwwb_range(iommu);
        }

        /*
         * Order is important here to make sure any unity map requirements are
         * fulfilled. The unity mappings are created and written to the device
         * table during the amd_iommu_init_api() call.
         *
         * After that we call init_device_table_dma() to make sure any
         * uninitialized DTE will block DMA, and in the end we flush the caches
         * of all IOMMUs to make sure the changes to the device table are
         * active.
         */
        ret = amd_iommu_init_api();

        init_device_table_dma();

        for_each_iommu(iommu)
                iommu_flush_all_caches(iommu);

        if (!ret)
                print_iommu_info();

        return ret;
}
/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
        int r;

        r = pci_enable_msi(iommu->dev);
        if (r)
                return r;

        r = request_threaded_irq(iommu->dev->irq,
                                 amd_iommu_int_handler,
                                 amd_iommu_int_thread,
                                 0, "AMD-Vi",
                                 iommu);

        if (r) {
                pci_disable_msi(iommu->dev);
                return r;
        }

        iommu->int_enabled = true;

        return 0;
}

#define XT_INT_DEST_MODE(x)     (((x) & 0x1ULL) << 2)
#define XT_INT_DEST_LO(x)       (((x) & 0xFFFFFFULL) << 8)
#define XT_INT_VEC(x)           (((x) & 0xFFULL) << 32)
#define XT_INT_DEST_HI(x)       ((((x) >> 24) & 0xFFULL) << 56)
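/*
 * Composition example for the macros above: vector 0x20 routed in
 * physical destination mode to APIC ID 0x12 is encoded as
 *
 *	val = XT_INT_VEC(0x20) | XT_INT_DEST_MODE(0) |
 *	      XT_INT_DEST_LO(0x12) | XT_INT_DEST_HI(0x12);
 *
 * placing destination bits [23:0] in register bits [31:8] and
 * destination bits [31:24] in bits [63:56], with the vector in
 * bits [39:32].
 */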
/*
 * Setup the IntCapXT registers with interrupt routing information
 * based on the PCI MSI capability block registers, accessed via
 * MMIO MSI address low/hi and MSI data registers.
 */
static void iommu_update_intcapxt(struct amd_iommu *iommu)
{
        u64 val;
        u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
        u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
        u32 data    = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
        bool dm     = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
        u32 dest    = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);

        if (x2apic_enabled())
                dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);

        val = XT_INT_VEC(data & 0xFF) |
              XT_INT_DEST_MODE(dm) |
              XT_INT_DEST_LO(dest) |
              XT_INT_DEST_HI(dest);

        /*
         * The current IOMMU implementation uses the same IRQ for all
         * 3 IOMMU interrupts.
         */
        writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
        writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
        writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
}
static void _irq_notifier_notify(struct irq_affinity_notify *notify,
                                 const cpumask_t *mask)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                if (iommu->dev->irq == notify->irq) {
                        iommu_update_intcapxt(iommu);
                        break;
                }
        }
}

static void _irq_notifier_release(struct kref *ref)
{
}

static int iommu_init_intcapxt(struct amd_iommu *iommu)
{
        int ret;
        struct irq_affinity_notify *notify = &iommu->intcapxt_notify;

        /*
         * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
         * which can be inferred from amd_iommu_xt_mode.
         */
        if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
                return 0;

        /*
         * We also need to set up a notifier to update the IntCapXT
         * registers whenever the irq affinity is changed from user-space.
         */
        notify->irq     = iommu->dev->irq;
        notify->notify  = _irq_notifier_notify;
        notify->release = _irq_notifier_release;
        ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
        if (ret) {
                pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
                       iommu->devid, iommu->dev->irq);
                return ret;
        }

        iommu_update_intcapxt(iommu);
        iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
        return ret;
}
static int iommu_init_msi(struct amd_iommu *iommu)
{
        int ret;

        if (iommu->int_enabled)
                goto enable_faults;

        if (iommu->dev->msi_cap)
                ret = iommu_setup_msi(iommu);
        else
                ret = -ENODEV;

        if (ret)
                return ret;

enable_faults:
        ret = iommu_init_intcapxt(iommu);
        if (ret)
                return ret;

        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

        if (iommu->ppr_log != NULL)
                iommu_feature_enable(iommu, CONTROL_PPRINT_EN);

        iommu_ga_log_enable(iommu);

        return 0;
}
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
        struct unity_map_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}
/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
        struct unity_map_entry *e = NULL;
        char *s;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        switch (m->type) {
        default:
                kfree(e);
                return 0;
        case ACPI_IVMD_TYPE:
                s = "IVMD_TYPE\t\t\t";
                e->devid_start = e->devid_end = m->devid;
                break;
        case ACPI_IVMD_TYPE_ALL:
                s = "IVMD_TYPE_ALL\t\t";
                e->devid_start = 0;
                e->devid_end = amd_iommu_last_bdf;
                break;
        case ACPI_IVMD_TYPE_RANGE:
                s = "IVMD_TYPE_RANGE\t\t";
                e->devid_start = m->devid;
                e->devid_end = m->aux;
                break;
        }
        e->address_start = PAGE_ALIGN(m->range_start);
        e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
        e->prot = m->flags >> 1;

        /*
         * Treat per-device exclusion ranges as r/w unity-mapped regions
         * since some buggy BIOSes overwrite the exclusion range
         * (exclusion_start and exclusion_length members) when multiple
         * exclusion ranges (IVMD entries) are defined in the ACPI table.
         */
        if (m->flags & IVMD_FLAG_EXCL_RANGE)
                e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;

        DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
                    " range_start: %016llx range_end: %016llx flags: %x\n", s,
                    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
                    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
                    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
                    e->address_start, e->address_end, m->flags);

        list_add_tail(&e->list, &amd_iommu_unity_map);

        return 0;
}
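/*
 * Worked example for the range math above: an IVMD with
 * range_start = 0xfe000 and range_length = 0x1000 becomes a unity map
 * of [0xfe000, 0xff000). prot = m->flags >> 1 moves IVMD_FLAG_IR/IW
 * (bits 1 and 2) into the low two protection bits, so a rewritten
 * exclusion range ends up with prot = 0x3 (read + write).
 */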
/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                m = (struct ivmd_header *)p;
                if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
                        init_unity_map_range(m);

                p += m->length;
        }

        return 0;
}
/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(void)
{
        u32 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
        }
}
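/*
 * DTE semantics above (a sketch of the intent): setting DEV_ENTRY_VALID
 * (V) together with DEV_ENTRY_TRANSLATION (TV) while the page-table
 * root pointer is still zero means every translation misses, so DMA
 * from any device whose DTE is never programmed further is blocked
 * rather than passed through untranslated.
 */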
static void __init uninit_device_table_dma(void)
{
        u32 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                amd_iommu_dev_table[devid].data[0] = 0ULL;
                amd_iommu_dev_table[devid].data[1] = 0ULL;
        }
}

static void init_device_table(void)
{
        u32 devid;

        if (!amd_iommu_irq_remap)
                return;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
                set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}
static void iommu_init_flags(struct amd_iommu *iommu)
{
        iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

        /* Set IOTLB invalidation timeout to 1s */
        iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
        int i, j;
        u32 ioc_feature_control;
        struct pci_dev *pdev = iommu->root_pdev;

        /* RD890 BIOSes may not have completely reconfigured the iommu */
        if (!is_rd890_iommu(iommu->dev) || !pdev)
                return;

        /*
         * First, we need to ensure that the iommu is enabled. This is
         * controlled by a register in the northbridge
         */

        /* Select Northbridge indirect register 0x75 and enable writing */
        pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
        pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

        /* Enable the iommu */
        if (!(ioc_feature_control & 0x1))
                pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

        /* Restore the iommu BAR */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo);
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
                               iommu->stored_addr_hi);

        /* Restore the l1 indirect regs for each of the 6 l1s */
        for (i = 0; i < 6; i++)
                for (j = 0; j < 0x12; j++)
                        iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

        /* Restore the l2 indirect regs */
        for (i = 0; i < 0x83; i++)
                iommu_write_l2(iommu, i, iommu->stored_l2[i]);

        /* Lock PCI setup registers */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo | 1);
}
static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        switch (amd_iommu_guest_ir) {
        case AMD_IOMMU_GUEST_IR_VAPIC:
                iommu_feature_enable(iommu, CONTROL_GAM_EN);
                fallthrough;
        case AMD_IOMMU_GUEST_IR_LEGACY_GA:
                iommu_feature_enable(iommu, CONTROL_GA_EN);
                iommu->irte_ops = &irte_128_ops;
                break;
        default:
                iommu->irte_ops = &irte_32_ops;
                break;
        }
#endif
}
static void early_enable_iommu(struct amd_iommu *iommu)
{
        iommu_disable(iommu);
        iommu_init_flags(iommu);
        iommu_set_device_table(iommu);
        iommu_enable_command_buffer(iommu);
        iommu_enable_event_buffer(iommu);
        iommu_set_exclusion_range(iommu);
        iommu_enable_ga(iommu);
        iommu_enable_xt(iommu);
        iommu_enable(iommu);
        iommu_flush_all_caches(iommu);
}
2362*4882a593Smuzhiyun
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * In a kdump kernel with all IOMMUs pre-enabled, it instead tries to copy
 * the old content of the device table entries. If that is not the case,
 * or the copy fails, it just continues as a normal kernel would.
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	if (!copy_device_table()) {
		/*
		 * If we get here because copying the device table from the
		 * old kernel (with all IOMMUs enabled) failed, print an
		 * error message and try to free the allocated
		 * old_dev_tbl_cpy.
		 */
		if (amd_iommu_pre_enabled)
			pr_err("Failed to copy DEV table from previous kernel.\n");
		if (old_dev_tbl_cpy != NULL)
			free_pages((unsigned long)old_dev_tbl_cpy,
				   get_order(dev_table_size));

		for_each_iommu(iommu) {
			clear_translation_pre_enabled(iommu);
			early_enable_iommu(iommu);
		}
	} else {
		pr_info("Copied DEV table from previous kernel.\n");
		free_pages((unsigned long)amd_iommu_dev_table,
			   get_order(dev_table_size));
		amd_iommu_dev_table = old_dev_tbl_cpy;
		for_each_iommu(iommu) {
			iommu_disable_command_buffer(iommu);
			iommu_disable_event_buffer(iommu);
			iommu_enable_command_buffer(iommu);
			iommu_enable_event_buffer(iommu);
			iommu_enable_ga(iommu);
			iommu_enable_xt(iommu);
			iommu_set_device_table(iommu);
			iommu_flush_all_caches(iommu);
		}
	}

#ifdef CONFIG_IRQ_REMAP
	/*
	 * Note: We have already checked GASup from the IVRS table.
	 * Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static void __init free_iommu_resources(void)
{
	kmemleak_free(irq_lookup_table);
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));
	irq_lookup_table = NULL;

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
	amd_iommu_rlookup_table = NULL;

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));
	amd_iommu_alias_table = NULL;

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));
	amd_iommu_dev_table = NULL;

	free_iommu_all();
}

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
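/*
 * A device id packs PCI bus/device/function into 16 bits: bus in bits
 * 15:8, device in bits 7:3, function in bits 2:0. IOAPIC_SB_DEVID above
 * therefore evaluates to 0x00a0, i.e. device 00:14.0.
 */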

static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
			       fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("Disabling interrupt remapping\n");

	return ret;
}

static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
	amd_iommu_pd_alloc_bitmap = NULL;

	free_unity_maps();
}

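/*
 * Cache the global IVinfo DWORD from the IVRS table header so it can be
 * consulted later during feature detection without mapping the ACPI
 * table again.
 */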
static void __init ivinfo_init(void *ivrs)
{
	amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * It is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for the AMD IOMMU (IVRS)
 * four times:
 *
 * Pass 1: Discover the most comprehensive IVHD type to use.
 *
 * Pass 2: Find the highest PCI device id the driver has to handle.
 *         Based on this information the sizes of the data structures
 *         which need to be allocated are determined.
 *
 * Pass 3: Initialize the data structures just allocated with the
 *         information in the ACPI table about available AMD IOMMUs
 *         in the system. It also maps the PCI devices in the
 *         system to specific IOMMUs.
 *
 * Pass 4: After the basic data structures are allocated and
 *         initialized we update them with information about memory
 *         remapping requirements parsed out of the ACPI table in
 *         this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;
	int i, remap_cache_sz, ret = 0;
	u32 pci_id;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * Validate the checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	ret = check_ivrs_checksum(ivrs_base);
	if (ret)
		goto out;

	ivinfo_init(ivrs_base);

	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

	/*
	 * First parse the ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Based on this information the shared data
	 * structures for the IOMMUs in the system will be allocated.
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
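
	/*
	 * These sizes scale with the highest device id found above; e.g.
	 * with a last_bdf of 0xffff the device table covers all 64k
	 * device ids (2 MiB at 32 bytes per device table entry).
	 */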

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMUs see for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
						  get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * Let all alias entries point to themselves initially.
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * Never allocate domain 0 because it's used as the non-allocated
	 * and error value placeholder.
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);

	/*
	 * Now that the data structures are allocated and basically
	 * initialized, start the real ACPI table scan.
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

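	/*
	 * Scan function 0 of every slot on bus 0: the low 16 bits of PCI
	 * config dword 0 hold the vendor id (0x1002 = ATI/AMD) and the
	 * high 16 bits the device id (0x98e4 = Stoney Ridge graphics).
	 */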
	/* Disable IOMMU if there's Stoney Ridge graphics */
	for (i = 0; i < 32; i++) {
		pci_id = read_pci_config(0, i, 0, 0);
		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
			pr_info("Disable IOMMU on Stoney Ridge\n");
			amd_iommu_disabled = true;
			break;
		}
	}

	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
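		/*
		 * A legacy IRTE is a single u32; the GA modes use the
		 * 128-bit IRTE format (two u64s per entry), hence the
		 * larger cache objects.
		 */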
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							IRQ_TABLE_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);
	ivrs_base = NULL;

	return ret;
}

static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
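
/*
 * The normal progression through the state machine below is:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * IOMMU_NOT_FOUND, IOMMU_INIT_ERROR and IOMMU_CMDLINE_DISABLED are
 * terminal error states; once one of them is reached,
 * iommu_go_to_state() will not advance any further.
 */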

static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state = IOMMU_NOT_FOUND;
			ret = -ENODEV;
		} else {
			init_state = IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
			pr_info("AMD IOMMU disabled\n");
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}
	return ret;
}

static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}

#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;

	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;
	return amd_iommu_xt_mode;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	struct amd_iommu *iommu;
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
#ifdef CONFIG_GART_IOMMU
	if (ret && list_empty(&amd_iommu_list)) {
		/*
		 * We failed to initialize the AMD IOMMU - try fallback
		 * to GART if possible.
		 */
		gart_iommu_init();
	}
#endif

	for_each_iommu(iommu)
		amd_iommu_debugfs_setup(iommu);

	return ret;
}

static bool amd_iommu_sme_check(void)
{
	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
		return true;

	/* For Fam17h, a specific level of support is required */
	if (boot_cpu_data.microcode >= 0x08001205)
		return true;

	if ((boot_cpu_data.microcode >= 0x08001126) &&
	    (boot_cpu_data.microcode <= 0x080011ff))
		return true;

	pr_notice("IOMMU not currently supported when SME is active\n");

	return false;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It simply checks whether an IVRS ACPI table exists to detect
 * AMD IOMMUs.
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (!amd_iommu_sme_check())
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}

static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_ioapic_map_size++;
	early_ioapic_map[i].id = id;
	early_ioapic_map[i].devid = devid;
	early_ioapic_map[i].cmd_line = true;

	return 1;
}

static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_hpet_map_size++;
	early_hpet_map[i].id = id;
	early_hpet_map[i].devid = devid;
	early_hpet_map[i].cmd_line = true;

	return 1;
}

static int __init parse_ivrs_acpihid(char *str)
{
	u32 bus, dev, fn;
	char *hid, *uid, *p;
	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
	int ret, i;

	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
	if (ret != 4) {
		pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("Invalid command line: hid or uid\n");
		return 1;
	}

	/* Guard the map the same way the IOAPIC and HPET parsers do */
	if (early_acpihid_map_size == EARLY_MAP_SIZE) {
		pr_err("Early ACPIHID map overflow - ignoring ivrs_acpihid%s\n",
		       str);
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid =
		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	early_acpihid_map[i].cmd_line = true;

	return 1;
}

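/*
 * Example usage of the options registered below; the formats follow the
 * sscanf patterns in the parsers above:
 *
 *   amd_iommu_dump
 *   amd_iommu=fullflush (also: off, force_isolation)
 *   amd_iommu_intr=legacy (or: vapic)
 *   ivrs_ioapic[9]=00:14.0            map IOAPIC id 9 to device 00:14.0
 *   ivrs_hpet[0]=00:14.0              map HPET id 0 to device 00:14.0
 *   ivrs_acpihid[00:14.5]=AMD0020:0   map device 00:14.5 to HID:UID
 */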
__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
__setup("amd_iommu_intr=", parse_amd_iommu_intr);
__setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);
__setup("ivrs_acpihid", parse_ivrs_acpihid);

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);

struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}
EXPORT_SYMBOL(get_amd_iommu);

/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/

u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);

u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

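	/*
	 * Per-counter registers live at MMIO offset
	 * ((0x40 | bank) << 12) | (cntr << 8) | fxn, so bank 0 / counter 0
	 * / function 0 maps to 0x40000; each bank spans 4 KiB and each
	 * counter 256 bytes within it.
	 */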
	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}

int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
EXPORT_SYMBOL(amd_iommu_pc_get_reg);

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
EXPORT_SYMBOL(amd_iommu_pc_set_reg);