1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
5 * Leo Duran <leo.duran@amd.com>
6 */
7
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
9 #define dev_fmt(fmt) pr_fmt(fmt)
10
11 #include <linux/pci.h>
12 #include <linux/acpi.h>
13 #include <linux/list.h>
14 #include <linux/bitmap.h>
15 #include <linux/slab.h>
16 #include <linux/syscore_ops.h>
17 #include <linux/interrupt.h>
18 #include <linux/msi.h>
19 #include <linux/amd-iommu.h>
20 #include <linux/export.h>
21 #include <linux/kmemleak.h>
22 #include <linux/mem_encrypt.h>
23 #include <linux/iopoll.h>
24 #include <asm/pci-direct.h>
25 #include <asm/iommu.h>
26 #include <asm/apic.h>
27 #include <asm/msidef.h>
28 #include <asm/gart.h>
29 #include <asm/x86_init.h>
30 #include <asm/iommu_table.h>
31 #include <asm/io_apic.h>
32 #include <asm/irq_remapping.h>
33 #include <asm/set_memory.h>
34
35 #include <linux/crash_dump.h>
36
37 #include "amd_iommu.h"
38 #include "../irq_remapping.h"
39
40 /*
41 * definitions for the ACPI scanning code
42 */
43 #define IVRS_HEADER_LENGTH 48
44
45 #define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
46 #define ACPI_IVMD_TYPE_ALL 0x20
47 #define ACPI_IVMD_TYPE 0x21
48 #define ACPI_IVMD_TYPE_RANGE 0x22
49
50 #define IVHD_DEV_ALL 0x01
51 #define IVHD_DEV_SELECT 0x02
52 #define IVHD_DEV_SELECT_RANGE_START 0x03
53 #define IVHD_DEV_RANGE_END 0x04
54 #define IVHD_DEV_ALIAS 0x42
55 #define IVHD_DEV_ALIAS_RANGE 0x43
56 #define IVHD_DEV_EXT_SELECT 0x46
57 #define IVHD_DEV_EXT_SELECT_RANGE 0x47
58 #define IVHD_DEV_SPECIAL 0x48
59 #define IVHD_DEV_ACPI_HID 0xf0
60
61 #define UID_NOT_PRESENT 0
62 #define UID_IS_INTEGER 1
63 #define UID_IS_CHARACTER 2
64
65 #define IVHD_SPECIAL_IOAPIC 1
66 #define IVHD_SPECIAL_HPET 2
67
68 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01
69 #define IVHD_FLAG_PASSPW_EN_MASK 0x02
70 #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
71 #define IVHD_FLAG_ISOC_EN_MASK 0x08
72
73 #define IVMD_FLAG_EXCL_RANGE 0x08
74 #define IVMD_FLAG_IW 0x04
75 #define IVMD_FLAG_IR 0x02
76 #define IVMD_FLAG_UNITY_MAP 0x01
77
78 #define ACPI_DEVFLAG_INITPASS 0x01
79 #define ACPI_DEVFLAG_EXTINT 0x02
80 #define ACPI_DEVFLAG_NMI 0x04
81 #define ACPI_DEVFLAG_SYSMGT1 0x10
82 #define ACPI_DEVFLAG_SYSMGT2 0x20
83 #define ACPI_DEVFLAG_LINT0 0x40
84 #define ACPI_DEVFLAG_LINT1 0x80
85 #define ACPI_DEVFLAG_ATSDIS 0x10000000
86
87 #define LOOP_TIMEOUT 2000000
88 /*
89 * ACPI table definitions
90 *
91 * These data structures are laid over the table to parse the important values
92 * out of it.
93 */
94
95 extern const struct iommu_ops amd_iommu_ops;
96
97 /*
98 * structure describing one IOMMU in the ACPI table. Typically followed by one
99 * or more ivhd_entry structures.
100 */
101 struct ivhd_header {
102 u8 type;
103 u8 flags;
104 u16 length;
105 u16 devid;
106 u16 cap_ptr;
107 u64 mmio_phys;
108 u16 pci_seg;
109 u16 info;
110 u32 efr_attr;
111
112 /* Following only valid on IVHD type 11h and 40h */
113 u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
114 u64 res;
115 } __attribute__((packed));
116
117 /*
118 * A device entry describing which devices a specific IOMMU translates and
119 * which requestor ids they use.
120 */
121 struct ivhd_entry {
122 u8 type;
123 u16 devid;
124 u8 flags;
125 u32 ext;
126 u32 hidh;
127 u64 cid;
128 u8 uidf;
129 u8 uidl;
130 u8 uid;
131 } __attribute__((packed));
132
133 /*
134 * An AMD IOMMU memory definition structure. It defines things like exclusion
135 * ranges for devices and regions that should be unity mapped.
136 */
137 struct ivmd_header {
138 u8 type;
139 u8 flags;
140 u16 length;
141 u16 devid;
142 u16 aux;
143 u64 resv;
144 u64 range_start;
145 u64 range_length;
146 } __attribute__((packed));
147
148 bool amd_iommu_dump;
149 bool amd_iommu_irq_remap __read_mostly;
150
151 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
152 static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
153
154 static bool amd_iommu_detected;
155 static bool __initdata amd_iommu_disabled;
156 static int amd_iommu_target_ivhd_type;
157
158 u16 amd_iommu_last_bdf; /* largest PCI device id we have
159 to handle */
160 LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
161 we find in ACPI */
162 bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
163
164 LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
165 system */
166
167 /* Array to assign indices to IOMMUs */
168 struct amd_iommu *amd_iommus[MAX_IOMMUS];
169
170 /* Number of IOMMUs present in the system */
171 static int amd_iommus_present;
172
173 /* IOMMUs have a non-present cache? */
174 bool amd_iommu_np_cache __read_mostly;
175 bool amd_iommu_iotlb_sup __read_mostly = true;
176
177 u32 amd_iommu_max_pasid __read_mostly = ~0;
178
179 bool amd_iommu_v2_present __read_mostly;
180 static bool amd_iommu_pc_present __read_mostly;
181
182 bool amd_iommu_force_isolation __read_mostly;
183
184 /*
185 * Pointer to the device table which is shared by all AMD IOMMUs
186 * it is indexed by the PCI device id or the HT unit id and contains
187 * information about the domain the device belongs to as well as the
188 * page table root pointer.
189 */
190 struct dev_table_entry *amd_iommu_dev_table;
191 /*
192 * Pointer to a device table to which the content of the old device table
193 * will be copied. It is only used in the kdump kernel.
194 */
195 static struct dev_table_entry *old_dev_tbl_cpy;
196
197 /*
198 * The alias table is a driver specific data structure which contains the
199 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
200 * More than one device can share the same requestor id.
201 */
202 u16 *amd_iommu_alias_table;
203
204 /*
205 * The rlookup table is used to find the IOMMU which is responsible
206 * for a specific device. It is also indexed by the PCI device id.
207 */
208 struct amd_iommu **amd_iommu_rlookup_table;
209 EXPORT_SYMBOL(amd_iommu_rlookup_table);
210
211 /*
212 * This table is used to find the irq remapping table for a given device id
213 * quickly.
214 */
215 struct irq_remap_table **irq_lookup_table;
216
217 /*
218 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
219 * to know which ones are already in use.
220 */
221 unsigned long *amd_iommu_pd_alloc_bitmap;
222
223 static u32 dev_table_size; /* size of the device table */
224 static u32 alias_table_size; /* size of the alias table */
225 static u32 rlookup_table_size; /* size of the rlookup table */
226
227 enum iommu_init_state {
228 IOMMU_START_STATE,
229 IOMMU_IVRS_DETECTED,
230 IOMMU_ACPI_FINISHED,
231 IOMMU_ENABLED,
232 IOMMU_PCI_INIT,
233 IOMMU_INTERRUPTS_EN,
234 IOMMU_DMA_OPS,
235 IOMMU_INITIALIZED,
236 IOMMU_NOT_FOUND,
237 IOMMU_INIT_ERROR,
238 IOMMU_CMDLINE_DISABLED,
239 };
240
241 /* Early ioapic and hpet maps from kernel command line */
242 #define EARLY_MAP_SIZE 4
243 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
244 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
245 static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
246
247 static int __initdata early_ioapic_map_size;
248 static int __initdata early_hpet_map_size;
249 static int __initdata early_acpihid_map_size;
250
251 static bool __initdata cmdline_maps;
252
253 static enum iommu_init_state init_state = IOMMU_START_STATE;
254
255 static int amd_iommu_enable_interrupts(void);
256 static int __init iommu_go_to_state(enum iommu_init_state state);
257 static void init_device_table_dma(void);
258
259 static bool amd_iommu_pre_enabled = true;
260
261 static u32 amd_iommu_ivinfo __initdata;
262
263 bool translation_pre_enabled(struct amd_iommu *iommu)
264 {
265 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
266 }
267 EXPORT_SYMBOL(translation_pre_enabled);
268
269 static void clear_translation_pre_enabled(struct amd_iommu *iommu)
270 {
271 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
272 }
273
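/*
 * Check whether translation was already enabled by the firmware or a
 * previous kernel and record it in the per-IOMMU flags.
 */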
274 static void init_translation_status(struct amd_iommu *iommu)
275 {
276 u64 ctrl;
277
278 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
279 if (ctrl & (1<<CONTROL_IOMMU_EN))
280 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
281 }
282
283 static inline void update_last_devid(u16 devid)
284 {
285 if (devid > amd_iommu_last_bdf)
286 amd_iommu_last_bdf = devid;
287 }
288
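/*
 * Calculate the size of a per-device table (device/alias/rlookup table),
 * rounded up to a whole power-of-two number of pages.
 */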
289 static inline unsigned long tbl_size(int entry_size)
290 {
291 unsigned shift = PAGE_SHIFT +
292 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
293
294 return 1UL << shift;
295 }
296
297 int amd_iommu_get_num_iommus(void)
298 {
299 return amd_iommus_present;
300 }
301
302 #ifdef CONFIG_IRQ_REMAP
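/* Returns true only if the given feature bit is supported by every IOMMU in the system. */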
303 static bool check_feature_on_all_iommus(u64 mask)
304 {
305 bool ret = false;
306 struct amd_iommu *iommu;
307
308 for_each_iommu(iommu) {
309 ret = iommu_feature(iommu, mask);
310 if (!ret)
311 return false;
312 }
313
314 return true;
315 }
316 #endif
317
318 /*
319 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
320 * Default to IVHD EFR since it is available sooner
321 * (i.e. before PCI init).
322 */
323 static void __init early_iommu_features_init(struct amd_iommu *iommu,
324 struct ivhd_header *h)
325 {
326 if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
327 iommu->features = h->efr_reg;
328 }
329
330 /* Access to l1 and l2 indexed register spaces */
331
332 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
333 {
334 u32 val;
335
336 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
337 pci_read_config_dword(iommu->dev, 0xfc, &val);
338 return val;
339 }
340
341 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
342 {
343 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
344 pci_write_config_dword(iommu->dev, 0xfc, val);
345 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
346 }
347
348 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
349 {
350 u32 val;
351
352 pci_write_config_dword(iommu->dev, 0xf0, address);
353 pci_read_config_dword(iommu->dev, 0xf4, &val);
354 return val;
355 }
356
357 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
358 {
359 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
360 pci_write_config_dword(iommu->dev, 0xf4, val);
361 }
362
363 /****************************************************************************
364 *
365 * AMD IOMMU MMIO register space handling functions
366 *
367 * These functions are used to program the IOMMU device registers in
368 * MMIO space required for that driver.
369 *
370 ****************************************************************************/
371
372 /*
373 * This function sets the exclusion range in the IOMMU. DMA accesses to the
374 * exclusion range are passed through untranslated.
375 */
376 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
377 {
378 u64 start = iommu->exclusion_start & PAGE_MASK;
379 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
380 u64 entry;
381
382 if (!iommu->exclusion_start)
383 return;
384
385 entry = start | MMIO_EXCL_ENABLE_MASK;
386 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
387 &entry, sizeof(entry));
388
389 entry = limit;
390 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
391 &entry, sizeof(entry));
392 }
393
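/*
 * Program the completion-wait write-back (CWWB) region used when the
 * IOMMU supports SNP.
 */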
394 static void iommu_set_cwwb_range(struct amd_iommu *iommu)
395 {
396 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
397 u64 entry = start & PM_ADDR_MASK;
398
399 if (!iommu_feature(iommu, FEATURE_SNP))
400 return;
401
402 /* Note:
403 * Re-purpose Exclusion base/limit registers for Completion wait
404 * write-back base/limit.
405 */
406 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
407 &entry, sizeof(entry));
408
409 /* Note:
410 * Default to 4 Kbytes, which can be specified by setting base
411 * address equal to the limit address.
412 */
413 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
414 &entry, sizeof(entry));
415 }
416
417 /* Programs the physical address of the device table into the IOMMU hardware */
418 static void iommu_set_device_table(struct amd_iommu *iommu)
419 {
420 u64 entry;
421
422 BUG_ON(iommu->mmio_base == NULL);
423
424 entry = iommu_virt_to_phys(amd_iommu_dev_table);
425 entry |= (dev_table_size >> 12) - 1;
426 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
427 &entry, sizeof(entry));
428 }
429
430 /* Generic functions to enable/disable certain features of the IOMMU. */
431 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
432 {
433 u64 ctrl;
434
435 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
436 ctrl |= (1ULL << bit);
437 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
438 }
439
440 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
441 {
442 u64 ctrl;
443
444 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
445 ctrl &= ~(1ULL << bit);
446 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
447 }
448
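/* Program the invalidation timeout field of the IOMMU control register. */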
449 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
450 {
451 u64 ctrl;
452
453 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
454 ctrl &= ~CTRL_INV_TO_MASK;
455 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
456 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
457 }
458
459 /* Function to enable the hardware */
460 static void iommu_enable(struct amd_iommu *iommu)
461 {
462 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
463 }
464
465 static void iommu_disable(struct amd_iommu *iommu)
466 {
467 if (!iommu->mmio_base)
468 return;
469
470 /* Disable command buffer */
471 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
472
473 /* Disable event logging and event interrupts */
474 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
475 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
476
477 /* Disable IOMMU GA_LOG */
478 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
479 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
480
481 /* Disable IOMMU hardware itself */
482 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
483 }
484
485 /*
486 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
487 * the system has one.
488 */
489 static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
490 {
491 if (!request_mem_region(address, end, "amd_iommu")) {
492 pr_err("Can not reserve memory region %llx-%llx for mmio\n",
493 address, end);
494 pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
495 return NULL;
496 }
497
498 return (u8 __iomem *)ioremap(address, end);
499 }
500
501 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
502 {
503 if (iommu->mmio_base)
504 iounmap(iommu->mmio_base);
505 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
506 }
507
508 static inline u32 get_ivhd_header_size(struct ivhd_header *h)
509 {
510 u32 size = 0;
511
512 switch (h->type) {
513 case 0x10:
514 size = 24;
515 break;
516 case 0x11:
517 case 0x40:
518 size = 40;
519 break;
520 }
521 return size;
522 }
523
524 /****************************************************************************
525 *
526 * The functions below belong to the first pass of AMD IOMMU ACPI table
527 * parsing. In this pass we try to find out the highest device id this
528 * code has to handle. Upon this information the size of the shared data
529 * structures is determined later.
530 *
531 ****************************************************************************/
532
533 /*
534 * This function calculates the length of a given IVHD entry
535 */
536 static inline int ivhd_entry_length(u8 *ivhd)
537 {
538 u32 type = ((struct ivhd_entry *)ivhd)->type;
539
540 if (type < 0x80) {
541 return 0x04 << (*ivhd >> 6);
542 } else if (type == IVHD_DEV_ACPI_HID) {
543 /* For ACPI_HID, offset 21 is uid len */
544 return *((u8 *)ivhd + 21) + 22;
545 }
546 return 0;
547 }
548
549 /*
550 * After reading the highest device id from the IOMMU PCI capability header
551 * this function checks whether a higher device id is defined in the ACPI table.
552 */
553 static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
554 {
555 u8 *p = (void *)h, *end = (void *)h;
556 struct ivhd_entry *dev;
557
558 u32 ivhd_size = get_ivhd_header_size(h);
559
560 if (!ivhd_size) {
561 pr_err("Unsupported IVHD type %#x\n", h->type);
562 return -EINVAL;
563 }
564
565 p += ivhd_size;
566 end += h->length;
567
568 while (p < end) {
569 dev = (struct ivhd_entry *)p;
570 switch (dev->type) {
571 case IVHD_DEV_ALL:
572 /* Use maximum BDF value for DEV_ALL */
573 update_last_devid(0xffff);
574 break;
575 case IVHD_DEV_SELECT:
576 case IVHD_DEV_RANGE_END:
577 case IVHD_DEV_ALIAS:
578 case IVHD_DEV_EXT_SELECT:
579 /* all the above subfield types refer to device ids */
580 update_last_devid(dev->devid);
581 break;
582 default:
583 break;
584 }
585 p += ivhd_entry_length(p);
586 }
587
588 WARN_ON(p != end);
589
590 return 0;
591 }
592
593 static int __init check_ivrs_checksum(struct acpi_table_header *table)
594 {
595 int i;
596 u8 checksum = 0, *p = (u8 *)table;
597
598 for (i = 0; i < table->length; ++i)
599 checksum += p[i];
600 if (checksum != 0) {
601 /* ACPI table corrupt */
602 pr_err(FW_BUG "IVRS invalid checksum\n");
603 return -ENODEV;
604 }
605
606 return 0;
607 }
608
609 /*
610 * Iterate over all IVHD entries in the ACPI table and find the highest device
611 * id which we need to handle. This is the first of three functions which parse
612 * the ACPI table. So we check the checksum here.
613 */
614 static int __init find_last_devid_acpi(struct acpi_table_header *table)
615 {
616 u8 *p = (u8 *)table, *end = (u8 *)table;
617 struct ivhd_header *h;
618
619 p += IVRS_HEADER_LENGTH;
620
621 end += table->length;
622 while (p < end) {
623 h = (struct ivhd_header *)p;
624 if (h->type == amd_iommu_target_ivhd_type) {
625 int ret = find_last_devid_from_ivhd(h);
626
627 if (ret)
628 return ret;
629 }
630 p += h->length;
631 }
632 WARN_ON(p != end);
633
634 return 0;
635 }
636
637 /****************************************************************************
638 *
639 * The following functions belong to the code path which parses the ACPI table
640 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
641 * data structures, initialize the device/alias/rlookup table and also
642 * basically initialize the hardware.
643 *
644 ****************************************************************************/
645
646 /*
647 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
648 * write commands to that buffer later and the IOMMU will execute them
649 * asynchronously
650 */
651 static int __init alloc_command_buffer(struct amd_iommu *iommu)
652 {
653 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
654 get_order(CMD_BUFFER_SIZE));
655
656 return iommu->cmd_buf ? 0 : -ENOMEM;
657 }
658
659 /*
660 * This function restarts event logging in case the IOMMU experienced
661 * an event log buffer overflow.
662 */
663 void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
664 {
665 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
666 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
667 }
668
669 /*
670 * This function resets the command buffer if the IOMMU stopped fetching
671 * commands from it.
672 */
673 void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
674 {
675 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
676
677 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
678 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
679 iommu->cmd_buf_head = 0;
680 iommu->cmd_buf_tail = 0;
681
682 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
683 }
684
685 /*
686 * This function writes the command buffer address to the hardware and
687 * enables it.
688 */
689 static void iommu_enable_command_buffer(struct amd_iommu *iommu)
690 {
691 u64 entry;
692
693 BUG_ON(iommu->cmd_buf == NULL);
694
695 entry = iommu_virt_to_phys(iommu->cmd_buf);
696 entry |= MMIO_CMD_SIZE_512;
697
698 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
699 &entry, sizeof(entry));
700
701 amd_iommu_reset_cmd_buffer(iommu);
702 }
703
704 /*
705 * This function disables the command buffer
706 */
707 static void iommu_disable_command_buffer(struct amd_iommu *iommu)
708 {
709 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
710 }
711
712 static void __init free_command_buffer(struct amd_iommu *iommu)
713 {
714 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
715 }
716
717 static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
718 gfp_t gfp, size_t size)
719 {
720 int order = get_order(size);
721 void *buf = (void *)__get_free_pages(gfp, order);
722
723 if (buf &&
724 iommu_feature(iommu, FEATURE_SNP) &&
725 set_memory_4k((unsigned long)buf, (1 << order))) {
726 free_pages((unsigned long)buf, order);
727 buf = NULL;
728 }
729
730 return buf;
731 }
732
733 /* allocates the memory where the IOMMU will log its events to */
734 static int __init alloc_event_buffer(struct amd_iommu *iommu)
735 {
736 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
737 EVT_BUFFER_SIZE);
738
739 return iommu->evt_buf ? 0 : -ENOMEM;
740 }
741
742 static void iommu_enable_event_buffer(struct amd_iommu *iommu)
743 {
744 u64 entry;
745
746 BUG_ON(iommu->evt_buf == NULL);
747
748 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
749
750 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
751 &entry, sizeof(entry));
752
753 /* set head and tail to zero manually */
754 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
755 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
756
757 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
758 }
759
760 /*
761 * This function disables the event log buffer
762 */
763 static void iommu_disable_event_buffer(struct amd_iommu *iommu)
764 {
765 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
766 }
767
768 static void __init free_event_buffer(struct amd_iommu *iommu)
769 {
770 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
771 }
772
773 /* allocates the memory where the IOMMU will write its PPR log entries to */
774 static int __init alloc_ppr_log(struct amd_iommu *iommu)
775 {
776 iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
777 PPR_LOG_SIZE);
778
779 return iommu->ppr_log ? 0 : -ENOMEM;
780 }
781
782 static void iommu_enable_ppr_log(struct amd_iommu *iommu)
783 {
784 u64 entry;
785
786 if (iommu->ppr_log == NULL)
787 return;
788
789 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
790
791 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
792 &entry, sizeof(entry));
793
794 /* set head and tail to zero manually */
795 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
796 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
797
798 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
799 iommu_feature_enable(iommu, CONTROL_PPR_EN);
800 }
801
802 static void __init free_ppr_log(struct amd_iommu *iommu)
803 {
804 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
805 }
806
807 static void free_ga_log(struct amd_iommu *iommu)
808 {
809 #ifdef CONFIG_IRQ_REMAP
810 free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
811 free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
812 #endif
813 }
814
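/*
 * Program the GA (guest virtual APIC) log base and tail registers and
 * wait until the hardware reports that GA logging is running.
 */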
815 static int iommu_ga_log_enable(struct amd_iommu *iommu)
816 {
817 #ifdef CONFIG_IRQ_REMAP
818 u32 status, i;
819 u64 entry;
820
821 if (!iommu->ga_log)
822 return -EINVAL;
823
824 /* Check if already running */
825 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
826 if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
827 return 0;
828
829 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
830 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
831 &entry, sizeof(entry));
832 entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
833 (BIT_ULL(52)-1)) & ~7ULL;
834 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
835 &entry, sizeof(entry));
836 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
837 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
838
839
840 iommu_feature_enable(iommu, CONTROL_GAINT_EN);
841 iommu_feature_enable(iommu, CONTROL_GALOG_EN);
842
843 for (i = 0; i < LOOP_TIMEOUT; ++i) {
844 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
845 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
846 break;
847 udelay(10);
848 }
849
850 if (WARN_ON(i >= LOOP_TIMEOUT))
851 return -EINVAL;
852 #endif /* CONFIG_IRQ_REMAP */
853 return 0;
854 }
855
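/* Allocate the GA log buffer and its tail pointer page when guest vAPIC mode is used. */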
856 static int iommu_init_ga_log(struct amd_iommu *iommu)
857 {
858 #ifdef CONFIG_IRQ_REMAP
859 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
860 return 0;
861
862 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
863 get_order(GA_LOG_SIZE));
864 if (!iommu->ga_log)
865 goto err_out;
866
867 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
868 get_order(8));
869 if (!iommu->ga_log_tail)
870 goto err_out;
871
872 return 0;
873 err_out:
874 free_ga_log(iommu);
875 return -EINVAL;
876 #else
877 return 0;
878 #endif /* CONFIG_IRQ_REMAP */
879 }
880
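/* Allocate the page used as the completion-wait write-back semaphore. */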
881 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
882 {
883 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
884
885 return iommu->cmd_sem ? 0 : -ENOMEM;
886 }
887
888 static void __init free_cwwb_sem(struct amd_iommu *iommu)
889 {
890 if (iommu->cmd_sem)
891 free_page((unsigned long)iommu->cmd_sem);
892 }
893
894 static void iommu_enable_xt(struct amd_iommu *iommu)
895 {
896 #ifdef CONFIG_IRQ_REMAP
897 /*
898 * XT mode (32-bit APIC destination ID) requires
899 * GA mode (128-bit IRTE support) as a prerequisite.
900 */
901 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
902 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
903 iommu_feature_enable(iommu, CONTROL_XT_EN);
904 #endif /* CONFIG_IRQ_REMAP */
905 }
906
907 static void iommu_enable_gt(struct amd_iommu *iommu)
908 {
909 if (!iommu_feature(iommu, FEATURE_GT))
910 return;
911
912 iommu_feature_enable(iommu, CONTROL_GT_EN);
913 }
914
915 /* sets a specific bit in the device table entry. */
916 static void set_dev_entry_bit(u16 devid, u8 bit)
917 {
918 int i = (bit >> 6) & 0x03;
919 int _bit = bit & 0x3f;
920
921 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
922 }
923
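/* reads a specific bit from the device table entry. */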
924 static int get_dev_entry_bit(u16 devid, u8 bit)
925 {
926 int i = (bit >> 6) & 0x03;
927 int _bit = bit & 0x3f;
928
929 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
930 }
931
932
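/*
 * In a kdump kernel, copy the device table left behind by the crashed
 * kernel so that its translation and interrupt remapping settings can be
 * preserved.
 */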
933 static bool copy_device_table(void)
934 {
935 u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
936 struct dev_table_entry *old_devtb = NULL;
937 u32 lo, hi, devid, old_devtb_size;
938 phys_addr_t old_devtb_phys;
939 struct amd_iommu *iommu;
940 u16 dom_id, dte_v, irq_v;
941 gfp_t gfp_flag;
942 u64 tmp;
943
944 if (!amd_iommu_pre_enabled)
945 return false;
946
947 pr_warn("Translation is already enabled - trying to copy translation structures\n");
948 for_each_iommu(iommu) {
949 /* All IOMMUs should use the same device table with the same size */
950 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
951 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
952 entry = (((u64) hi) << 32) + lo;
953 if (last_entry && last_entry != entry) {
954 pr_err("IOMMU:%d should use the same dev table as others!\n",
955 iommu->index);
956 return false;
957 }
958 last_entry = entry;
959
960 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
961 if (old_devtb_size != dev_table_size) {
962 pr_err("The device table size of IOMMU:%d is not expected!\n",
963 iommu->index);
964 return false;
965 }
966 }
967
968 /*
969 * When SME is enabled in the first kernel, the entry includes the
970 * memory encryption mask (sme_me_mask); we must remove the memory
971 * encryption mask to obtain the true physical address in the kdump kernel.
972 */
973 old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
974
975 if (old_devtb_phys >= 0x100000000ULL) {
976 pr_err("The address of old device table is above 4G, not trustworthy!\n");
977 return false;
978 }
979 old_devtb = (sme_active() && is_kdump_kernel())
980 ? (__force void *)ioremap_encrypted(old_devtb_phys,
981 dev_table_size)
982 : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
983
984 if (!old_devtb)
985 return false;
986
987 gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
988 old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
989 get_order(dev_table_size));
990 if (old_dev_tbl_cpy == NULL) {
991 pr_err("Failed to allocate memory for copying old device table!\n");
992 return false;
993 }
994
995 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
996 old_dev_tbl_cpy[devid] = old_devtb[devid];
997 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
998 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
999
1000 if (dte_v && dom_id) {
1001 old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
1002 old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
1003 __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
1004 /* If gcr3 table existed, mask it out */
1005 if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
1006 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1007 tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1008 old_dev_tbl_cpy[devid].data[1] &= ~tmp;
1009 tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
1010 tmp |= DTE_FLAG_GV;
1011 old_dev_tbl_cpy[devid].data[0] &= ~tmp;
1012 }
1013 }
1014
1015 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
1016 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
1017 int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
1018 if (irq_v && (int_ctl || int_tab_len)) {
1019 if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
1020 (int_tab_len != DTE_IRQ_TABLE_LEN)) {
1021 pr_err("Wrong old irq remapping flag: %#x\n", devid);
1022 return false;
1023 }
1024
1025 old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
1026 }
1027 }
1028 memunmap(old_devtb);
1029
1030 return true;
1031 }
1032
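/*
 * Erratum 63 workaround: if the SYSMGT field of a device table entry is
 * set to 01b, also set the IW bit for that device.
 */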
1033 void amd_iommu_apply_erratum_63(u16 devid)
1034 {
1035 int sysmgt;
1036
1037 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
1038 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
1039
1040 if (sysmgt == 0x01)
1041 set_dev_entry_bit(devid, DEV_ENTRY_IW);
1042 }
1043
1044 /* Writes the specific IOMMU for a device into the rlookup table */
1045 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
1046 {
1047 amd_iommu_rlookup_table[devid] = iommu;
1048 }
1049
1050 /*
1051 * This function takes the device specific flags read from the ACPI
1052 * table and sets up the device table entry with that information
1053 */
1054 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1055 u16 devid, u32 flags, u32 ext_flags)
1056 {
1057 if (flags & ACPI_DEVFLAG_INITPASS)
1058 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
1059 if (flags & ACPI_DEVFLAG_EXTINT)
1060 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
1061 if (flags & ACPI_DEVFLAG_NMI)
1062 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
1063 if (flags & ACPI_DEVFLAG_SYSMGT1)
1064 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
1065 if (flags & ACPI_DEVFLAG_SYSMGT2)
1066 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
1067 if (flags & ACPI_DEVFLAG_LINT0)
1068 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
1069 if (flags & ACPI_DEVFLAG_LINT1)
1070 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
1071
1072 amd_iommu_apply_erratum_63(devid);
1073
1074 set_iommu_for_device(iommu, devid);
1075 }
1076
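/*
 * Record the device id used by an IOAPIC or HPET. A command-line
 * override, if present, takes precedence and is returned via *devid.
 */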
1077 int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
1078 {
1079 struct devid_map *entry;
1080 struct list_head *list;
1081
1082 if (type == IVHD_SPECIAL_IOAPIC)
1083 list = &ioapic_map;
1084 else if (type == IVHD_SPECIAL_HPET)
1085 list = &hpet_map;
1086 else
1087 return -EINVAL;
1088
1089 list_for_each_entry(entry, list, list) {
1090 if (!(entry->id == id && entry->cmd_line))
1091 continue;
1092
1093 pr_info("Command-line override present for %s id %d - ignoring\n",
1094 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
1095
1096 *devid = entry->devid;
1097
1098 return 0;
1099 }
1100
1101 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1102 if (!entry)
1103 return -ENOMEM;
1104
1105 entry->id = id;
1106 entry->devid = *devid;
1107 entry->cmd_line = cmd_line;
1108
1109 list_add_tail(&entry->list, list);
1110
1111 return 0;
1112 }
1113
1114 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
1115 bool cmd_line)
1116 {
1117 struct acpihid_map_entry *entry;
1118 struct list_head *list = &acpihid_map;
1119
1120 list_for_each_entry(entry, list, list) {
1121 if (strcmp(entry->hid, hid) ||
1122 (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
1123 !entry->cmd_line)
1124 continue;
1125
1126 pr_info("Command-line override for hid:%s uid:%s\n",
1127 hid, uid);
1128 *devid = entry->devid;
1129 return 0;
1130 }
1131
1132 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1133 if (!entry)
1134 return -ENOMEM;
1135
1136 memcpy(entry->uid, uid, strlen(uid));
1137 memcpy(entry->hid, hid, strlen(hid));
1138 entry->devid = *devid;
1139 entry->cmd_line = cmd_line;
1140 entry->root_devid = (entry->devid & (~0x7));
1141
1142 pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
1143 entry->cmd_line ? "cmd" : "ivrs",
1144 entry->hid, entry->uid, entry->root_devid);
1145
1146 list_add_tail(&entry->list, list);
1147 return 0;
1148 }
1149
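/* Register the ioapic/hpet/acpihid mappings given on the kernel command line. */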
1150 static int __init add_early_maps(void)
1151 {
1152 int i, ret;
1153
1154 for (i = 0; i < early_ioapic_map_size; ++i) {
1155 ret = add_special_device(IVHD_SPECIAL_IOAPIC,
1156 early_ioapic_map[i].id,
1157 &early_ioapic_map[i].devid,
1158 early_ioapic_map[i].cmd_line);
1159 if (ret)
1160 return ret;
1161 }
1162
1163 for (i = 0; i < early_hpet_map_size; ++i) {
1164 ret = add_special_device(IVHD_SPECIAL_HPET,
1165 early_hpet_map[i].id,
1166 &early_hpet_map[i].devid,
1167 early_hpet_map[i].cmd_line);
1168 if (ret)
1169 return ret;
1170 }
1171
1172 for (i = 0; i < early_acpihid_map_size; ++i) {
1173 ret = add_acpi_hid_device(early_acpihid_map[i].hid,
1174 early_acpihid_map[i].uid,
1175 &early_acpihid_map[i].devid,
1176 early_acpihid_map[i].cmd_line);
1177 if (ret)
1178 return ret;
1179 }
1180
1181 return 0;
1182 }
1183
1184 /*
1185 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1186 * initializes the hardware and our data structures with it.
1187 */
1188 static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1189 struct ivhd_header *h)
1190 {
1191 u8 *p = (u8 *)h;
1192 u8 *end = p, flags = 0;
1193 u16 devid = 0, devid_start = 0, devid_to = 0;
1194 u32 dev_i, ext_flags = 0;
1195 bool alias = false;
1196 struct ivhd_entry *e;
1197 u32 ivhd_size;
1198 int ret;
1199
1200
1201 ret = add_early_maps();
1202 if (ret)
1203 return ret;
1204
1205 amd_iommu_apply_ivrs_quirks();
1206
1207 /*
1208 * First save the recommended feature enable bits from ACPI
1209 */
1210 iommu->acpi_flags = h->flags;
1211
1212 /*
1213 * Done. Now parse the device entries
1214 */
1215 ivhd_size = get_ivhd_header_size(h);
1216 if (!ivhd_size) {
1217 pr_err("Unsupported IVHD type %#x\n", h->type);
1218 return -EINVAL;
1219 }
1220
1221 p += ivhd_size;
1222
1223 end += h->length;
1224
1225
1226 while (p < end) {
1227 e = (struct ivhd_entry *)p;
1228 switch (e->type) {
1229 case IVHD_DEV_ALL:
1230
1231 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
1232
1233 for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
1234 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1235 break;
1236 case IVHD_DEV_SELECT:
1237
1238 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
1239 "flags: %02x\n",
1240 PCI_BUS_NUM(e->devid),
1241 PCI_SLOT(e->devid),
1242 PCI_FUNC(e->devid),
1243 e->flags);
1244
1245 devid = e->devid;
1246 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1247 break;
1248 case IVHD_DEV_SELECT_RANGE_START:
1249
1250 DUMP_printk(" DEV_SELECT_RANGE_START\t "
1251 "devid: %02x:%02x.%x flags: %02x\n",
1252 PCI_BUS_NUM(e->devid),
1253 PCI_SLOT(e->devid),
1254 PCI_FUNC(e->devid),
1255 e->flags);
1256
1257 devid_start = e->devid;
1258 flags = e->flags;
1259 ext_flags = 0;
1260 alias = false;
1261 break;
1262 case IVHD_DEV_ALIAS:
1263
1264 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
1265 "flags: %02x devid_to: %02x:%02x.%x\n",
1266 PCI_BUS_NUM(e->devid),
1267 PCI_SLOT(e->devid),
1268 PCI_FUNC(e->devid),
1269 e->flags,
1270 PCI_BUS_NUM(e->ext >> 8),
1271 PCI_SLOT(e->ext >> 8),
1272 PCI_FUNC(e->ext >> 8));
1273
1274 devid = e->devid;
1275 devid_to = e->ext >> 8;
1276 set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
1277 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1278 amd_iommu_alias_table[devid] = devid_to;
1279 break;
1280 case IVHD_DEV_ALIAS_RANGE:
1281
1282 DUMP_printk(" DEV_ALIAS_RANGE\t\t "
1283 "devid: %02x:%02x.%x flags: %02x "
1284 "devid_to: %02x:%02x.%x\n",
1285 PCI_BUS_NUM(e->devid),
1286 PCI_SLOT(e->devid),
1287 PCI_FUNC(e->devid),
1288 e->flags,
1289 PCI_BUS_NUM(e->ext >> 8),
1290 PCI_SLOT(e->ext >> 8),
1291 PCI_FUNC(e->ext >> 8));
1292
1293 devid_start = e->devid;
1294 flags = e->flags;
1295 devid_to = e->ext >> 8;
1296 ext_flags = 0;
1297 alias = true;
1298 break;
1299 case IVHD_DEV_EXT_SELECT:
1300
1301 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
1302 "flags: %02x ext: %08x\n",
1303 PCI_BUS_NUM(e->devid),
1304 PCI_SLOT(e->devid),
1305 PCI_FUNC(e->devid),
1306 e->flags, e->ext);
1307
1308 devid = e->devid;
1309 set_dev_entry_from_acpi(iommu, devid, e->flags,
1310 e->ext);
1311 break;
1312 case IVHD_DEV_EXT_SELECT_RANGE:
1313
1314 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
1315 "%02x:%02x.%x flags: %02x ext: %08x\n",
1316 PCI_BUS_NUM(e->devid),
1317 PCI_SLOT(e->devid),
1318 PCI_FUNC(e->devid),
1319 e->flags, e->ext);
1320
1321 devid_start = e->devid;
1322 flags = e->flags;
1323 ext_flags = e->ext;
1324 alias = false;
1325 break;
1326 case IVHD_DEV_RANGE_END:
1327
1328 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
1329 PCI_BUS_NUM(e->devid),
1330 PCI_SLOT(e->devid),
1331 PCI_FUNC(e->devid));
1332
1333 devid = e->devid;
1334 for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
1335 if (alias) {
1336 amd_iommu_alias_table[dev_i] = devid_to;
1337 set_dev_entry_from_acpi(iommu,
1338 devid_to, flags, ext_flags);
1339 }
1340 set_dev_entry_from_acpi(iommu, dev_i,
1341 flags, ext_flags);
1342 }
1343 break;
1344 case IVHD_DEV_SPECIAL: {
1345 u8 handle, type;
1346 const char *var;
1347 u16 devid;
1348 int ret;
1349
1350 handle = e->ext & 0xff;
1351 devid = (e->ext >> 8) & 0xffff;
1352 type = (e->ext >> 24) & 0xff;
1353
1354 if (type == IVHD_SPECIAL_IOAPIC)
1355 var = "IOAPIC";
1356 else if (type == IVHD_SPECIAL_HPET)
1357 var = "HPET";
1358 else
1359 var = "UNKNOWN";
1360
1361 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
1362 var, (int)handle,
1363 PCI_BUS_NUM(devid),
1364 PCI_SLOT(devid),
1365 PCI_FUNC(devid));
1366
1367 ret = add_special_device(type, handle, &devid, false);
1368 if (ret)
1369 return ret;
1370
1371 /*
1372 * add_special_device might update the devid in case a
1373 * command-line override is present. So call
1374 * set_dev_entry_from_acpi after add_special_device.
1375 */
1376 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1377
1378 break;
1379 }
1380 case IVHD_DEV_ACPI_HID: {
1381 u16 devid;
1382 u8 hid[ACPIHID_HID_LEN];
1383 u8 uid[ACPIHID_UID_LEN];
1384 int ret;
1385
1386 if (h->type != 0x40) {
1387 pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1388 e->type);
1389 break;
1390 }
1391
1392 memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
1393 hid[ACPIHID_HID_LEN - 1] = '\0';
1394
1395 if (!(*hid)) {
1396 pr_err(FW_BUG "Invalid HID.\n");
1397 break;
1398 }
1399
1400 uid[0] = '\0';
1401 switch (e->uidf) {
1402 case UID_NOT_PRESENT:
1403
1404 if (e->uidl != 0)
1405 pr_warn(FW_BUG "Invalid UID length.\n");
1406
1407 break;
1408 case UID_IS_INTEGER:
1409
1410 sprintf(uid, "%d", e->uid);
1411
1412 break;
1413 case UID_IS_CHARACTER:
1414
1415 memcpy(uid, &e->uid, e->uidl);
1416 uid[e->uidl] = '\0';
1417
1418 break;
1419 default:
1420 break;
1421 }
1422
1423 devid = e->devid;
1424 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
1425 hid, uid,
1426 PCI_BUS_NUM(devid),
1427 PCI_SLOT(devid),
1428 PCI_FUNC(devid));
1429
1430 flags = e->flags;
1431
1432 ret = add_acpi_hid_device(hid, uid, &devid, false);
1433 if (ret)
1434 return ret;
1435
1436 /*
1437 * add_acpi_hid_device might update the devid in case a
1438 * command-line override is present. So call
1439 * set_dev_entry_from_acpi after add_acpi_hid_device.
1440 */
1441 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1442
1443 break;
1444 }
1445 default:
1446 break;
1447 }
1448
1449 p += ivhd_entry_length(p);
1450 }
1451
1452 return 0;
1453 }
1454
1455 static void __init free_iommu_one(struct amd_iommu *iommu)
1456 {
1457 free_cwwb_sem(iommu);
1458 free_command_buffer(iommu);
1459 free_event_buffer(iommu);
1460 free_ppr_log(iommu);
1461 free_ga_log(iommu);
1462 iommu_unmap_mmio_space(iommu);
1463 }
1464
1465 static void __init free_iommu_all(void)
1466 {
1467 struct amd_iommu *iommu, *next;
1468
1469 for_each_iommu_safe(iommu, next) {
1470 list_del(&iommu->list);
1471 free_iommu_one(iommu);
1472 kfree(iommu);
1473 }
1474 }
1475
1476 /*
1477 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1478 * Workaround:
1479 * BIOS should disable L2B miscellaneous clock gating by setting
1480 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1481 */
1482 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1483 {
1484 u32 value;
1485
1486 if ((boot_cpu_data.x86 != 0x15) ||
1487 (boot_cpu_data.x86_model < 0x10) ||
1488 (boot_cpu_data.x86_model > 0x1f))
1489 return;
1490
1491 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1492 pci_read_config_dword(iommu->dev, 0xf4, &value);
1493
1494 if (value & BIT(2))
1495 return;
1496
1497 /* Select NB indirect register 0x90 and enable writing */
1498 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1499
1500 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1501 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
1502
1503 /* Clear the enable writing bit */
1504 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1505 }
1506
1507 /*
1508 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1509 * Workaround:
1510 * BIOS should enable ATS write permission check by setting
1511 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1512 */
1513 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1514 {
1515 u32 value;
1516
1517 if ((boot_cpu_data.x86 != 0x15) ||
1518 (boot_cpu_data.x86_model < 0x30) ||
1519 (boot_cpu_data.x86_model > 0x3f))
1520 return;
1521
1522 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1523 value = iommu_read_l2(iommu, 0x47);
1524
1525 if (value & BIT(0))
1526 return;
1527
1528 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1529 iommu_write_l2(iommu, 0x47, value | BIT(0));
1530
1531 pci_info(iommu->dev, "Applying ATS write check workaround\n");
1532 }
1533
1534 /*
1535 * This function glues the initialization of one IOMMU
1536 * together and also allocates the command buffer and programs the
1537 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1538 */
1539 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1540 {
1541 int ret;
1542
1543 raw_spin_lock_init(&iommu->lock);
1544 iommu->cmd_sem_val = 0;
1545
1546 /* Add IOMMU to internal data structures */
1547 list_add_tail(&iommu->list, &amd_iommu_list);
1548 iommu->index = amd_iommus_present++;
1549
1550 if (unlikely(iommu->index >= MAX_IOMMUS)) {
1551 WARN(1, "System has more IOMMUs than supported by this driver\n");
1552 return -ENOSYS;
1553 }
1554
1555 /* Index is fine - add IOMMU to the array */
1556 amd_iommus[iommu->index] = iommu;
1557
1558 /*
1559 * Copy data from ACPI table entry to the iommu struct
1560 */
1561 iommu->devid = h->devid;
1562 iommu->cap_ptr = h->cap_ptr;
1563 iommu->pci_seg = h->pci_seg;
1564 iommu->mmio_phys = h->mmio_phys;
1565
1566 switch (h->type) {
1567 case 0x10:
1568 /* Check if IVHD EFR contains proper max banks/counters */
1569 if ((h->efr_attr != 0) &&
1570 ((h->efr_attr & (0xF << 13)) != 0) &&
1571 ((h->efr_attr & (0x3F << 17)) != 0))
1572 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1573 else
1574 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1575
1576 /*
1577 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
1578 * GAM also requires GA mode. Therefore, we need to
1579 * check cmpxchg16b support before enabling it.
1580 */
1581 if (!boot_cpu_has(X86_FEATURE_CX16) ||
1582 ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1583 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1584 break;
1585 case 0x11:
1586 case 0x40:
1587 if (h->efr_reg & (1 << 9))
1588 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1589 else
1590 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1591
1592 /*
1593 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
1594 * XT, GAM also requires GA mode. Therefore, we need to
1595 * check cmpxchg16b support before enabling them.
1596 */
1597 if (!boot_cpu_has(X86_FEATURE_CX16) ||
1598 ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
1599 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1600 break;
1601 }
1602
1603 /*
1604 * Note: Since iommu_update_intcapxt() leverages
1605 * the IOMMU MMIO access to MSI capability block registers
1606 * for MSI address lo/hi/data, we need to check both
1607 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
1608 */
1609 if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
1610 (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
1611 amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
1612
1613 early_iommu_features_init(iommu, h);
1614
1615 break;
1616 default:
1617 return -EINVAL;
1618 }
1619
1620 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1621 iommu->mmio_phys_end);
1622 if (!iommu->mmio_base)
1623 return -ENOMEM;
1624
1625 if (alloc_cwwb_sem(iommu))
1626 return -ENOMEM;
1627
1628 if (alloc_command_buffer(iommu))
1629 return -ENOMEM;
1630
1631 if (alloc_event_buffer(iommu))
1632 return -ENOMEM;
1633
1634 iommu->int_enabled = false;
1635
1636 init_translation_status(iommu);
1637 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1638 iommu_disable(iommu);
1639 clear_translation_pre_enabled(iommu);
1640 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1641 iommu->index);
1642 }
1643 if (amd_iommu_pre_enabled)
1644 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
1645
1646 ret = init_iommu_from_acpi(iommu, h);
1647 if (ret)
1648 return ret;
1649
1650 ret = amd_iommu_create_irq_domain(iommu);
1651 if (ret)
1652 return ret;
1653
1654 /*
1655 * Make sure IOMMU is not considered to translate itself. The IVRS
1656 * table tells us so, but this is a lie!
1657 */
1658 amd_iommu_rlookup_table[iommu->devid] = NULL;
1659
1660 return 0;
1661 }
1662
1663 /**
1664 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1665 * @ivrs: Pointer to the IVRS header
1666 *
1667 * This function searches through all IVHD blocks and returns the highest supported IVHD type.
1668 */
1669 static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1670 {
1671 u8 *base = (u8 *)ivrs;
1672 struct ivhd_header *ivhd = (struct ivhd_header *)
1673 (base + IVRS_HEADER_LENGTH);
1674 u8 last_type = ivhd->type;
1675 u16 devid = ivhd->devid;
1676
1677 while (((u8 *)ivhd - base < ivrs->length) &&
1678 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1679 u8 *p = (u8 *) ivhd;
1680
1681 if (ivhd->devid == devid)
1682 last_type = ivhd->type;
1683 ivhd = (struct ivhd_header *)(p + ivhd->length);
1684 }
1685
1686 return last_type;
1687 }
1688
1689 /*
1690 * Iterates over all IOMMU entries in the ACPI table, allocates the
1691 * IOMMU structure and initializes it with init_iommu_one()
1692 */
1693 static int __init init_iommu_all(struct acpi_table_header *table)
1694 {
1695 u8 *p = (u8 *)table, *end = (u8 *)table;
1696 struct ivhd_header *h;
1697 struct amd_iommu *iommu;
1698 int ret;
1699
1700 end += table->length;
1701 p += IVRS_HEADER_LENGTH;
1702
1703 while (p < end) {
1704 h = (struct ivhd_header *)p;
1705 if (*p == amd_iommu_target_ivhd_type) {
1706
1707 DUMP_printk("device: %02x:%02x.%01x cap: %04x "
1708 "seg: %d flags: %01x info %04x\n",
1709 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
1710 PCI_FUNC(h->devid), h->cap_ptr,
1711 h->pci_seg, h->flags, h->info);
1712 DUMP_printk(" mmio-addr: %016llx\n",
1713 h->mmio_phys);
1714
1715 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1716 if (iommu == NULL)
1717 return -ENOMEM;
1718
1719 ret = init_iommu_one(iommu, h);
1720 if (ret)
1721 return ret;
1722 }
1723 p += h->length;
1724
1725 }
1726 WARN_ON(p != end);
1727
1728 return 0;
1729 }
1730
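/* Detect performance counter support and read the number of counter banks and counters. */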
1731 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1732 {
1733 u64 val;
1734 struct pci_dev *pdev = iommu->dev;
1735
1736 if (!iommu_feature(iommu, FEATURE_PC))
1737 return;
1738
1739 amd_iommu_pc_present = true;
1740
1741 pci_info(pdev, "IOMMU performance counters supported\n");
1742
1743 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1744 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1745 iommu->max_counters = (u8) ((val >> 7) & 0xf);
1746
1747 return;
1748 }
1749
1750 static ssize_t amd_iommu_show_cap(struct device *dev,
1751 struct device_attribute *attr,
1752 char *buf)
1753 {
1754 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1755 return sprintf(buf, "%x\n", iommu->cap);
1756 }
1757 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1758
1759 static ssize_t amd_iommu_show_features(struct device *dev,
1760 struct device_attribute *attr,
1761 char *buf)
1762 {
1763 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1764 return sprintf(buf, "%llx\n", iommu->features);
1765 }
1766 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1767
1768 static struct attribute *amd_iommu_attrs[] = {
1769 &dev_attr_cap.attr,
1770 &dev_attr_features.attr,
1771 NULL,
1772 };
1773
1774 static struct attribute_group amd_iommu_group = {
1775 .name = "amd-iommu",
1776 .attrs = amd_iommu_attrs,
1777 };
1778
1779 static const struct attribute_group *amd_iommu_groups[] = {
1780 &amd_iommu_group,
1781 NULL,
1782 };
1783
1784 /*
1785 * Note: IVHD types 0x11 and 0x40 also contain an exact copy
1786 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
1787 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
1788 */
1789 static void __init late_iommu_features_init(struct amd_iommu *iommu)
1790 {
1791 u64 features;
1792
1793 if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
1794 return;
1795
1796 /* read extended feature bits */
1797 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
1798
1799 if (!iommu->features) {
1800 iommu->features = features;
1801 return;
1802 }
1803
1804 /*
1805 * Sanity check and warn if EFR values from
1806 * IVHD and MMIO conflict.
1807 */
1808 if (features != iommu->features)
1809 pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
1810 features, iommu->features);
1811 }
1812
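/*
 * Look up the PCI device behind this IOMMU, read its capabilities and
 * extended features, allocate the PPR and GA logs and register the IOMMU
 * with sysfs and the IOMMU core.
 */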
1813 static int __init iommu_init_pci(struct amd_iommu *iommu)
1814 {
1815 int cap_ptr = iommu->cap_ptr;
1816 int ret;
1817
1818 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1819 iommu->devid & 0xff);
1820 if (!iommu->dev)
1821 return -ENODEV;
1822
1823 /* Prevent binding other PCI device drivers to IOMMU devices */
1824 iommu->dev->match_driver = false;
1825
1826 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1827 &iommu->cap);
1828
1829 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1830 amd_iommu_iotlb_sup = false;
1831
1832 late_iommu_features_init(iommu);
1833
1834 if (iommu_feature(iommu, FEATURE_GT)) {
1835 int glxval;
1836 u32 max_pasid;
1837 u64 pasmax;
1838
1839 pasmax = iommu->features & FEATURE_PASID_MASK;
1840 pasmax >>= FEATURE_PASID_SHIFT;
1841 max_pasid = (1 << (pasmax + 1)) - 1;
1842
1843 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
1844
1845 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
1846
1847 glxval = iommu->features & FEATURE_GLXVAL_MASK;
1848 glxval >>= FEATURE_GLXVAL_SHIFT;
1849
1850 if (amd_iommu_max_glx_val == -1)
1851 amd_iommu_max_glx_val = glxval;
1852 else
1853 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
1854 }
1855
1856 if (iommu_feature(iommu, FEATURE_GT) &&
1857 iommu_feature(iommu, FEATURE_PPR)) {
1858 iommu->is_iommu_v2 = true;
1859 amd_iommu_v2_present = true;
1860 }
1861
1862 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1863 return -ENOMEM;
1864
1865 ret = iommu_init_ga_log(iommu);
1866 if (ret)
1867 return ret;
1868
1869 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1870 amd_iommu_np_cache = true;
1871
1872 init_iommu_perf_ctr(iommu);
1873
1874 if (is_rd890_iommu(iommu->dev)) {
1875 int i, j;
1876
1877 iommu->root_pdev =
1878 pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
1879 PCI_DEVFN(0, 0));
1880
1881 /*
1882 * Some rd890 systems may not be fully reconfigured by the
1883 * BIOS, so it's necessary for us to store this information so
1884 * it can be reprogrammed on resume
1885 */
1886 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1887 &iommu->stored_addr_lo);
1888 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1889 &iommu->stored_addr_hi);
1890
1891 /* Low bit locks writes to configuration space */
1892 iommu->stored_addr_lo &= ~1;
1893
1894 for (i = 0; i < 6; i++)
1895 for (j = 0; j < 0x12; j++)
1896 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1897
1898 for (i = 0; i < 0x83; i++)
1899 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1900 }
1901
1902 amd_iommu_erratum_746_workaround(iommu);
1903 amd_iommu_ats_write_check_workaround(iommu);
1904
1905 iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
1906 amd_iommu_groups, "ivhd%d", iommu->index);
1907 iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
1908 iommu_device_register(&iommu->iommu);
1909
1910 return pci_enable_device(iommu->dev);
1911 }
1912
1913 static void print_iommu_info(void)
1914 {
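/*
 * feat_str[i] corresponds to bit i of the Extended Feature Register
 * checked via iommu_feature() below; "[5]" is only a placeholder that
 * keeps the indices aligned with the register bits.
 */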
1915 static const char * const feat_str[] = {
1916 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
1917 "IA", "GA", "HE", "PC"
1918 };
1919 struct amd_iommu *iommu;
1920
1921 for_each_iommu(iommu) {
1922 struct pci_dev *pdev = iommu->dev;
1923 int i;
1924
1925 pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
1926
1927 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
1928 pr_info("Extended features (%#llx):", iommu->features);
1929
1930 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
1931 if (iommu_feature(iommu, (1ULL << i)))
1932 pr_cont(" %s", feat_str[i]);
1933 }
1934
1935 if (iommu->features & FEATURE_GAM_VAPIC)
1936 pr_cont(" GA_vAPIC");
1937
1938 pr_cont("\n");
1939 }
1940 }
1941 if (irq_remapping_enabled) {
1942 pr_info("Interrupt remapping enabled\n");
1943 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
1944 pr_info("Virtual APIC enabled\n");
1945 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
1946 pr_info("X2APIC enabled\n");
1947 }
1948 }
1949
1950 static int __init amd_iommu_init_pci(void)
1951 {
1952 struct amd_iommu *iommu;
1953 int ret = 0;
1954
1955 for_each_iommu(iommu) {
1956 ret = iommu_init_pci(iommu);
1957 if (ret)
1958 break;
1959
1960 /* Need to setup range after PCI init */
1961 iommu_set_cwwb_range(iommu);
1962 }
1963
1964 /*
1965 * Order is important here to make sure any unity map requirements are
1966 * fulfilled. The unity mappings are created and written to the device
1967 * table during the amd_iommu_init_api() call.
1968 *
1969 * After that we call init_device_table_dma() to make sure any
1970 * uninitialized DTE will block DMA, and in the end we flush the caches
1971 * of all IOMMUs to make sure the changes to the device table are
1972 * active.
1973 */
1974 ret = amd_iommu_init_api();
1975
1976 init_device_table_dma();
1977
1978 for_each_iommu(iommu)
1979 iommu_flush_all_caches(iommu);
1980
1981 if (!ret)
1982 print_iommu_info();
1983
1984 return ret;
1985 }
1986
1987 /****************************************************************************
1988 *
1989 * The following functions initialize the MSI interrupts for all IOMMUs
1990 * in the system. It's a bit challenging because there could be multiple
1991 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
1992 * pci_dev.
1993 *
1994 ****************************************************************************/
1995
1996 static int iommu_setup_msi(struct amd_iommu *iommu)
1997 {
1998 int r;
1999
2000 r = pci_enable_msi(iommu->dev);
2001 if (r)
2002 return r;
2003
2004 r = request_threaded_irq(iommu->dev->irq,
2005 amd_iommu_int_handler,
2006 amd_iommu_int_thread,
2007 0, "AMD-Vi",
2008 iommu);
2009
2010 if (r) {
2011 pci_disable_msi(iommu->dev);
2012 return r;
2013 }
2014
2015 iommu->int_enabled = true;
2016
2017 return 0;
2018 }
2019
2020 #define XT_INT_DEST_MODE(x) (((x) & 0x1ULL) << 2)
2021 #define XT_INT_DEST_LO(x) (((x) & 0xFFFFFFULL) << 8)
2022 #define XT_INT_VEC(x) (((x) & 0xFFULL) << 32)
2023 #define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56)
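/*
 * The macros above assemble the 64-bit IntCapXT register value written
 * by iommu_update_intcapxt() below: bit 2 holds the destination mode,
 * bits 31:8 the low 24 bits of the destination APIC ID, bits 39:32 the
 * interrupt vector and bits 63:56 the upper 8 destination ID bits used
 * in x2APIC mode.
 */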
2024
2025 /*
2026 * Set up the IntCapXT registers with interrupt routing information
2027 * based on the PCI MSI capability block registers, accessed via
2028 * MMIO MSI address low/hi and MSI data registers.
2029 */
2030 static void iommu_update_intcapxt(struct amd_iommu *iommu)
2031 {
2032 u64 val;
2033 u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
2034 u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
2035 u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
2036 bool dm = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
2037 u32 dest = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);
2038
2039 if (x2apic_enabled())
2040 dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);
2041
2042 val = XT_INT_VEC(data & 0xFF) |
2043 XT_INT_DEST_MODE(dm) |
2044 XT_INT_DEST_LO(dest) |
2045 XT_INT_DEST_HI(dest);
2046
2047 /*
2048 * The current IOMMU implementation uses the same IRQ for all
2049 * 3 IOMMU interrupts.
2050 */
2051 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
2052 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
2053 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
2054 }
2055
2056 static void _irq_notifier_notify(struct irq_affinity_notify *notify,
2057 const cpumask_t *mask)
2058 {
2059 struct amd_iommu *iommu;
2060
2061 for_each_iommu(iommu) {
2062 if (iommu->dev->irq == notify->irq) {
2063 iommu_update_intcapxt(iommu);
2064 break;
2065 }
2066 }
2067 }
2068
2069 static void _irq_notifier_release(struct kref *ref)
2070 {
2071 }
2072
2073 static int iommu_init_intcapxt(struct amd_iommu *iommu)
2074 {
2075 int ret;
2076 struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
2077
2078 /*
2079 * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
2080 * which can be inferred from amd_iommu_xt_mode.
2081 */
2082 if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
2083 return 0;
2084
2085 /*
2086 * Also, we need to set up a notifier to update the IntCapXT registers
2087 * whenever the irq affinity is changed from user-space.
2088 */
2089 notify->irq = iommu->dev->irq;
2090 notify->notify = _irq_notifier_notify;
2091 notify->release = _irq_notifier_release;
2092 ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
2093 if (ret) {
2094 pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
2095 iommu->devid, iommu->dev->irq);
2096 return ret;
2097 }
2098
2099 iommu_update_intcapxt(iommu);
2100 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2101 return ret;
2102 }
2103
2104 static int iommu_init_msi(struct amd_iommu *iommu)
2105 {
2106 int ret;
2107
2108 if (iommu->int_enabled)
2109 goto enable_faults;
2110
2111 if (iommu->dev->msi_cap)
2112 ret = iommu_setup_msi(iommu);
2113 else
2114 ret = -ENODEV;
2115
2116 if (ret)
2117 return ret;
2118
2119 enable_faults:
2120 ret = iommu_init_intcapxt(iommu);
2121 if (ret)
2122 return ret;
2123
2124 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2125
2126 if (iommu->ppr_log != NULL)
2127 iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
2128
2129 iommu_ga_log_enable(iommu);
2130
2131 return 0;
2132 }
2133
2134 /****************************************************************************
2135 *
2136 * The next functions belong to the final pass of parsing the ACPI
2137 * table. In this pass the memory mapping requirements are gathered
2138 * (like exclusion and unity mapping ranges).
2139 *
2140 ****************************************************************************/
2141
2142 static void __init free_unity_maps(void)
2143 {
2144 struct unity_map_entry *entry, *next;
2145
2146 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
2147 list_del(&entry->list);
2148 kfree(entry);
2149 }
2150 }
2151
2152 /* called for unity map ACPI definition */
2153 static int __init init_unity_map_range(struct ivmd_header *m)
2154 {
2155 struct unity_map_entry *e = NULL;
2156 char *s;
2157
2158 e = kzalloc(sizeof(*e), GFP_KERNEL);
2159 if (e == NULL)
2160 return -ENOMEM;
2161
2162 switch (m->type) {
2163 default:
2164 kfree(e);
2165 return 0;
2166 case ACPI_IVMD_TYPE:
2167 s = "IVMD_TYPE\t\t\t";
2168 e->devid_start = e->devid_end = m->devid;
2169 break;
2170 case ACPI_IVMD_TYPE_ALL:
2171 s = "IVMD_TYPE_ALL\t\t";
2172 e->devid_start = 0;
2173 e->devid_end = amd_iommu_last_bdf;
2174 break;
2175 case ACPI_IVMD_TYPE_RANGE:
2176 s = "IVMD_TYPE_RANGE\t\t";
2177 e->devid_start = m->devid;
2178 e->devid_end = m->aux;
2179 break;
2180 }
2181 e->address_start = PAGE_ALIGN(m->range_start);
2182 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
2183 e->prot = m->flags >> 1;
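/*
 * The shift lines the IVMD_FLAG_IR/IW bits (0x02/0x04) up with the
 * IOMMU_PROT_IR/IW values (0x01/0x02) consumed by the mapping code.
 */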
2184
2185 /*
2186 * Treat per-device exclusion ranges as r/w unity-mapped regions
2187 * because some buggy BIOSes overwrite the exclusion range
2188 * (the exclusion_start and exclusion_length members). This
2189 * happens when multiple exclusion ranges (IVMD entries) are
2190 * defined in the ACPI table.
2191 */
2192 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2193 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
2194
2195 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
2196 " range_start: %016llx range_end: %016llx flags: %x\n", s,
2197 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2198 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
2199 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2200 e->address_start, e->address_end, m->flags);
2201
2202 list_add_tail(&e->list, &amd_iommu_unity_map);
2203
2204 return 0;
2205 }
2206
2207 /* iterates over all memory definitions we find in the ACPI table */
2208 static int __init init_memory_definitions(struct acpi_table_header *table)
2209 {
2210 u8 *p = (u8 *)table, *end = (u8 *)table;
2211 struct ivmd_header *m;
2212
2213 end += table->length;
2214 p += IVRS_HEADER_LENGTH;
2215
2216 while (p < end) {
2217 m = (struct ivmd_header *)p;
2218 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2219 init_unity_map_range(m);
2220
2221 p += m->length;
2222 }
2223
2224 return 0;
2225 }
2226
2227 /*
2228 * Init the device table to disallow DMA access for all devices
2229 */
2230 static void init_device_table_dma(void)
2231 {
2232 u32 devid;
2233
2234 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2235 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
2236 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
2237 }
2238 }
2239
2240 static void __init uninit_device_table_dma(void)
2241 {
2242 u32 devid;
2243
2244 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2245 amd_iommu_dev_table[devid].data[0] = 0ULL;
2246 amd_iommu_dev_table[devid].data[1] = 0ULL;
2247 }
2248 }
2249
2250 static void init_device_table(void)
2251 {
2252 u32 devid;
2253
2254 if (!amd_iommu_irq_remap)
2255 return;
2256
2257 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
2258 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
2259 }
2260
2261 static void iommu_init_flags(struct amd_iommu *iommu)
2262 {
2263 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2264 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2265 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2266
2267 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2268 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2269 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2270
2271 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2272 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2273 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2274
2275 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2276 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2277 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2278
2279 /*
2280 * make IOMMU memory accesses cache coherent
2281 */
2282 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2283
2284 /* Set IOTLB invalidation timeout to 1s */
2285 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
2286 }
2287
2288 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2289 {
2290 int i, j;
2291 u32 ioc_feature_control;
2292 struct pci_dev *pdev = iommu->root_pdev;
2293
2294 /* RD890 BIOSes may not have completely reconfigured the iommu */
2295 if (!is_rd890_iommu(iommu->dev) || !pdev)
2296 return;
2297
2298 /*
2299 * First, we need to ensure that the iommu is enabled. This is
2300 * controlled by a register in the northbridge
2301 */
2302
2303 /* Select Northbridge indirect register 0x75 and enable writing */
2304 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2305 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2306
2307 /* Enable the iommu */
2308 if (!(ioc_feature_control & 0x1))
2309 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2310
2311 /* Restore the iommu BAR */
2312 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2313 iommu->stored_addr_lo);
2314 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2315 iommu->stored_addr_hi);
2316
2317 /* Restore the l1 indirect regs for each of the 6 l1s */
2318 for (i = 0; i < 6; i++)
2319 for (j = 0; j < 0x12; j++)
2320 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2321
2322 /* Restore the l2 indirect regs */
2323 for (i = 0; i < 0x83; i++)
2324 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2325
2326 /* Lock PCI setup registers */
2327 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2328 iommu->stored_addr_lo | 1);
2329 }
2330
2331 static void iommu_enable_ga(struct amd_iommu *iommu)
2332 {
2333 #ifdef CONFIG_IRQ_REMAP
2334 switch (amd_iommu_guest_ir) {
2335 case AMD_IOMMU_GUEST_IR_VAPIC:
2336 iommu_feature_enable(iommu, CONTROL_GAM_EN);
2337 fallthrough;
2338 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2339 iommu_feature_enable(iommu, CONTROL_GA_EN);
2340 iommu->irte_ops = &irte_128_ops;
2341 break;
2342 default:
2343 iommu->irte_ops = &irte_32_ops;
2344 break;
2345 }
2346 #endif
2347 }
2348
2349 static void early_enable_iommu(struct amd_iommu *iommu)
2350 {
2351 iommu_disable(iommu);
2352 iommu_init_flags(iommu);
2353 iommu_set_device_table(iommu);
2354 iommu_enable_command_buffer(iommu);
2355 iommu_enable_event_buffer(iommu);
2356 iommu_set_exclusion_range(iommu);
2357 iommu_enable_ga(iommu);
2358 iommu_enable_xt(iommu);
2359 iommu_enable(iommu);
2360 iommu_flush_all_caches(iommu);
2361 }
2362
2363 /*
2364 * This function finally enables all IOMMUs found in the system after
2365 * they have been initialized.
2366 *
2367 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, try to
2368 * copy the old contents of the device table entries. If that is not the
2369 * case, or the copy fails, just continue as a normal kernel does.
2370 */
2371 static void early_enable_iommus(void)
2372 {
2373 struct amd_iommu *iommu;
2374
2375
2376 if (!copy_device_table()) {
2377 /*
2378 * If we get here because copying the device table from the old
2379 * kernel (with all IOMMUs enabled) failed, print an error message
2380 * and try to free the allocated old_dev_tbl_cpy.
2381 */
2382 if (amd_iommu_pre_enabled)
2383 pr_err("Failed to copy DEV table from previous kernel.\n");
2384 if (old_dev_tbl_cpy != NULL)
2385 free_pages((unsigned long)old_dev_tbl_cpy,
2386 get_order(dev_table_size));
2387
2388 for_each_iommu(iommu) {
2389 clear_translation_pre_enabled(iommu);
2390 early_enable_iommu(iommu);
2391 }
2392 } else {
2393 pr_info("Copied DEV table from previous kernel.\n");
2394 free_pages((unsigned long)amd_iommu_dev_table,
2395 get_order(dev_table_size));
2396 amd_iommu_dev_table = old_dev_tbl_cpy;
2397 for_each_iommu(iommu) {
2398 iommu_disable_command_buffer(iommu);
2399 iommu_disable_event_buffer(iommu);
2400 iommu_enable_command_buffer(iommu);
2401 iommu_enable_event_buffer(iommu);
2402 iommu_enable_ga(iommu);
2403 iommu_enable_xt(iommu);
2404 iommu_set_device_table(iommu);
2405 iommu_flush_all_caches(iommu);
2406 }
2407 }
2408
2409 #ifdef CONFIG_IRQ_REMAP
2410 /*
2411 * Note: We have already checked GASup from IVRS table.
2412 * Now, we need to make sure that GAMSup is set.
2413 */
2414 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
2415 !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
2416 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2417
2418 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2419 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2420 #endif
2421 }
2422
2423 static void enable_iommus_v2(void)
2424 {
2425 struct amd_iommu *iommu;
2426
2427 for_each_iommu(iommu) {
2428 iommu_enable_ppr_log(iommu);
2429 iommu_enable_gt(iommu);
2430 }
2431 }
2432
2433 static void enable_iommus(void)
2434 {
2435 early_enable_iommus();
2436
2437 enable_iommus_v2();
2438 }
2439
2440 static void disable_iommus(void)
2441 {
2442 struct amd_iommu *iommu;
2443
2444 for_each_iommu(iommu)
2445 iommu_disable(iommu);
2446
2447 #ifdef CONFIG_IRQ_REMAP
2448 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2449 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2450 #endif
2451 }
2452
2453 /*
2454 * Suspend/Resume support
2455 * disable suspend until real resume implemented
2456 */
2457
2458 static void amd_iommu_resume(void)
2459 {
2460 struct amd_iommu *iommu;
2461
2462 for_each_iommu(iommu)
2463 iommu_apply_resume_quirks(iommu);
2464
2465 /* re-load the hardware */
2466 enable_iommus();
2467
2468 amd_iommu_enable_interrupts();
2469 }
2470
2471 static int amd_iommu_suspend(void)
2472 {
2473 /* disable IOMMUs to go out of the way for BIOS */
2474 disable_iommus();
2475
2476 return 0;
2477 }
2478
2479 static struct syscore_ops amd_iommu_syscore_ops = {
2480 .suspend = amd_iommu_suspend,
2481 .resume = amd_iommu_resume,
2482 };
2483
2484 static void __init free_iommu_resources(void)
2485 {
2486 kmemleak_free(irq_lookup_table);
2487 free_pages((unsigned long)irq_lookup_table,
2488 get_order(rlookup_table_size));
2489 irq_lookup_table = NULL;
2490
2491 kmem_cache_destroy(amd_iommu_irq_cache);
2492 amd_iommu_irq_cache = NULL;
2493
2494 free_pages((unsigned long)amd_iommu_rlookup_table,
2495 get_order(rlookup_table_size));
2496 amd_iommu_rlookup_table = NULL;
2497
2498 free_pages((unsigned long)amd_iommu_alias_table,
2499 get_order(alias_table_size));
2500 amd_iommu_alias_table = NULL;
2501
2502 free_pages((unsigned long)amd_iommu_dev_table,
2503 get_order(dev_table_size));
2504 amd_iommu_dev_table = NULL;
2505
2506 free_iommu_all();
2507 }
2508
2509 /* SB IOAPIC is always on this device in AMD systems */
2510 #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
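/* i.e. bus 0x00, device 0x14, function 0 -> devid 0x00a0 */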
2511
2512 static bool __init check_ioapic_information(void)
2513 {
2514 const char *fw_bug = FW_BUG;
2515 bool ret, has_sb_ioapic;
2516 int idx;
2517
2518 has_sb_ioapic = false;
2519 ret = false;
2520
2521 /*
2522 * If we have map overrides on the kernel command line, the
2523 * messages in this function might not describe firmware bugs
2524 * anymore - so be careful
2525 */
2526 if (cmdline_maps)
2527 fw_bug = "";
2528
2529 for (idx = 0; idx < nr_ioapics; idx++) {
2530 int devid, id = mpc_ioapic_id(idx);
2531
2532 devid = get_ioapic_devid(id);
2533 if (devid < 0) {
2534 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
2535 fw_bug, id);
2536 ret = false;
2537 } else if (devid == IOAPIC_SB_DEVID) {
2538 has_sb_ioapic = true;
2539 ret = true;
2540 }
2541 }
2542
2543 if (!has_sb_ioapic) {
2544 /*
2545 * We expect the SB IOAPIC to be listed in the IVRS
2546 * table. The system timer is connected to the SB IOAPIC
2547 * and if we don't have it in the list the system will
2548 * panic at boot time. This situation usually happens
2549 * when the BIOS is buggy and provides the wrong
2550 * device id for the IOAPIC in the system.
2551 */
2552 pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
2553 }
2554
2555 if (!ret)
2556 pr_err("Disabling interrupt remapping\n");
2557
2558 return ret;
2559 }
2560
2561 static void __init free_dma_resources(void)
2562 {
2563 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2564 get_order(MAX_DOMAIN_ID/8));
2565 amd_iommu_pd_alloc_bitmap = NULL;
2566
2567 free_unity_maps();
2568 }
2569
2570 static void __init ivinfo_init(void *ivrs)
2571 {
2572 amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
2573 }
2574
2575 /*
2576 * This is the hardware init function for AMD IOMMU in the system.
2577 * This function is called either from amd_iommu_init or from the interrupt
2578 * remapping setup code.
2579 *
2580 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
2581 * four times:
2582 *
2583 * Pass 1) Discover the most comprehensive IVHD type to use.
2584 *
2585 * Pass 2) Find the highest PCI device id the driver has to handle.
2586 * Based on this information the size of the data structures
2587 * that need to be allocated is determined.
2588 *
2589 * Pass 3) Initialize the data structures just allocated with the
2590 * information in the ACPI table about available AMD IOMMUs
2591 * in the system. It also maps the PCI devices in the
2592 * system to specific IOMMUs.
2593 *
2594 * Pass 4) After the basic data structures are allocated and
2595 * initialized, update them with information about memory
2596 * remapping requirements parsed out of the ACPI table in
2597 * this last pass.
2598 *
2599 * After everything is set up the IOMMUs are enabled and the necessary
2600 * hotplug and suspend notifiers are registered.
2601 */
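/*
 * Roughly, the passes described above correspond to the calls below:
 * get_highest_supported_ivhd_type() (pass 1), find_last_devid_acpi()
 * (pass 2), init_iommu_all() (pass 3) and init_memory_definitions()
 * (pass 4).
 */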
2602 static int __init early_amd_iommu_init(void)
2603 {
2604 struct acpi_table_header *ivrs_base;
2605 acpi_status status;
2606 int i, remap_cache_sz, ret = 0;
2607 u32 pci_id;
2608
2609 if (!amd_iommu_detected)
2610 return -ENODEV;
2611
2612 status = acpi_get_table("IVRS", 0, &ivrs_base);
2613 if (status == AE_NOT_FOUND)
2614 return -ENODEV;
2615 else if (ACPI_FAILURE(status)) {
2616 const char *err = acpi_format_exception(status);
2617 pr_err("IVRS table error: %s\n", err);
2618 return -EINVAL;
2619 }
2620
2621 /*
2622 * Validate checksum here so we don't need to do it when
2623 * we actually parse the table
2624 */
2625 ret = check_ivrs_checksum(ivrs_base);
2626 if (ret)
2627 goto out;
2628
2629 ivinfo_init(ivrs_base);
2630
2631 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2632 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2633
2634 /*
2635 * First parse the ACPI tables to find the largest Bus/Dev/Func
2636 * we need to handle. Based on this information the shared data
2637 * structures for the IOMMUs in the system will be allocated.
2638 */
2639 ret = find_last_devid_acpi(ivrs_base);
2640 if (ret)
2641 goto out;
2642
2643 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
2644 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2645 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
2646
2647 /* Device table - directly used by all IOMMUs */
2648 ret = -ENOMEM;
2649 amd_iommu_dev_table = (void *)__get_free_pages(
2650 GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
2651 get_order(dev_table_size));
2652 if (amd_iommu_dev_table == NULL)
2653 goto out;
2654
2655 /*
2656 * Alias table - maps a PCI Bus/Dev/Func to the Bus/Dev/Func the
2657 * IOMMU sees for that device
2658 */
2659 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2660 get_order(alias_table_size));
2661 if (amd_iommu_alias_table == NULL)
2662 goto out;
2663
2664 /* IOMMU rlookup table - find the IOMMU for a specific device */
2665 amd_iommu_rlookup_table = (void *)__get_free_pages(
2666 GFP_KERNEL | __GFP_ZERO,
2667 get_order(rlookup_table_size));
2668 if (amd_iommu_rlookup_table == NULL)
2669 goto out;
2670
2671 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2672 GFP_KERNEL | __GFP_ZERO,
2673 get_order(MAX_DOMAIN_ID/8));
2674 if (amd_iommu_pd_alloc_bitmap == NULL)
2675 goto out;
2676
2677 /*
2678 * let all alias entries point to themselves
2679 */
2680 for (i = 0; i <= amd_iommu_last_bdf; ++i)
2681 amd_iommu_alias_table[i] = i;
2682
2683 /*
2684 * never allocate domain 0 because it's used as the non-allocated and
2685 * error value placeholder
2686 */
2687 __set_bit(0, amd_iommu_pd_alloc_bitmap);
2688
2689 /*
2690 * now the data structures are allocated and basically initialized
2691 * start the real acpi table scan
2692 */
2693 ret = init_iommu_all(ivrs_base);
2694 if (ret)
2695 goto out;
2696
2697 /* Disable IOMMU if there's Stoney Ridge graphics */
2698 for (i = 0; i < 32; i++) {
2699 pci_id = read_pci_config(0, i, 0, 0);
2700 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
2701 pr_info("Disable IOMMU on Stoney Ridge\n");
2702 amd_iommu_disabled = true;
2703 break;
2704 }
2705 }
2706
2707 /* Disable any previously enabled IOMMUs */
2708 if (!is_kdump_kernel() || amd_iommu_disabled)
2709 disable_iommus();
2710
2711 if (amd_iommu_irq_remap)
2712 amd_iommu_irq_remap = check_ioapic_information();
2713
2714 if (amd_iommu_irq_remap) {
2715 /*
2716 * Interrupt remapping enabled, create kmem_cache for the
2717 * remapping tables.
2718 */
2719 ret = -ENOMEM;
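/*
 * A remap table holds MAX_IRQS_PER_TABLE entries; legacy mode uses
 * 32-bit IRTEs while GA mode uses 128-bit IRTEs, hence the two
 * different cache object sizes below.
 */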
2720 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2721 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2722 else
2723 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
2724 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
2725 remap_cache_sz,
2726 IRQ_TABLE_ALIGNMENT,
2727 0, NULL);
2728 if (!amd_iommu_irq_cache)
2729 goto out;
2730
2731 irq_lookup_table = (void *)__get_free_pages(
2732 GFP_KERNEL | __GFP_ZERO,
2733 get_order(rlookup_table_size));
2734 kmemleak_alloc(irq_lookup_table, rlookup_table_size,
2735 1, GFP_KERNEL);
2736 if (!irq_lookup_table)
2737 goto out;
2738 }
2739
2740 ret = init_memory_definitions(ivrs_base);
2741 if (ret)
2742 goto out;
2743
2744 /* init the device table */
2745 init_device_table();
2746
2747 out:
2748 /* Don't leak any ACPI memory */
2749 acpi_put_table(ivrs_base);
2750 ivrs_base = NULL;
2751
2752 return ret;
2753 }
2754
2755 static int amd_iommu_enable_interrupts(void)
2756 {
2757 struct amd_iommu *iommu;
2758 int ret = 0;
2759
2760 for_each_iommu(iommu) {
2761 ret = iommu_init_msi(iommu);
2762 if (ret)
2763 goto out;
2764 }
2765
2766 out:
2767 return ret;
2768 }
2769
2770 static bool detect_ivrs(void)
2771 {
2772 struct acpi_table_header *ivrs_base;
2773 acpi_status status;
2774
2775 status = acpi_get_table("IVRS", 0, &ivrs_base);
2776 if (status == AE_NOT_FOUND)
2777 return false;
2778 else if (ACPI_FAILURE(status)) {
2779 const char *err = acpi_format_exception(status);
2780 pr_err("IVRS table error: %s\n", err);
2781 return false;
2782 }
2783
2784 acpi_put_table(ivrs_base);
2785
2786 /* Make sure ACS will be enabled during PCI probe */
2787 pci_request_acs();
2788
2789 return true;
2790 }
2791
2792 /****************************************************************************
2793 *
2794 * AMD IOMMU Initialization State Machine
2795 *
2796 ****************************************************************************/
2797
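/*
 * On the successful path the state machine advances as follows:
 * IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 * IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 * IOMMU_DMA_OPS -> IOMMU_INITIALIZED. Failures end up in one of the
 * terminal states IOMMU_NOT_FOUND, IOMMU_INIT_ERROR or
 * IOMMU_CMDLINE_DISABLED, from which no further transitions are made.
 */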
2798 static int __init state_next(void)
2799 {
2800 int ret = 0;
2801
2802 switch (init_state) {
2803 case IOMMU_START_STATE:
2804 if (!detect_ivrs()) {
2805 init_state = IOMMU_NOT_FOUND;
2806 ret = -ENODEV;
2807 } else {
2808 init_state = IOMMU_IVRS_DETECTED;
2809 }
2810 break;
2811 case IOMMU_IVRS_DETECTED:
2812 ret = early_amd_iommu_init();
2813 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
2814 if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
2815 pr_info("AMD IOMMU disabled\n");
2816 init_state = IOMMU_CMDLINE_DISABLED;
2817 ret = -EINVAL;
2818 }
2819 break;
2820 case IOMMU_ACPI_FINISHED:
2821 early_enable_iommus();
2822 x86_platform.iommu_shutdown = disable_iommus;
2823 init_state = IOMMU_ENABLED;
2824 break;
2825 case IOMMU_ENABLED:
2826 register_syscore_ops(&amd_iommu_syscore_ops);
2827 ret = amd_iommu_init_pci();
2828 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2829 enable_iommus_v2();
2830 break;
2831 case IOMMU_PCI_INIT:
2832 ret = amd_iommu_enable_interrupts();
2833 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2834 break;
2835 case IOMMU_INTERRUPTS_EN:
2836 ret = amd_iommu_init_dma_ops();
2837 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2838 break;
2839 case IOMMU_DMA_OPS:
2840 init_state = IOMMU_INITIALIZED;
2841 break;
2842 case IOMMU_INITIALIZED:
2843 /* Nothing to do */
2844 break;
2845 case IOMMU_NOT_FOUND:
2846 case IOMMU_INIT_ERROR:
2847 case IOMMU_CMDLINE_DISABLED:
2848 /* Error states => do nothing */
2849 ret = -EINVAL;
2850 break;
2851 default:
2852 /* Unknown state */
2853 BUG();
2854 }
2855
2856 if (ret) {
2857 free_dma_resources();
2858 if (!irq_remapping_enabled) {
2859 disable_iommus();
2860 free_iommu_resources();
2861 } else {
2862 struct amd_iommu *iommu;
2863
2864 uninit_device_table_dma();
2865 for_each_iommu(iommu)
2866 iommu_flush_all_caches(iommu);
2867 }
2868 }
2869 return ret;
2870 }
2871
2872 static int __init iommu_go_to_state(enum iommu_init_state state)
2873 {
2874 int ret = -EINVAL;
2875
2876 while (init_state != state) {
2877 if (init_state == IOMMU_NOT_FOUND ||
2878 init_state == IOMMU_INIT_ERROR ||
2879 init_state == IOMMU_CMDLINE_DISABLED)
2880 break;
2881 ret = state_next();
2882 }
2883
2884 return ret;
2885 }
2886
2887 #ifdef CONFIG_IRQ_REMAP
2888 int __init amd_iommu_prepare(void)
2889 {
2890 int ret;
2891
2892 amd_iommu_irq_remap = true;
2893
2894 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2895 if (ret)
2896 return ret;
2897 return amd_iommu_irq_remap ? 0 : -ENODEV;
2898 }
2899
2900 int __init amd_iommu_enable(void)
2901 {
2902 int ret;
2903
2904 ret = iommu_go_to_state(IOMMU_ENABLED);
2905 if (ret)
2906 return ret;
2907
2908 irq_remapping_enabled = 1;
2909 return amd_iommu_xt_mode;
2910 }
2911
2912 void amd_iommu_disable(void)
2913 {
2914 amd_iommu_suspend();
2915 }
2916
2917 int amd_iommu_reenable(int mode)
2918 {
2919 amd_iommu_resume();
2920
2921 return 0;
2922 }
2923
2924 int __init amd_iommu_enable_faulting(void)
2925 {
2926 /* We enable MSI later when PCI is initialized */
2927 return 0;
2928 }
2929 #endif
2930
2931 /*
2932 * This is the core init function for AMD IOMMU hardware in the system.
2933 * This function is called from the generic x86 DMA layer initialization
2934 * code.
2935 */
2936 static int __init amd_iommu_init(void)
2937 {
2938 struct amd_iommu *iommu;
2939 int ret;
2940
2941 ret = iommu_go_to_state(IOMMU_INITIALIZED);
2942 #ifdef CONFIG_GART_IOMMU
2943 if (ret && list_empty(&amd_iommu_list)) {
2944 /*
2945 * We failed to initialize the AMD IOMMU - try fallback
2946 * to GART if possible.
2947 */
2948 gart_iommu_init();
2949 }
2950 #endif
2951
2952 for_each_iommu(iommu)
2953 amd_iommu_debugfs_setup(iommu);
2954
2955 return ret;
2956 }
2957
2958 static bool amd_iommu_sme_check(void)
2959 {
2960 if (!sme_active() || (boot_cpu_data.x86 != 0x17))
2961 return true;
2962
2963 /* For Fam17h, a specific level of support is required */
2964 if (boot_cpu_data.microcode >= 0x08001205)
2965 return true;
2966
2967 if ((boot_cpu_data.microcode >= 0x08001126) &&
2968 (boot_cpu_data.microcode <= 0x080011ff))
2969 return true;
2970
2971 pr_notice("IOMMU not currently supported when SME is active\n");
2972
2973 return false;
2974 }
2975
2976 /****************************************************************************
2977 *
2978 * Early detect code. This code runs at IOMMU detection time in the DMA
2979 * layer. It just checks whether there is an IVRS ACPI table to detect
2980 * AMD IOMMUs.
2981 *
2982 ****************************************************************************/
2983 int __init amd_iommu_detect(void)
2984 {
2985 int ret;
2986
2987 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
2988 return -ENODEV;
2989
2990 if (!amd_iommu_sme_check())
2991 return -ENODEV;
2992
2993 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
2994 if (ret)
2995 return ret;
2996
2997 amd_iommu_detected = true;
2998 iommu_detected = 1;
2999 x86_init.iommu.iommu_init = amd_iommu_init;
3000
3001 return 1;
3002 }
3003
3004 /****************************************************************************
3005 *
3006 * Parsing functions for the AMD IOMMU specific kernel command line
3007 * options.
3008 *
3009 ****************************************************************************/
3010
3011 static int __init parse_amd_iommu_dump(char *str)
3012 {
3013 amd_iommu_dump = true;
3014
3015 return 1;
3016 }
3017
3018 static int __init parse_amd_iommu_intr(char *str)
3019 {
3020 for (; *str; ++str) {
3021 if (strncmp(str, "legacy", 6) == 0) {
3022 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3023 break;
3024 }
3025 if (strncmp(str, "vapic", 5) == 0) {
3026 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
3027 break;
3028 }
3029 }
3030 return 1;
3031 }
3032
3033 static int __init parse_amd_iommu_options(char *str)
3034 {
3035 for (; *str; ++str) {
3036 if (strncmp(str, "fullflush", 9) == 0)
3037 amd_iommu_unmap_flush = true;
3038 if (strncmp(str, "off", 3) == 0)
3039 amd_iommu_disabled = true;
3040 if (strncmp(str, "force_isolation", 15) == 0)
3041 amd_iommu_force_isolation = true;
3042 }
3043
3044 return 1;
3045 }
3046
3047 static int __init parse_ivrs_ioapic(char *str)
3048 {
3049 unsigned int bus, dev, fn;
3050 int ret, id, i;
3051 u16 devid;
3052
3053 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
3054
3055 if (ret != 4) {
3056 pr_err("Invalid command line: ivrs_ioapic%s\n", str);
3057 return 1;
3058 }
3059
3060 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
3061 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
3062 str);
3063 return 1;
3064 }
3065
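/*
 * The encoding below packs bus/device/function into a 16-bit devid.
 * Illustrative example: ivrs_ioapic[32]=00:14.0 gives bus 0x00,
 * device 0x14, function 0 and thus devid 0x00a0.
 */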
3066 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3067
3068 cmdline_maps = true;
3069 i = early_ioapic_map_size++;
3070 early_ioapic_map[i].id = id;
3071 early_ioapic_map[i].devid = devid;
3072 early_ioapic_map[i].cmd_line = true;
3073
3074 return 1;
3075 }
3076
3077 static int __init parse_ivrs_hpet(char *str)
3078 {
3079 unsigned int bus, dev, fn;
3080 int ret, id, i;
3081 u16 devid;
3082
3083 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
3084
3085 if (ret != 4) {
3086 pr_err("Invalid command line: ivrs_hpet%s\n", str);
3087 return 1;
3088 }
3089
3090 if (early_hpet_map_size == EARLY_MAP_SIZE) {
3091 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
3092 str);
3093 return 1;
3094 }
3095
3096 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3097
3098 cmdline_maps = true;
3099 i = early_hpet_map_size++;
3100 early_hpet_map[i].id = id;
3101 early_hpet_map[i].devid = devid;
3102 early_hpet_map[i].cmd_line = true;
3103
3104 return 1;
3105 }
3106
3107 static int __init parse_ivrs_acpihid(char *str)
3108 {
3109 u32 bus, dev, fn;
3110 char *hid, *uid, *p;
3111 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
3112 int ret, i;
3113
3114 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
3115 if (ret != 4) {
3116 pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
3117 return 1;
3118 }
3119
3120 p = acpiid;
3121 hid = strsep(&p, ":");
3122 uid = p;
3123
3124 if (!hid || !(*hid) || !uid) {
3125 pr_err("Invalid command line: hid or uid\n");
3126 return 1;
3127 }
3128
3129 i = early_acpihid_map_size++;
3130 memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
3131 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
3132 early_acpihid_map[i].devid =
3133 ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3134 early_acpihid_map[i].cmd_line = true;
3135
3136 return 1;
3137 }
3138
3139 __setup("amd_iommu_dump", parse_amd_iommu_dump);
3140 __setup("amd_iommu=", parse_amd_iommu_options);
3141 __setup("amd_iommu_intr=", parse_amd_iommu_intr);
3142 __setup("ivrs_ioapic", parse_ivrs_ioapic);
3143 __setup("ivrs_hpet", parse_ivrs_hpet);
3144 __setup("ivrs_acpihid", parse_ivrs_acpihid);
3145
3146 IOMMU_INIT_FINISH(amd_iommu_detect,
3147 gart_iommu_hole_init,
3148 NULL,
3149 NULL);
3150
3151 bool amd_iommu_v2_supported(void)
3152 {
3153 return amd_iommu_v2_present;
3154 }
3155 EXPORT_SYMBOL(amd_iommu_v2_supported);
3156
3157 struct amd_iommu *get_amd_iommu(unsigned int idx)
3158 {
3159 unsigned int i = 0;
3160 struct amd_iommu *iommu;
3161
3162 for_each_iommu(iommu)
3163 if (i++ == idx)
3164 return iommu;
3165 return NULL;
3166 }
3167 EXPORT_SYMBOL(get_amd_iommu);
3168
3169 /****************************************************************************
3170 *
3171 * IOMMU EFR Performance Counter support functionality. This code allows
3172 * access to the IOMMU PC functionality.
3173 *
3174 ****************************************************************************/
3175
3176 u8 amd_iommu_pc_get_max_banks(unsigned int idx)
3177 {
3178 struct amd_iommu *iommu = get_amd_iommu(idx);
3179
3180 if (iommu)
3181 return iommu->max_banks;
3182
3183 return 0;
3184 }
3185 EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
3186
3187 bool amd_iommu_pc_supported(void)
3188 {
3189 return amd_iommu_pc_present;
3190 }
3191 EXPORT_SYMBOL(amd_iommu_pc_supported);
3192
3193 u8 amd_iommu_pc_get_max_counters(unsigned int idx)
3194 {
3195 struct amd_iommu *iommu = get_amd_iommu(idx);
3196
3197 if (iommu)
3198 return iommu->max_counters;
3199
3200 return 0;
3201 }
3202 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
3203
3204 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3205 u8 fxn, u64 *value, bool is_write)
3206 {
3207 u32 offset;
3208 u32 max_offset_lim;
3209
3210 /* Make sure the IOMMU PC resource is available */
3211 if (!amd_iommu_pc_present)
3212 return -ENODEV;
3213
3214 /* Check for valid iommu and pc register indexing */
3215 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3216 return -ENODEV;
3217
3218 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
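/* e.g. bank 0, counter 0, fxn 0 yields MMIO offset 0x40000 */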
3219
3220 /* Limit the offset to the hw defined mmio region aperture */
3221 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3222 (iommu->max_counters << 8) | 0x28);
3223 if ((offset < MMIO_CNTR_REG_OFFSET) ||
3224 (offset > max_offset_lim))
3225 return -EINVAL;
3226
3227 if (is_write) {
3228 u64 val = *value & GENMASK_ULL(47, 0);
3229
3230 writel((u32)val, iommu->mmio_base + offset);
3231 writel((val >> 32), iommu->mmio_base + offset + 4);
3232 } else {
3233 *value = readl(iommu->mmio_base + offset + 4);
3234 *value <<= 32;
3235 *value |= readl(iommu->mmio_base + offset);
3236 *value &= GENMASK_ULL(47, 0);
3237 }
3238
3239 return 0;
3240 }
3241
3242 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3243 {
3244 if (!iommu)
3245 return -EINVAL;
3246
3247 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3248 }
3249 EXPORT_SYMBOL(amd_iommu_pc_get_reg);
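/*
 * Illustrative usage sketch (not taken from this file): a caller such
 * as a perf driver could read a counter-block register of bank 0,
 * counter 0 like this (register index 0 is just an example):
 *
 *	u64 val;
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *
 *	if (iommu && !amd_iommu_pc_get_reg(iommu, 0, 0, 0, &val))
 *		pr_debug("IOMMU PC reg: 0x%llx\n", val);
 */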
3250
3251 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3252 {
3253 if (!iommu)
3254 return -EINVAL;
3255
3256 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3257 }
3258 EXPORT_SYMBOL(amd_iommu_pc_set_reg);
3259