/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Rewrite, cleanup:
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 */

#ifndef _ASM_IOMMU_H
#define _ASM_IOMMU_H
#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/bitops.h>
#include <asm/machdep.h>
#include <asm/types.h>
#include <asm/pci-bridge.h>
#include <asm/asm-const.h>

#define IOMMU_PAGE_SHIFT_4K	12
#define IOMMU_PAGE_SIZE_4K	(ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K	(~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
#define IOMMU_PAGE_ALIGN_4K(addr) ALIGN(addr, IOMMU_PAGE_SIZE_4K)

#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr))
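
/*
 * Example (illustrative only, not part of the API): computing how many
 * IOMMU pages cover a buffer that starts at an arbitrary offset within
 * an IOMMU page, for a table with a given it_page_shift:
 *
 *	unsigned long start  = addr & IOMMU_PAGE_MASK(tbl);
 *	unsigned long end    = IOMMU_PAGE_ALIGN(addr + len, tbl);
 *	unsigned long npages = (end - start) >> tbl->it_page_shift;
 */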

/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;

struct iommu_table_ops {
	/*
	 * When called with direction==DMA_NONE, it is equivalent to clear().
	 * uaddr is a linear map address.
	 */
	int (*set)(struct iommu_table *tbl,
			long index, long npages,
			unsigned long uaddr,
			enum dma_data_direction direction,
			unsigned long attrs);
#ifdef CONFIG_IOMMU_API
	/*
	 * Exchanges existing TCE with new TCE plus direction bits;
	 * returns old TCE and DMA direction mask.
	 * @tce is a physical address.
	 */
	int (*xchg_no_kill)(struct iommu_table *tbl,
			long index,
			unsigned long *hpa,
			enum dma_data_direction *direction,
			bool realmode);

	void (*tce_kill)(struct iommu_table *tbl,
			unsigned long index,
			unsigned long pages,
			bool realmode);

	__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
	void (*clear)(struct iommu_table *tbl,
			long index, long npages);
	/* get() returns a physical address */
	unsigned long (*get)(struct iommu_table *tbl, long index);
	void (*flush)(struct iommu_table *tbl);
	void (*free)(struct iommu_table *tbl);
};
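
/*
 * Minimal backend sketch (hypothetical, for illustration only; a real
 * implementation programs the platform's TCE hardware and handles
 * errors). It shows the shape of a set() callback writing direct TCEs:
 *
 *	static int my_tce_set(struct iommu_table *tbl, long index, long npages,
 *			      unsigned long uaddr,
 *			      enum dma_data_direction direction,
 *			      unsigned long attrs)
 *	{
 *		__be64 *tces = (__be64 *)tbl->it_base;
 *		unsigned long perm = iommu_direction_to_tce_perm(direction);
 *		long i;
 *
 *		for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE(tbl))
 *			tces[index + i] = cpu_to_be64(__pa(uaddr) | perm);
 *		return 0;
 *	}
 *
 *	static struct iommu_table_ops my_iommu_ops = {
 *		.set   = my_tce_set,
 *		.clear = my_tce_clear,	// analogous loop writing zero TCEs
 *	};
 */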

/* These are used by VIO */
extern struct iommu_table_ops iommu_table_lpar_multi_ops;
extern struct iommu_table_ops iommu_table_pseries_ops;

/*
 * IOMAP_MAX_ORDER defines the largest contiguous block
 * of dma space we can get. IOMAP_MAX_ORDER = 13
 * allows up to 2**12 pages (4096 * 4096) = 16 MB
 */
#define IOMAP_MAX_ORDER		13

#define IOMMU_POOL_HASHBITS	2
#define IOMMU_NR_POOLS		(1 << IOMMU_POOL_HASHBITS)

struct iommu_pool {
	unsigned long start;
	unsigned long end;
	unsigned long hint;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct iommu_table {
	unsigned long  it_busno;	/* Bus number this table belongs to */
	unsigned long  it_size;		/* Size of iommu table in entries */
	unsigned long  it_indirect_levels;
	unsigned long  it_level_size;
	unsigned long  it_allocated_size;
	unsigned long  it_offset;	/* Offset into global table */
	unsigned long  it_base;		/* mapped address of tce table */
	unsigned long  it_index;	/* which iommu table this is */
	unsigned long  it_type;		/* type: PCI or Virtual Bus */
	unsigned long  it_blocksize;	/* Entries in each block (cacheline) */
	unsigned long  poolsize;
	unsigned long  nr_pools;
	struct iommu_pool large_pool;
	struct iommu_pool pools[IOMMU_NR_POOLS];
	unsigned long *it_map;		/* A simple allocation bitmap for now */
	unsigned long  it_page_shift;	/* table iommu page size */
	struct list_head it_group_list;	/* List of iommu_table_group_link */
	__be64 *it_userspace;		/* userspace view of the table */
	struct iommu_table_ops *it_ops;
	struct kref    it_kref;
	int it_nid;
	unsigned long  it_reserved_start; /* Start of not-DMA-able (MMIO) area */
	unsigned long  it_reserved_end;
};

#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), true))
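
/*
 * Usage sketch (illustrative, assumes a backend that implements
 * useraddrptr): reading the userspace view of entry @i without forcing
 * allocation of intermediate levels:
 *
 *	__be64 *uas = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, i);
 *	if (uas)
 *		pr_debug("entry %ld: %llx\n", i, be64_to_cpu(*uas));
 */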

/* Pure 2^n version of get_order */
static inline __attribute_const__
int get_iommu_order(unsigned long size, struct iommu_table *tbl)
{
	return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
}
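
/*
 * Worked example: with 4K IOMMU pages (it_page_shift == 12) and
 * size == 64K, (size - 1) >> 12 == 15 and __ilog2(15) == 3, so the
 * function returns order 4, i.e. a 16-page (64K) allocation.
 */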


struct scatterlist;

#ifdef CONFIG_PPC64

static inline void set_iommu_table_base(struct device *dev,
					struct iommu_table *base)
{
	dev->archdata.iommu_table_base = base;
}

static inline void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.iommu_table_base;
}

extern int dma_iommu_dma_supported(struct device *dev, u64 mask);

extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
extern int iommu_tce_table_put(struct iommu_table *tbl);

/* Initializes an iommu_table based on values set in the passed-in
 * structure
 */
extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
		int nid, unsigned long res_start, unsigned long res_end);
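
/*
 * Usage sketch (illustrative; platform setup code is the real caller):
 * the caller fills in it_size, it_offset, it_base, it_page_shift and
 * it_ops, then hands the table over, optionally passing a range that
 * must never be handed out for DMA:
 *
 *	tbl->it_ops = &iommu_table_pseries_ops;
 *	iommu_init_table(tbl, nid, 0, 0);	// no reserved (MMIO) range
 */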

#define IOMMU_TABLE_GROUP_MAX_TABLES	2

struct iommu_table_group;

struct iommu_table_group_ops {
	unsigned long (*get_table_size)(
			__u32 page_shift,
			__u64 window_size,
			__u32 levels);
	long (*create_table)(struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl);
	long (*set_window)(struct iommu_table_group *table_group,
			int num,
			struct iommu_table *tblnew);
	long (*unset_window)(struct iommu_table_group *table_group,
			int num);
	/* Switch ownership from platform code to external user (e.g. VFIO) */
	void (*take_ownership)(struct iommu_table_group *table_group);
	/* Switch ownership from external user (e.g. VFIO) back to core */
	void (*release_ownership)(struct iommu_table_group *table_group);
};
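
/*
 * Typical calling sequence sketch (illustrative; the VFIO SPAPR TCE
 * driver is the real user): take ownership, create a DMA window and
 * plug it into slot @num, then tear everything down in reverse:
 *
 *	table_group->ops->take_ownership(table_group);
 *	table_group->ops->create_table(table_group, 0, 12, 1ULL << 30, 1, &tbl);
 *	table_group->ops->set_window(table_group, 0, tbl);
 *	...
 *	table_group->ops->unset_window(table_group, 0);
 *	table_group->ops->release_ownership(table_group);
 */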

struct iommu_table_group_link {
	struct list_head next;
	struct rcu_head rcu;
	struct iommu_table_group *table_group;
};

struct iommu_table_group {
	/* IOMMU properties */
	__u32 tce32_start;
	__u32 tce32_size;
	__u64 pgsizes; /* Bitmap of supported page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 max_levels;

	struct iommu_group *group;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct iommu_table_group_ops *ops;
};

#ifdef CONFIG_IOMMU_API

extern void iommu_register_group(struct iommu_table_group *table_group,
				 int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev);
extern void iommu_del_device(struct device *dev);
extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
extern void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
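
/*
 * Pairing sketch (illustrative): iommu_tce_xchg_no_kill() updates one
 * entry without flushing the TCE cache, so a batched update can flush
 * once at the end with iommu_tce_kill():
 *
 *	for (i = 0; i < npages; i++)
 *		iommu_tce_xchg_no_kill(mm, tbl, entry + i, &hpa[i], &dir);
 *	iommu_tce_kill(tbl, entry, npages);
 */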
#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
					int pci_domain_number,
					unsigned long pe_num)
{
}

static inline int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev)
{
	return 0;
}

static inline void iommu_del_device(struct device *dev)
{
}
#endif /* !CONFIG_IOMMU_API */

u64 dma_iommu_get_required_mask(struct device *dev);
#else

static inline void *get_iommu_table_base(struct device *dev)
{
	return NULL;
}

static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return 0;
}

#endif /* CONFIG_PPC64 */

extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
			    struct scatterlist *sglist, int nelems,
			    unsigned long mask,
			    enum dma_data_direction direction,
			    unsigned long attrs);
extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
			       struct scatterlist *sglist,
			       int nelems,
			       enum dma_data_direction direction,
			       unsigned long attrs);

extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
				  size_t size, dma_addr_t *dma_handle,
				  unsigned long mask, gfp_t flag, int node);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
				void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
				 struct page *page, unsigned long offset,
				 size_t size, unsigned long mask,
				 enum dma_data_direction direction,
				 unsigned long attrs);
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction,
			     unsigned long attrs);

extern void iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);

#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
static inline void iommu_save(void)
{
	if (ppc_md.iommu_save)
		ppc_md.iommu_save();
}

static inline void iommu_restore(void)
{
	if (ppc_md.iommu_restore)
		ppc_md.iommu_restore();
}
#endif

/* The API to support IOMMU operations for VFIO */
extern int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages);
extern int iommu_tce_check_gpa(unsigned long page_shift,
		unsigned long gpa);

#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), (npages)) || (tce_value))
#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), 1) ||                     \
		iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))
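
/*
 * Usage sketch (illustrative): validating a guest-supplied mapping
 * request before touching the table, as a VFIO/KVM path would:
 *
 *	if (iommu_tce_put_param_check(tbl, ioba, gpa))
 *		return -EINVAL;
 */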

extern void iommu_flush_tce(struct iommu_table *tbl);
extern int iommu_take_ownership(struct iommu_table *tbl);
extern void iommu_release_ownership(struct iommu_table *tbl);

extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);

#ifdef CONFIG_PPC_CELL_NATIVE
extern bool iommu_fixed_is_weak;
#else
#define iommu_fixed_is_weak false
#endif

extern const struct dma_map_ops dma_iommu_ops;

#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */