/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
/*
 * Allow caching in a transparent outer level of cache, also known as
 * the last-level or system cache, with a read/write allocation policy.
 * Does not depend on IOMMU_CACHE. Incompatible with IOMMU_SYS_CACHE_NWA.
 */
#define IOMMU_SYS_CACHE	(1 << 6)
/*
 * Allow caching in a transparent outer level of cache, also known as
 * the last-level or system cache, with a read allocation policy.
 * Does not depend on IOMMU_CACHE. Incompatible with IOMMU_SYS_CACHE.
 */
#define IOMMU_SYS_CACHE_NWA	(1 << 7)
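
/*
 * Illustrative sketch (not part of the API): the protection flags above are
 * OR-ed together when creating a mapping. For example, a writeable, coherent
 * mapping of one page might be requested as follows, where "domain", "iova"
 * and "paddr" are hypothetical values supplied by the caller:
 *
 *	int ret = iommu_map(domain, iova, paddr, SZ_4K,
 *			    IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
 */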

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */

/*
 * These are the possible domain types
 *
 * IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *			  devices
 * IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 * IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 * IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *			  This flag allows IOMMU drivers to implement
 *			  certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
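/*
 * Illustrative sketch (not part of the API): an IOMMU-API user such as a
 * VFIO-style driver typically allocates an UNMANAGED domain and attaches a
 * device to it before mapping anything. "bus" and "dev" are hypothetical:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(bus);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_attach_device(domain, dev))
 *		goto err_free_domain;
 */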

struct iommu_domain {
	unsigned type;
	const struct iommu_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
	void *iova_cookie;
};

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};

/*
 * The following constraints are specific to FSL_PAMUV1:
 *  -aperture must be a power of 2, and naturally aligned
 *  -number of windows must be a power of 2, and the address space size
 *   of each window is determined by aperture size / # of windows
 *  -the actual size of the mapped region of a window must be a power
 *   of 2 starting with 4KB, and the physical address must be naturally
 *   aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above-mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

enum iommu_attr {
	DOMAIN_ATTR_GEOMETRY,
	DOMAIN_ATTR_PAGING,
	DOMAIN_ATTR_WINDOWS,
	DOMAIN_ATTR_FSL_PAMU_STASH,
	DOMAIN_ATTR_FSL_PAMU_ENABLE,
	DOMAIN_ATTR_FSL_PAMUV1,
	DOMAIN_ATTR_NESTING,	/* two stages of translation */
	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
	DOMAIN_ATTR_MAX,
};
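
/*
 * Illustrative sketch (not part of the API): domain attributes are queried
 * through iommu_domain_get_attr(), declared later in this header. For
 * example, reading the aperture of a hypothetical "domain":
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *		pr_info("aperture: %pad - %pad\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */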

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in the device assignment use case (USB, graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
};
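
/*
 * Illustrative sketch (not part of the API): the reserved regions of a
 * device are obtained as a list of struct iommu_resv_region and must be
 * released with iommu_put_resv_regions(). "dev" is hypothetical:
 *
 *	struct iommu_resv_region *region;
 *	LIST_HEAD(resv_regions);
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		pr_info("resv: %pap + %zx\n", &region->start, region->length);
 *	iommu_put_resv_regions(dev, &resv_regions);
 */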

/* Per device IOMMU features */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
};

#define IOMMU_PASID_INVALID	(-1U)

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync().
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
};
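
/*
 * Illustrative sketch (not part of the API): a caller batching TLB
 * invalidations initializes a gather structure, performs one or more fast
 * unmaps, and then issues a single sync. "domain", "iova" and "size" are
 * hypothetical:
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	iommu_unmap_fast(domain, iova, size, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */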

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @map_sg: map a scatter-gather list of physically contiguous chunks to
 *          an iommu domain.
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the groups domain
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *                               iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @page_response: handle page request response
 * @cache_invalidate: invalidate translation caches
 * @sva_bind_gpasid: bind guest pasid and mm
 * @sva_unbind_gpasid: unbind guest pasid and mm
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	void (*domain_free)(struct iommu_domain *);

	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	int (*map_sg)(struct iommu_domain *domain, unsigned long iova,
		      struct scatterlist *sg, unsigned int nents, int prot,
		      gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);
	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			       size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);
	int (*domain_get_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	int (*domain_set_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct list_head *list);
	void (*apply_resv_region)(struct device *dev,
				  struct iommu_domain *domain,
				  struct iommu_resv_region *region);

	/* Window handling functions */
	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
				    phys_addr_t paddr, u64 size, int prot);
	void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	/* Aux-domain specific attach/detach entries */
	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	u32 (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);
	int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
				struct iommu_cache_invalidate_info *inv_info);
	int (*sva_bind_gpasid)(struct iommu_domain *domain,
			struct device *dev, struct iommu_gpasid_bind_data *data);

	int (*sva_unbind_gpasid)(struct device *dev, u32 pasid);

	int (*def_domain_type)(struct device *dev);

	unsigned long pgsize_bitmap;
	struct module *owner;
};
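
/*
 * Illustrative sketch (not part of the API): an IOMMU driver implements a
 * subset of these callbacks and leaves the rest NULL. All "my_*" names
 * below are hypothetical:
 *
 *	static struct iommu_ops my_iommu_ops = {
 *		.domain_alloc	= my_domain_alloc,
 *		.domain_free	= my_domain_free,
 *		.attach_dev	= my_attach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.iova_to_phys	= my_iova_to_phys,
 *		.probe_device	= my_probe_device,
 *		.release_device	= my_release_device,
 *		.device_group	= generic_device_group,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 */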

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
};

int  iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);

static inline void __iommu_device_set_ops(struct iommu_device *iommu,
					  const struct iommu_ops *ops)
{
	iommu->ops = ops;
}

#define iommu_device_set_ops(iommu, ops)				\
do {									\
	struct iommu_ops *__ops = (struct iommu_ops *)(ops);		\
	__ops->owner = THIS_MODULE;					\
	__iommu_device_set_ops(iommu, __ops);				\
} while (0)
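
/*
 * Illustrative sketch (not part of the API): a driver typically wires up
 * its ops and registers its hardware instance during probe. "my_iommu" and
 * "my_iommu_ops" are hypothetical:
 *
 *	iommu_device_set_ops(&my_iommu->iommu, &my_iommu_ops);
 *	iommu_device_set_fwnode(&my_iommu->iommu, dev->fwnode);
 *	ret = iommu_device_register(&my_iommu->iommu);
 *	if (ret)
 *		return ret;
 */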

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
	iommu->fwnode = fwnode;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
	};
}

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain,
				       struct device *dev,
				       void __user *uinfo);

extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
				      struct device *dev, void __user *udata);
extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
					struct device *dev, void __user *udata);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot);
extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);
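
/*
 * Illustrative sketch (not part of the API): a per-domain fault handler
 * receives the faulting IOVA plus IOMMU_FAULT_* flags, and returns 0 when
 * it has handled the fault. "my_fault_handler" and "my_token" are
 * hypothetical:
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "unhandled fault at IOVA %lx\n", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, my_token);
 */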

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
					   struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
					 struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);

/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				      phys_addr_t offset, u64 size,
				      int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if (gather->pgsize != size ||
	    end + 1 < gather->start || start > gather->end + 1) {
		if (gather->pgsize)
			iommu_iotlb_sync(domain, gather);
		gather->pgsize = size;
	}

	if (gather->end < end)
		gather->end = end;

	if (gather->start > start)
		gather->start = start;
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_pasid_bits: number of PASID bits supported by this device
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	u32			num_pasid_bits;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct device			*dev;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}

int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
					void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
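
/*
 * Illustrative sketch (not part of the API): a device driver sharing the
 * current process address space with its device binds the mm and programs
 * the returned PASID into the hardware. "dev" and "drvdata" are
 * hypothetical:
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, drvdata);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	...
 *	iommu_sva_unbind_device(handle);
 */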

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}

static inline int iommu_map_atomic(struct iommu_domain *domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int prot)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, int gfp_order,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline size_t iommu_map_sg(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	return 0;
}

static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
					 unsigned long iova, struct scatterlist *sg,
					 unsigned int nents, int prot)
{
	return 0;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline int iommu_domain_window_enable(struct iommu_domain *domain,
					     u32 wnd_nr, phys_addr_t paddr,
					     u64 size, int prot)
{
	return -ENODEV;
}

static inline void iommu_domain_window_disable(struct iommu_domain *domain,
					       u32 wnd_nr)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_register_notifier(struct iommu_group *group,
						struct notifier_block *nb)
{
	return -ENODEV;
}

static inline int iommu_group_unregister_notifier(struct iommu_group *group,
						  struct notifier_block *nb)
{
	return 0;
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_domain_get_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_domain_set_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int  iommu_device_register(struct iommu_device *iommu)
{
	return -ENODEV;
}

static inline void iommu_device_set_ops(struct iommu_device *iommu,
					const struct iommu_ops *ops)
{
}

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int  iommu_device_sysfs_add(struct iommu_device *iommu,
					  struct device *parent,
					  const struct attribute_group **groups,
					  const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline bool
iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline bool
iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline void
iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
}

static inline int
iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	return NULL;
}

iommu_sva_unbind_device(struct iommu_sva * handle)1059*4882a593Smuzhiyun static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
1060*4882a593Smuzhiyun {
1061*4882a593Smuzhiyun }
1062*4882a593Smuzhiyun
iommu_sva_get_pasid(struct iommu_sva * handle)1063*4882a593Smuzhiyun static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
1064*4882a593Smuzhiyun {
1065*4882a593Smuzhiyun return IOMMU_PASID_INVALID;
1066*4882a593Smuzhiyun }
1067*4882a593Smuzhiyun
1068*4882a593Smuzhiyun static inline int
iommu_uapi_cache_invalidate(struct iommu_domain * domain,struct device * dev,struct iommu_cache_invalidate_info * inv_info)1069*4882a593Smuzhiyun iommu_uapi_cache_invalidate(struct iommu_domain *domain,
1070*4882a593Smuzhiyun struct device *dev,
1071*4882a593Smuzhiyun struct iommu_cache_invalidate_info *inv_info)
1072*4882a593Smuzhiyun {
1073*4882a593Smuzhiyun return -ENODEV;
1074*4882a593Smuzhiyun }
1075*4882a593Smuzhiyun
iommu_uapi_sva_bind_gpasid(struct iommu_domain * domain,struct device * dev,void __user * udata)1076*4882a593Smuzhiyun static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
1077*4882a593Smuzhiyun struct device *dev, void __user *udata)
1078*4882a593Smuzhiyun {
1079*4882a593Smuzhiyun return -ENODEV;
1080*4882a593Smuzhiyun }
1081*4882a593Smuzhiyun
iommu_uapi_sva_unbind_gpasid(struct iommu_domain * domain,struct device * dev,void __user * udata)1082*4882a593Smuzhiyun static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
1083*4882a593Smuzhiyun struct device *dev, void __user *udata)
1084*4882a593Smuzhiyun {
1085*4882a593Smuzhiyun return -ENODEV;
1086*4882a593Smuzhiyun }
1087*4882a593Smuzhiyun
iommu_sva_unbind_gpasid(struct iommu_domain * domain,struct device * dev,ioasid_t pasid)1088*4882a593Smuzhiyun static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
1089*4882a593Smuzhiyun struct device *dev,
1090*4882a593Smuzhiyun ioasid_t pasid)
1091*4882a593Smuzhiyun {
1092*4882a593Smuzhiyun return -ENODEV;
1093*4882a593Smuzhiyun }
1094*4882a593Smuzhiyun
dev_iommu_fwspec_get(struct device * dev)1095*4882a593Smuzhiyun static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
1096*4882a593Smuzhiyun {
1097*4882a593Smuzhiyun return NULL;
1098*4882a593Smuzhiyun }
1099*4882a593Smuzhiyun #endif /* CONFIG_IOMMU_API */
1100*4882a593Smuzhiyun
/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 *
 * Return: the number of bytes mapped on success, or 0 on failure.
 */
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}
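
/*
 * Example (illustrative sketch only, not part of the kernel API): mapping a
 * buffer described by a pre-built sg_table into an IOMMU domain with
 * read/write permissions. The helper name example_map_buffer() is
 * hypothetical; the caller is assumed to have already allocated the domain
 * and populated the sg_table. A return value of 0 means nothing was mapped.
 *
 *	static int example_map_buffer(struct iommu_domain *domain,
 *				      struct sg_table *sgt, unsigned long iova)
 *	{
 *		size_t mapped;
 *
 *		mapped = iommu_map_sgtable(domain, iova, sgt,
 *					   IOMMU_READ | IOMMU_WRITE);
 *		if (!mapped)
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 */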

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif
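
/*
 * Example (illustrative sketch only, not part of this header): an IOMMU
 * driver creating its own subdirectory under the shared debugfs root,
 * assuming iommu_debugfs_setup() is meant to be called first so that
 * iommu_debugfs_dir exists. The "mydriver" name and the
 * mydriver_debugfs_init() helper are hypothetical; debugfs_create_dir()
 * is declared in <linux/debugfs.h>.
 *
 *	static struct dentry *mydriver_debugfs;
 *
 *	static void mydriver_debugfs_init(void)
 *	{
 *		iommu_debugfs_setup();
 *		mydriver_debugfs = debugfs_create_dir("mydriver",
 *						      iommu_debugfs_dir);
 *	}
 */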

#endif /* __LINUX_IOMMU_H */