xref: /OK3568_Linux_fs/kernel/include/linux/dma-iommu.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014-2015 ARM Ltd.
 */
5*4882a593Smuzhiyun #ifndef __DMA_IOMMU_H
6*4882a593Smuzhiyun #define __DMA_IOMMU_H
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/errno.h>
9*4882a593Smuzhiyun #include <linux/types.h>
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #ifdef CONFIG_IOMMU_DMA
12*4882a593Smuzhiyun #include <linux/dma-mapping.h>
13*4882a593Smuzhiyun #include <linux/iommu.h>
14*4882a593Smuzhiyun #include <linux/msi.h>
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun /* Domain management interface for IOMMU drivers */
17*4882a593Smuzhiyun int iommu_get_dma_cookie(struct iommu_domain *domain);
18*4882a593Smuzhiyun int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
19*4882a593Smuzhiyun void iommu_put_dma_cookie(struct iommu_domain *domain);
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun /* Setup call for arch DMA mapping code */
22*4882a593Smuzhiyun void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun /* The DMA API isn't _quite_ the whole story, though... */
25*4882a593Smuzhiyun /*
26*4882a593Smuzhiyun  * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device
27*4882a593Smuzhiyun  *
28*4882a593Smuzhiyun  * The MSI page will be stored in @desc.
29*4882a593Smuzhiyun  *
30*4882a593Smuzhiyun  * Return: 0 on success otherwise an error describing the failure.
31*4882a593Smuzhiyun  */
32*4882a593Smuzhiyun int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun /* Update the MSI message if required. */
35*4882a593Smuzhiyun void iommu_dma_compose_msi_msg(struct msi_desc *desc,
36*4882a593Smuzhiyun 			       struct msi_msg *msg);
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
41*4882a593Smuzhiyun 			   u64 size);
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun int iommu_dma_enable_best_fit_algo(struct device *dev);
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun #else /* CONFIG_IOMMU_DMA */
46*4882a593Smuzhiyun 
/*
 * Forward declarations so the stub prototypes below stay valid without
 * pulling in <linux/iommu.h>, <linux/msi.h> or <linux/device.h>.
 */
struct iommu_domain;
struct msi_desc;
struct msi_msg;
struct device;
51*4882a593Smuzhiyun 
/* No-op when CONFIG_IOMMU_DMA is disabled: arch code keeps its default ops. */
static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size)
{
}
56*4882a593Smuzhiyun 
/* Stub: IOMMU DMA support is compiled out, so no cookie can be allocated. */
static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	return -ENODEV;
}
61*4882a593Smuzhiyun 
/* Stub: MSI cookies are unavailable without CONFIG_IOMMU_DMA. */
static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}
66*4882a593Smuzhiyun 
/* Stub: nothing to release — iommu_get_dma_cookie() never allocated anything. */
static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}
70*4882a593Smuzhiyun 
/*
 * Stub: report success so MSI setup proceeds; with no IOMMU DMA layer there
 * is no MSI page to map.
 */
static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
					phys_addr_t msi_addr)
{
	return 0;
}
76*4882a593Smuzhiyun 
/* Stub: no IOVA remapping, so the MSI message needs no rewriting. */
static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc,
					     struct msi_msg *msg)
{
}
81*4882a593Smuzhiyun 
/* Stub: contributes no reserved regions to @list. */
static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
}
85*4882a593Smuzhiyun 
/* Stub: no IOVA allocator exists to reserve from. */
static inline int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
					 u64 size)
{
	return -ENODEV;
}
91*4882a593Smuzhiyun 
/* Stub: best-fit IOVA allocation requires CONFIG_IOMMU_DMA. */
static inline int iommu_dma_enable_best_fit_algo(struct device *dev)
{
	return -ENODEV;
}
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun #endif	/* CONFIG_IOMMU_DMA */
98*4882a593Smuzhiyun #endif	/* __DMA_IOMMU_H */
99