/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internals of the DMA direct mapping implementation.  Only for use by the
 * DMA mapping code and IOMMU drivers.
 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>

extern unsigned int zone_dma_bits;
/*
 * Record the mapping of CPU physical to DMA addresses for a given region.
 */
struct bus_dma_region {
	phys_addr_t	cpu_start;	/* CPU physical start of the region */
	dma_addr_t	dma_start;	/* device-visible start of the region */
	u64		size;		/* size of the region in bytes */
	u64		offset;		/* cpu_start - dma_start */
};

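/*
 * zone_dma32_is_empty - check whether ZONE_DMA32 on @node holds no pages.
 * Always true when the kernel is built without CONFIG_ZONE_DMA32.
 */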
static inline bool zone_dma32_is_empty(int node)
{
#ifdef CONFIG_ZONE_DMA32
	pg_data_t *pgdat = NODE_DATA(node);

	return zone_is_empty(&pgdat->node_zones[ZONE_DMA32]);
#else
	return true;
#endif
}

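/*
 * zone_dma32_are_empty - check whether ZONE_DMA32 is empty on every node
 * (or, without CONFIG_NUMA, on the sole local node).
 */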
static inline bool zone_dma32_are_empty(void)
{
#ifdef CONFIG_NUMA
	int node;

	for_each_node(node)
		if (!zone_dma32_is_empty(node))
			return false;
#else
	if (!zone_dma32_is_empty(numa_node_id()))
		return false;
#endif

	return true;
}

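/*
 * translate_phys_to_dma - look up @paddr in the device's dma_range_map and
 * return the corresponding device-visible DMA address, or DMA_MAPPING_ERROR
 * if no region covers @paddr.
 */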
static inline dma_addr_t translate_phys_to_dma(struct device *dev,
		phys_addr_t paddr)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; m->size; m++)
		if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size)
			return (dma_addr_t)paddr - m->offset;

	/* make sure dma_capable fails when no translation is available */
	return DMA_MAPPING_ERROR;
}

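/*
 * translate_dma_to_phys - inverse of translate_phys_to_dma: map a
 * device-visible DMA address back to a CPU physical address, or return
 * (phys_addr_t)-1 if no region covers @dma_addr.
 */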
static inline phys_addr_t translate_dma_to_phys(struct device *dev,
		dma_addr_t dma_addr)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; m->size; m++)
		if (dma_addr >= m->dma_start && dma_addr - m->dma_start < m->size)
			return (phys_addr_t)dma_addr + m->offset;

	return (phys_addr_t)-1;
}

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#ifndef phys_to_dma_unencrypted
#define phys_to_dma_unencrypted		phys_to_dma
#endif
#else
static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
		phys_addr_t paddr)
{
	if (dev->dma_range_map)
		return translate_phys_to_dma(dev, paddr);
	return paddr;
}

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it.
 * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
 * buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(phys_to_dma_unencrypted(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
	phys_addr_t paddr;

	if (dev->dma_range_map)
		paddr = translate_dma_to_phys(dev, dma_addr);
	else
		paddr = dma_addr;

	return __sme_clr(paddr);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */

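/*
 * force_dma_unencrypted - true if DMA for @dev must target unencrypted
 * memory, e.g. in memory-encrypted guests.  Architectures opt in via
 * CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED; otherwise this is always false.
 */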
#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */

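/*
 * dma_capable - check that the range [@addr, @addr + @size) is addressable
 * by @dev, i.e. that it fits within both the device's dma_mask and any
 * bus_dma_limit.  For RAM on configurations with a 32-bit dma_addr_t, also
 * reject ranges that wrap around or fall below the start of RAM
 * (min_low_pfn); on overflow end is smaller than addr, so the
 * min(addr, end) comparison catches the wraparound.
 */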
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (addr == DMA_MAPPING_ERROR)
		return false;
	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}

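/*
 * Illustrative sketch (documentation only, not code from this header): a
 * direct-mapping caller typically pairs phys_to_dma() with dma_capable()
 * before handing an address to a device, and bounces the buffer (e.g.
 * through swiotlb) when the check fails:
 *
 *	dma_addr_t dma_addr = phys_to_dma(dev, paddr);
 *
 *	if (unlikely(!dma_capable(dev, dma_addr, size, true)))
 *		return DMA_MAPPING_ERROR;
 */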
u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir);
int dma_direct_supported(struct device *dev, u64 mask);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#endif /* _LINUX_DMA_DIRECT_H */