xref: /OK3568_Linux_fs/kernel/include/linux/io-mapping.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright © 2008 Keith Packard <keithp@keithp.com>
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #ifndef _LINUX_IO_MAPPING_H
7*4882a593Smuzhiyun #define _LINUX_IO_MAPPING_H
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/types.h>
10*4882a593Smuzhiyun #include <linux/slab.h>
11*4882a593Smuzhiyun #include <linux/bug.h>
12*4882a593Smuzhiyun #include <linux/io.h>
13*4882a593Smuzhiyun #include <linux/pgtable.h>
14*4882a593Smuzhiyun #include <asm/page.h>
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun /*
17*4882a593Smuzhiyun  * The io_mapping mechanism provides an abstraction for mapping
18*4882a593Smuzhiyun  * individual pages from an io device to the CPU in an efficient fashion.
19*4882a593Smuzhiyun  *
20*4882a593Smuzhiyun  * See Documentation/driver-api/io-mapping.rst
21*4882a593Smuzhiyun  */
22*4882a593Smuzhiyun 
/*
 * One write-combining mapping of a bus-address range.
 * Initialised by io_mapping_init_wc() / io_mapping_create_wc().
 */
struct io_mapping {
	resource_size_t base;	/* bus/physical start address of the range */
	unsigned long size;	/* length of the range in bytes */
	pgprot_t prot;		/* protection used when mapping pages of the range */
	void __iomem *iomem;	/* whole-range mapping; only set on the
				 * !CONFIG_HAVE_ATOMIC_IOMAP path (see below) */
};
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun #ifdef CONFIG_HAVE_ATOMIC_IOMAP
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #include <linux/pfn.h>
33*4882a593Smuzhiyun #include <asm/iomap.h>
34*4882a593Smuzhiyun /*
35*4882a593Smuzhiyun  * For small address space machines, mapping large objects
36*4882a593Smuzhiyun  * into the kernel virtual space isn't practical. Where
37*4882a593Smuzhiyun  * available, use fixmap support to dynamically map pages
38*4882a593Smuzhiyun  * of the object at run time.
39*4882a593Smuzhiyun  */
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping * iomap,resource_size_t base,unsigned long size)42*4882a593Smuzhiyun io_mapping_init_wc(struct io_mapping *iomap,
43*4882a593Smuzhiyun 		   resource_size_t base,
44*4882a593Smuzhiyun 		   unsigned long size)
45*4882a593Smuzhiyun {
46*4882a593Smuzhiyun 	pgprot_t prot;
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 	if (iomap_create_wc(base, size, &prot))
49*4882a593Smuzhiyun 		return NULL;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	iomap->base = base;
52*4882a593Smuzhiyun 	iomap->size = size;
53*4882a593Smuzhiyun 	iomap->prot = prot;
54*4882a593Smuzhiyun 	return iomap;
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun static inline void
io_mapping_fini(struct io_mapping * mapping)58*4882a593Smuzhiyun io_mapping_fini(struct io_mapping *mapping)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun 	iomap_free(mapping->base, mapping->size);
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun /* Atomic map/unmap */
64*4882a593Smuzhiyun static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping * mapping,unsigned long offset)65*4882a593Smuzhiyun io_mapping_map_atomic_wc(struct io_mapping *mapping,
66*4882a593Smuzhiyun 			 unsigned long offset)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun 	resource_size_t phys_addr;
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	BUG_ON(offset >= mapping->size);
71*4882a593Smuzhiyun 	phys_addr = mapping->base + offset;
72*4882a593Smuzhiyun 	return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun static inline void
io_mapping_unmap_atomic(void __iomem * vaddr)76*4882a593Smuzhiyun io_mapping_unmap_atomic(void __iomem *vaddr)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun 	iounmap_atomic(vaddr);
79*4882a593Smuzhiyun }
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun static inline void __iomem *
io_mapping_map_wc(struct io_mapping * mapping,unsigned long offset,unsigned long size)82*4882a593Smuzhiyun io_mapping_map_wc(struct io_mapping *mapping,
83*4882a593Smuzhiyun 		  unsigned long offset,
84*4882a593Smuzhiyun 		  unsigned long size)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun 	resource_size_t phys_addr;
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	BUG_ON(offset >= mapping->size);
89*4882a593Smuzhiyun 	phys_addr = mapping->base + offset;
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 	return ioremap_wc(phys_addr, size);
92*4882a593Smuzhiyun }
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun static inline void
io_mapping_unmap(void __iomem * vaddr)95*4882a593Smuzhiyun io_mapping_unmap(void __iomem *vaddr)
96*4882a593Smuzhiyun {
97*4882a593Smuzhiyun 	iounmap(vaddr);
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun #else
101*4882a593Smuzhiyun 
102*4882a593Smuzhiyun #include <linux/uaccess.h>
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun /* Create the io_mapping object*/
105*4882a593Smuzhiyun static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping * iomap,resource_size_t base,unsigned long size)106*4882a593Smuzhiyun io_mapping_init_wc(struct io_mapping *iomap,
107*4882a593Smuzhiyun 		   resource_size_t base,
108*4882a593Smuzhiyun 		   unsigned long size)
109*4882a593Smuzhiyun {
110*4882a593Smuzhiyun 	iomap->iomem = ioremap_wc(base, size);
111*4882a593Smuzhiyun 	if (!iomap->iomem)
112*4882a593Smuzhiyun 		return NULL;
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun 	iomap->base = base;
115*4882a593Smuzhiyun 	iomap->size = size;
116*4882a593Smuzhiyun #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
117*4882a593Smuzhiyun 	iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
118*4882a593Smuzhiyun #elif defined(pgprot_writecombine)
119*4882a593Smuzhiyun 	iomap->prot = pgprot_writecombine(PAGE_KERNEL);
120*4882a593Smuzhiyun #else
121*4882a593Smuzhiyun 	iomap->prot = pgprot_noncached(PAGE_KERNEL);
122*4882a593Smuzhiyun #endif
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun 	return iomap;
125*4882a593Smuzhiyun }
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun static inline void
io_mapping_fini(struct io_mapping * mapping)128*4882a593Smuzhiyun io_mapping_fini(struct io_mapping *mapping)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun 	iounmap(mapping->iomem);
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun /* Non-atomic map/unmap */
134*4882a593Smuzhiyun static inline void __iomem *
io_mapping_map_wc(struct io_mapping * mapping,unsigned long offset,unsigned long size)135*4882a593Smuzhiyun io_mapping_map_wc(struct io_mapping *mapping,
136*4882a593Smuzhiyun 		  unsigned long offset,
137*4882a593Smuzhiyun 		  unsigned long size)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun 	return mapping->iomem + offset;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun static inline void
io_mapping_unmap(void __iomem * vaddr)143*4882a593Smuzhiyun io_mapping_unmap(void __iomem *vaddr)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun /* Atomic map/unmap */
148*4882a593Smuzhiyun static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping * mapping,unsigned long offset)149*4882a593Smuzhiyun io_mapping_map_atomic_wc(struct io_mapping *mapping,
150*4882a593Smuzhiyun 			 unsigned long offset)
151*4882a593Smuzhiyun {
152*4882a593Smuzhiyun 	preempt_disable();
153*4882a593Smuzhiyun 	pagefault_disable();
154*4882a593Smuzhiyun 	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun static inline void
io_mapping_unmap_atomic(void __iomem * vaddr)158*4882a593Smuzhiyun io_mapping_unmap_atomic(void __iomem *vaddr)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun 	io_mapping_unmap(vaddr);
161*4882a593Smuzhiyun 	pagefault_enable();
162*4882a593Smuzhiyun 	preempt_enable();
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun #endif /* HAVE_ATOMIC_IOMAP */
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,unsigned long size)168*4882a593Smuzhiyun io_mapping_create_wc(resource_size_t base,
169*4882a593Smuzhiyun 		     unsigned long size)
170*4882a593Smuzhiyun {
171*4882a593Smuzhiyun 	struct io_mapping *iomap;
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun 	iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
174*4882a593Smuzhiyun 	if (!iomap)
175*4882a593Smuzhiyun 		return NULL;
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun 	if (!io_mapping_init_wc(iomap, base, size)) {
178*4882a593Smuzhiyun 		kfree(iomap);
179*4882a593Smuzhiyun 		return NULL;
180*4882a593Smuzhiyun 	}
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	return iomap;
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun 
/* Counterpart of io_mapping_create_wc(): tear down, then free the object. */
static inline void
io_mapping_free(struct io_mapping *mapping)
{
	io_mapping_fini(mapping);
	kfree(mapping);
}
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun #endif /* _LINUX_IO_MAPPING_H */
193