xref: /rk3399_rockchip-uboot/include/asm-generic/io.h (revision f35264bc648ac8bfbbaa780a977ae5dc11f53296)
/*
 * Generic I/O functions.
 *
 * Copyright (c) 2016 Imagination Technologies Ltd.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#ifndef __ASM_GENERIC_IO_H__
#define __ASM_GENERIC_IO_H__

/*
 * This file should be included at the end of each architecture-specific
 * asm/io.h such that we may provide generic implementations without
 * conflicting with architecture-specific code.
 */

#ifndef __ASSEMBLY__

/**
 * phys_to_virt() - Return a virtual address mapped to a given physical address
 * @paddr: the physical address
 *
 * Returns a virtual address which the CPU can access that maps to the physical
 * address @paddr. This should only be used where it is known that no dynamic
 * mapping is required. In general, map_physmem() should be used instead.
 *
 * Returns: a virtual address which maps to @paddr
 */
#ifndef phys_to_virt
static inline void *phys_to_virt(phys_addr_t paddr)
{
	return (void *)(unsigned long)paddr;
}
#endif
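
/*
 * Example usage (an illustrative sketch only; the RAM address 0x80000000 is a
 * hypothetical, board-specific value):
 *
 *	void *ptr = phys_to_virt(0x80000000);
 *	memset(ptr, 0, 64);
 */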

/**
 * virt_to_phys() - Return the physical address that a virtual address maps to
 * @vaddr: the virtual address
 *
 * Returns the physical address which the CPU-accessible virtual address @vaddr
 * maps to.
 *
 * Returns: the physical address which @vaddr maps to
 */
#ifndef virt_to_phys
static inline phys_addr_t virt_to_phys(void *vaddr)
{
	return (phys_addr_t)((unsigned long)vaddr);
}
#endif
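
/*
 * Example usage (an illustrative sketch; tx_buf is a hypothetical driver
 * buffer): a driver would typically use this to hand a buffer's physical
 * address to a DMA engine.
 *
 *	static char tx_buf[512];
 *	phys_addr_t dma_addr = virt_to_phys(tx_buf);
 */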

/*
 * Flags for use with map_physmem() & unmap_physmem(). Architectures need not
 * support all of these, in which case they will be defined as zero here &
 * ignored. Callers that may run on multiple architectures should therefore
 * treat them as hints rather than requirements.
 */
#ifndef MAP_NOCACHE
# define MAP_NOCACHE	0	/* Produce an uncached mapping */
#endif
#ifndef MAP_WRCOMBINE
# define MAP_WRCOMBINE	0	/* Allow write-combining on the mapping */
#endif
#ifndef MAP_WRBACK
# define MAP_WRBACK	0	/* Map using write-back caching */
#endif
#ifndef MAP_WRTHROUGH
# define MAP_WRTHROUGH	0	/* Map using write-through caching */
#endif

/**
 * map_physmem() - Return a virtual address mapped to a given physical address
 * @paddr: the physical address
 * @len: the length of the required mapping
 * @flags: flags affecting the type of mapping
 *
 * Return a virtual address through which the CPU may access the memory at
 * physical address @paddr. The mapping will be valid for at least @len bytes,
 * and may be affected by the flags passed in @flags. This function may create
 * new mappings, so it should generally be paired with a matching call to
 * unmap_physmem() once the caller is finished with the memory in question.
 *
 * Returns: a virtual address suitably mapped to @paddr
 */
#ifndef map_physmem
static inline void *map_physmem(phys_addr_t paddr, unsigned long len,
				unsigned long flags)
{
	return phys_to_virt(paddr);
}
#endif
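
/*
 * Example usage (an illustrative sketch; the register block base address and
 * size are hypothetical, and writel() is the architecture's own accessor).
 * On architectures which define MAP_NOCACHE as zero the flag is accepted but
 * has no effect.
 *
 *	void *regs = map_physmem(0xff760000, 0x1000, MAP_NOCACHE);
 *	writel(0x1, regs);
 */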

/**
 * unmap_physmem() - Remove mappings created by a prior call to map_physmem()
 * @vaddr: the virtual address which map_physmem() previously returned
 * @flags: flags matching those originally passed to map_physmem()
 *
 * Unmap memory which was previously mapped by a call to map_physmem(). If
 * map_physmem() dynamically created a mapping for the memory in question then
 * unmap_physmem() will remove that mapping.
 */
#ifndef unmap_physmem
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{
}
#endif
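
/*
 * Example usage (an illustrative sketch, continuing the hypothetical mapping
 * shown above map_physmem()): release the mapping with the same flags once
 * the caller is done with the memory.
 *
 *	void *regs = map_physmem(0xff760000, 0x1000, MAP_NOCACHE);
 *	...
 *	unmap_physmem(regs, MAP_NOCACHE);
 */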

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_IO_H__ */