/*
 * Generic I/O functions.
 *
 * Copyright (c) 2016 Imagination Technologies Ltd.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef __ASM_GENERIC_IO_H__
#define __ASM_GENERIC_IO_H__

/*
 * This file should be included at the end of each architecture-specific
 * asm/io.h such that we may provide generic implementations without
 * conflicting with architecture-specific code.
 */
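
/*
 * For example (an illustrative sketch only, not taken from any particular
 * architecture): an asm/io.h that wants to supply its own phys_to_virt()
 * would define both the function and a matching macro before including this
 * header, so that the generic fallback below is skipped. The fixed offset of
 * 0x80000000 is hypothetical.
 *
 *	static inline void *phys_to_virt(phys_addr_t paddr)
 *	{
 *		return (void *)(unsigned long)(paddr + 0x80000000);
 *	}
 *	#define phys_to_virt phys_to_virt
 *
 *	#include <asm-generic/io.h>
 */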

#ifndef __ASSEMBLY__

/**
 * phys_to_virt() - Return a virtual address mapped to a given physical address
 * @paddr: the physical address
 *
 * Returns a virtual address which the CPU can access that maps to the physical
 * address @paddr. This should only be used where it is known that no dynamic
 * mapping is required. In general, map_physmem should be used instead.
 *
 * Returns: a virtual address which maps to @paddr
 */
#ifndef phys_to_virt
static inline void *phys_to_virt(phys_addr_t paddr)
{
	return (void *)(unsigned long)paddr;
}
#endif

/**
 * virt_to_phys() - Return the physical address that a virtual address maps to
 * @vaddr: the virtual address
 *
 * Returns the physical address which the CPU-accessible virtual address @vaddr
 * maps to.
 *
 * Returns: the physical address which @vaddr maps to
 */
#ifndef virt_to_phys
static inline phys_addr_t virt_to_phys(void *vaddr)
{
	return (phys_addr_t)((unsigned long)vaddr);
}
#endif
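
/*
 * Example (an illustrative sketch, not part of this header): with the generic
 * identity implementations above, converting an address back and forth is a
 * no-op apart from the casts. The address used is hypothetical.
 *
 *	phys_addr_t pa = 0x10000000;
 *	void *va = phys_to_virt(pa);
 *
 *	assert(virt_to_phys(va) == pa);
 */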

/*
 * Flags for use with map_physmem() & unmap_physmem(). Architectures need not
 * support all of these, in which case they will be defined as zero here &
 * ignored. Callers that may run on multiple architectures should therefore
 * treat them as hints rather than requirements.
 */
#ifndef MAP_NOCACHE
# define MAP_NOCACHE	0	/* Produce an uncached mapping */
#endif
#ifndef MAP_WRCOMBINE
# define MAP_WRCOMBINE	0	/* Allow write-combining on the mapping */
#endif
#ifndef MAP_WRBACK
# define MAP_WRBACK	0	/* Map using write-back caching */
#endif
#ifndef MAP_WRTHROUGH
# define MAP_WRTHROUGH	0	/* Map using write-through caching */
#endif

/**
 * map_physmem() - Return a virtual address mapped to a given physical address
 * @paddr: the physical address
 * @len: the length of the required mapping
 * @flags: flags affecting the type of mapping
 *
 * Return a virtual address through which the CPU may access the memory at
 * physical address @paddr. The mapping will be valid for at least @len bytes,
 * and may be affected by flags passed to the @flags argument. This function
 * may create new mappings, so should generally be paired with a matching call
 * to unmap_physmem once the caller is finished with the memory in question.
 *
 * Returns: a virtual address suitably mapped to @paddr
 */
#ifndef map_physmem
static inline void *map_physmem(phys_addr_t paddr, unsigned long len,
				unsigned long flags)
{
	return phys_to_virt(paddr);
}
#endif

/**
 * unmap_physmem() - Remove mappings created by a prior call to map_physmem()
 * @vaddr: the virtual address which map_physmem() previously returned
 * @flags: flags matching those originally passed to map_physmem()
 *
 * Unmap memory which was previously mapped by a call to map_physmem(). If
 * map_physmem() dynamically created a mapping for the memory in question then
 * unmap_physmem() will remove that mapping.
 */
#ifndef unmap_physmem
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{
}
#endif
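
/*
 * Example (an illustrative sketch, not taken from a real driver): a caller
 * maps a device's registers uncached, accesses them, then unmaps using the
 * same flags it passed to map_physmem(). The base address 0x1f000000 and the
 * register offset 0x4 are hypothetical.
 *
 *	void *regs = map_physmem(0x1f000000, 0x1000, MAP_NOCACHE);
 *
 *	writel(1, regs + 0x4);
 *	(void)readl(regs + 0x4);
 *
 *	unmap_physmem(regs, MAP_NOCACHE);
 */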

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_IO_H__ */