/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @end_pfn: first pfn past the end of the dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
	const unsigned long base_pfn;
	const unsigned long end_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};

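/*
 * Illustrative layout (a sketch, not taken from this file): a driver can
 * reserve the start of the mapping for its own metadata and set aside a
 * run of pages so that vmemmap_populate() can carve the memmap out of
 * device memory itself:
 *
 *	base_pfn                                            end_pfn
 *	|-- reserve --|-- free (memmap storage) --|-- data pages --|
 *
 * pfn_to_page() only becomes valid at base_pfn + reserve + free, which
 * is what vmem_altmap_offset() (declared below) reports.
 */
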
/*
 * Specialize ZONE_DEVICE memory into multiple types, each having a
 * different usage.
 *
 * MEMORY_DEVICE_PRIVATE:
 * Device memory that is not directly addressable by the CPU: the CPU can
 * neither read nor write private memory. In this case, we do still have
 * struct pages backing the device memory. Doing so simplifies the
 * implementation, but it is important to remember that there are certain
 * points at which the struct page must be treated as an opaque object,
 * rather than a "normal" struct page.
 *
 * A more complete discussion of unaddressable memory may be found in
 * include/linux/hmm.h and Documentation/vm/hmm.rst.
 *
 * MEMORY_DEVICE_FS_DAX:
 * Host memory that has similar access semantics as System RAM, i.e. DMA
 * coherent and supports page pinning. In support of coordinating page
 * pinning vs other operations, MEMORY_DEVICE_FS_DAX arranges for a
 * wakeup event whenever a page is unpinned and becomes idle. This
 * wakeup is used to coordinate physical address space management (ex:
 * fs truncate/hole punch) vs pinned pages (ex: device dma).
 *
 * MEMORY_DEVICE_GENERIC:
 * Host memory that has similar access semantics as System RAM, i.e. DMA
 * coherent and supports page pinning. This is for example used by DAX
 * devices that expose memory using a character device.
 *
 * MEMORY_DEVICE_PCI_P2PDMA:
 * Device memory residing in a PCI BAR, intended for use with peer-to-peer
 * transactions.
 */
enum memory_type {
	/* 0 is reserved to catch uninitialized type fields */
	MEMORY_DEVICE_PRIVATE = 1,
	MEMORY_DEVICE_FS_DAX,
	MEMORY_DEVICE_GENERIC,
	MEMORY_DEVICE_PCI_P2PDMA,
};

struct dev_pagemap_ops {
	/*
	 * Called once the page refcount reaches 1. (ZONE_DEVICE pages never
	 * reach 0 refcount unless there is a refcount bug. This allows the
	 * device driver to implement its own memory management.)
	 */
	void (*page_free)(struct page *page);

	/*
	 * Transition the refcount in struct dev_pagemap to the dead state.
	 */
	void (*kill)(struct dev_pagemap *pgmap);

	/*
	 * Wait for refcount in struct dev_pagemap to be idle and reap it.
	 */
	void (*cleanup)(struct dev_pagemap *pgmap);

	/*
	 * Used for private (un-addressable) device memory only. Must migrate
	 * the page back to a CPU accessible page.
	 */
	vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
};
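
/*
 * A minimal sketch of an ops table for MEMORY_DEVICE_PRIVATE memory.
 * Illustrative only; the my_*() callbacks are hypothetical driver code:
 *
 *	static void my_page_free(struct page *page)
 *	{
 *		... return the backing page to the driver's allocator ...
 *	}
 *
 *	static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
 *	{
 *		... migrate vmf->page back to system RAM, then let the
 *		... faulting access retry against the new page ...
 *		return 0;
 *	}
 *
 *	static const struct dev_pagemap_ops my_pagemap_ops = {
 *		.page_free	= my_page_free,
 *		.migrate_to_ram	= my_migrate_to_ram,
 *	};
 */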

#define PGMAP_ALTMAP_VALID (1 << 0)

/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @internal_ref: internal reference if @ref is not provided by the caller
 * @done: completion for @internal_ref
 * @type: memory type: see MEMORY_* in memory_hotplug.h
 * @flags: PGMAP_* flags to specify detailed behavior
 * @ops: method table
 * @owner: an opaque pointer identifying the entity that manages this
 *	instance. Used by various helpers to make sure that no
 *	foreign ZONE_DEVICE memory is accessed.
 * @nr_range: number of ranges to be mapped
 * @range: range to be mapped when nr_range == 1
 * @ranges: array of ranges to be mapped when nr_range > 1
 */
struct dev_pagemap {
	struct vmem_altmap altmap;
	struct percpu_ref *ref;
	struct percpu_ref internal_ref;
	struct completion done;
	enum memory_type type;
	unsigned int flags;
	const struct dev_pagemap_ops *ops;
	void *owner;
	int nr_range;
	union {
		struct range range;
		struct range ranges[0];
	};
};

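/*
 * A minimal sketch of mapping a single range of device memory.
 * Illustrative only; my_pgmap, dev and res are hypothetical:
 *
 *	static struct dev_pagemap my_pgmap;
 *	void *addr;
 *
 *	my_pgmap.type = MEMORY_DEVICE_GENERIC;
 *	my_pgmap.nr_range = 1;
 *	my_pgmap.range.start = res->start;
 *	my_pgmap.range.end = res->end;
 *	addr = devm_memremap_pages(dev, &my_pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * When @ref is left NULL, memremap_pages() falls back to @internal_ref
 * and waits on @done when the mapping is torn down.
 */
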
static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
	if (pgmap->flags & PGMAP_ALTMAP_VALID)
		return &pgmap->altmap;
	return NULL;
}

#ifdef CONFIG_ZONE_DEVICE
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
unsigned long memremap_compat_align(void);
#else
static inline void *devm_memremap_pages(struct device *dev,
		struct dev_pagemap *pgmap)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled; this requires callers to fall
	 * back to plain devm_memremap() based on config.
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}

static inline void devm_memunmap_pages(struct device *dev,
		struct dev_pagemap *pgmap)
{
}

static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	return NULL;
}

static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	return false;
}

static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	return 0;
}

static inline void vmem_altmap_free(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
}

/* when memremap_pages() is disabled all archs can remap a single page */
static inline unsigned long memremap_compat_align(void)
{
	return PAGE_SIZE;
}
#endif /* CONFIG_ZONE_DEVICE */

static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		percpu_ref_put(pgmap->ref);
}
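
/*
 * Typical lookup pattern (a sketch): passing the pgmap from a previous
 * call back in keeps repeated lookups within the same mapping cheap;
 * drop the reference once the pfn is no longer in use:
 *
 *	pgmap = get_dev_pagemap(pfn, pgmap);
 *	if (!pgmap)
 *		return -EFAULT;
 *	... use pfn_to_page(pfn) ...
 *	put_dev_pagemap(pgmap);
 */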

#endif /* _LINUX_MEMREMAP_H_ */