xref: /OK3568_Linux_fs/kernel/mm/memremap.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

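/* Tracks live dev_pagemap instances by PFN range, for get_dev_pagemap() lookups. */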
static DEFINE_XARRAY(pgmap_array);

/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif
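
/*
 * Illustrative sketch (not part of this file): a namespace provider,
 * e.g. the nvdimm pfn code, can use memremap_compat_align() to reject
 * a base address that a later mode switch could not support. "res" is
 * a hypothetical resource describing the namespace:
 *
 *	if (!IS_ALIGNED(res->start, memremap_compat_align()))
 *		return -EINVAL;
 */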

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);

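/*
 * MEMORY_DEVICE_PRIVATE and MEMORY_DEVICE_FS_DAX pages need their final
 * put_page() routed to free_devmap_managed_page(); this static branch
 * keeps that check off the put_page() fast path while no such pagemaps
 * are active.
 */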
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_inc(&devmap_managed_key);
}
#else
static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
}
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

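/*
 * Drop the range's entries from pgmap_array; synchronize_rcu() pairs
 * with the RCU read-side lookup in get_dev_pagemap().
 */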
static void pgmap_array_delete(struct range *range)
{
	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	unsigned long pfn = PHYS_PFN(range->start);

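	/* An altmap, if present, applies only to the first range (see pagemap_range()). */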
	if (range_id)
		return pfn;
	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}

bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	int i;

	for (i = 0; i < pgmap->nr_range; i++) {
		struct range *range = &pgmap->ranges[i];

		if (pfn >= PHYS_PFN(range->start) &&
		    pfn <= PHYS_PFN(range->end))
			return pfn >= pfn_first(pgmap, i);
	}

	return false;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
	const struct range *range = &pgmap->ranges[range_id];

	return (range->start + range_len(range)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map, i) \
	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))

static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
	/*
	 * Undo the pgmap ref assignment for the internal case as the
	 * caller may re-enable the same pgmap.
	 */
	if (pgmap->ref == &pgmap->internal_ref)
		pgmap->ref = NULL;
}

static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	struct page *first_page;
	int nid;

	/* make sure to access a memmap that was actually initialized */
	first_page = pfn_to_page(pfn_first(pgmap, range_id));

	/* pages are dead and unused, undo the arch mapping */
	nid = page_to_nid(first_page);

	mem_hotplug_begin();
	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
				   PHYS_PFN(range_len(range)));
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		__remove_pages(PHYS_PFN(range->start),
			       PHYS_PFN(range_len(range)), NULL);
	} else {
		arch_remove_memory(nid, range->start, range_len(range),
				pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(range->start), range_len(range));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
	pgmap_array_delete(range);
}

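/**
 * memunmap_pages - undo memremap_pages()
 * @pgmap: dev_pagemap previously set up by memremap_pages()
 *
 * Kills the reference count, drops the page references taken when the
 * ranges were mapped, waits for all outstanding users to go idle, and
 * then tears the ranges back down.
 */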
void memunmap_pages(struct dev_pagemap *pgmap)
{
	unsigned long pfn;
	int i;

	dev_pagemap_kill(pgmap);
	for (i = 0; i < pgmap->nr_range; i++)
		for_each_device_pfn(pfn, pgmap, i)
			put_page(pfn_to_page(pfn));
	dev_pagemap_cleanup(pgmap);

	for (i = 0; i < pgmap->nr_range; i++)
		pageunmap_range(pgmap, i);

	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put(pgmap);
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
	memunmap_pages(data);
}

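/*
 * Release callback for the internal percpu_ref: signals the completion
 * that dev_pagemap_cleanup() waits on before tearing down.
 */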
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}

static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
		int range_id, int nid)
{
	struct range *range = &pgmap->ranges[range_id];
	struct dev_pagemap *conflict_pgmap;
	int error, is_ram;

	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
				"altmap not supported for multiple ranges\n"))
		return -EINVAL;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	is_ram = region_intersects(range->start, range_len(range),
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
				is_ram == REGION_MIXED ? "mixed" : "ram",
				range->start, range->end);
		return -ENXIO;
	}

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
	if (error)
		return error;

	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
			range_len(range));
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is inaccessible from the CPU, so we
	 * do not want to create a linear mapping for it the way
	 * arch_add_memory() would.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params);
	} else {
		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, range->start, range_len(range),
					params);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params->altmap,
				MIGRATE_MOVABLE);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
			- pfn_first(pgmap, range_id));
	return 0;

err_add_memory:
	kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
err_pfn_remap:
	pgmap_array_delete(range);
	return error;
}

/*
 * Not-device-managed version of devm_memremap_pages(), undone by
 * memunmap_pages().  Please use devm_memremap_pages() if you have a
 * struct device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
	struct mhp_params params = {
		.altmap = pgmap_altmap(pgmap),
		.pgprot = PAGE_KERNEL,
	};
	const int nr_range = pgmap->nr_range;
	int error, i;

	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
		return ERR_PTR(-EINVAL);

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops->page_free) {
			WARN(1, "Missing page_free method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->owner) {
			WARN(1, "Missing owner\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		params.pgprot = pgprot_decrypted(params.pgprot);
		break;
	case MEMORY_DEVICE_GENERIC:
		break;
	case MEMORY_DEVICE_PCI_P2PDMA:
		params.pgprot = pgprot_noncached(params.pgprot);
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	if (!pgmap->ref) {
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	devmap_managed_enable_get(pgmap);

	/*
	 * Clear the pgmap nr_range as it will be incremented for each
	 * successfully processed range. This communicates how many
	 * regions to unwind in the abort case.
	 */
	pgmap->nr_range = 0;
	error = 0;
	for (i = 0; i < nr_range; i++) {
		error = pagemap_range(pgmap, &params, i, nid);
		if (error)
			break;
		pgmap->nr_range++;
	}

	if (i < nr_range) {
		memunmap_pages(pgmap);
		pgmap->nr_range = nr_range;
		return ERR_PTR(error);
	}

	return __va(pgmap->ranges[0].start);
}
EXPORT_SYMBOL_GPL(memremap_pages);

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @pgmap
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the range and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
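
/*
 * Illustrative sketch (hypothetical driver code, not part of this
 * file): the caller fills in the range and type members and lets
 * devres handle the teardown.
 *
 *	pgmap->type = MEMORY_DEVICE_GENERIC;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */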

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a page map for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
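
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the cached-pgmap convention lets a pfn walker avoid a fresh xarray
 * lookup when consecutive pfns share a pagemap.
 *
 *	struct dev_pagemap *pgmap = NULL;
 *
 *	for (pfn = start; pfn < end; pfn++) {
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *		if (!pgmap)
 *			break;
 *		...
 *	}
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 */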

#ifdef CONFIG_DEV_PAGEMAP_OPS
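/*
 * Called from the put_page() path when a devmap-managed page's refcount
 * drops to its final reference; refcounting for these pages is 1-based,
 * so a count of 1 means the page is free (see the devmap_managed_key
 * static branch above).
 */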
void free_devmap_managed_page(struct page *page)
{
	/* notify page idle for dax */
	if (!is_device_private_page(page)) {
		wake_up_var(&page->_refcount);
		return;
	}

	__ClearPageWaiters(page);

	mem_cgroup_uncharge(page);

	/*
	 * When a device_private page is freed, the page->mapping field
	 * may still contain a (stale) mapping value. For example, the
	 * lower bits of page->mapping may still identify the page as an
	 * anonymous page. Ultimately, this entire field is just stale
	 * and wrong, and it will cause errors if not cleared.  One
	 * example is:
	 *
	 *  migrate_vma_pages()
	 *    migrate_vma_insert_page()
	 *      page_add_new_anon_rmap()
	 *        __page_set_anon_rmap()
	 *          ...checks page->mapping, via PageAnon(page) call,
	 *            and incorrectly concludes that the page is an
	 *            anonymous page. Therefore, it incorrectly,
	 *            silently fails to set up the new anon rmap.
	 *
	 * For other types of ZONE_DEVICE pages, migration is either
	 * handled differently or not done at all, so there is no need
	 * to clear page->mapping.
	 */
	page->mapping = NULL;
	page->pgmap->ops->page_free(page);
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */