/* Source: kernel/include/linux/memory_hotplug.h (OK3568 Linux tree, rev 4882a593) */
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __LINUX_MEMORY_HOTPLUG_H
3*4882a593Smuzhiyun #define __LINUX_MEMORY_HOTPLUG_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/mmzone.h>
6*4882a593Smuzhiyun #include <linux/spinlock.h>
7*4882a593Smuzhiyun #include <linux/notifier.h>
8*4882a593Smuzhiyun #include <linux/bug.h>
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun struct page;
11*4882a593Smuzhiyun struct zone;
12*4882a593Smuzhiyun struct pglist_data;
13*4882a593Smuzhiyun struct mem_section;
14*4882a593Smuzhiyun struct memory_block;
15*4882a593Smuzhiyun struct resource;
16*4882a593Smuzhiyun struct vmem_altmap;
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #ifdef CONFIG_MEMORY_HOTPLUG
19*4882a593Smuzhiyun /*
20*4882a593Smuzhiyun  * Return page for the valid pfn only if the page is online. All pfn
21*4882a593Smuzhiyun  * walkers which rely on the fully initialized page->flags and others
22*4882a593Smuzhiyun  * should use this rather than pfn_valid && pfn_to_page
23*4882a593Smuzhiyun  */
24*4882a593Smuzhiyun #define pfn_to_online_page(pfn)					   \
25*4882a593Smuzhiyun ({								   \
26*4882a593Smuzhiyun 	struct page *___page = NULL;				   \
27*4882a593Smuzhiyun 	unsigned long ___pfn = pfn;				   \
28*4882a593Smuzhiyun 	unsigned long ___nr = pfn_to_section_nr(___pfn);	   \
29*4882a593Smuzhiyun 								   \
30*4882a593Smuzhiyun 	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
31*4882a593Smuzhiyun 	    pfn_valid_within(___pfn))				   \
32*4882a593Smuzhiyun 		___page = pfn_to_page(___pfn);			   \
33*4882a593Smuzhiyun 	___page;						   \
34*4882a593Smuzhiyun })

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types for control the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun /* Flags for add_memory() and friends to specify memory hotplug details. */
61*4882a593Smuzhiyun typedef int __bitwise mhp_t;
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun /* No special request */
64*4882a593Smuzhiyun #define MHP_NONE		((__force mhp_t)0)
65*4882a593Smuzhiyun /*
66*4882a593Smuzhiyun  * Allow merging of the added System RAM resource with adjacent,
67*4882a593Smuzhiyun  * mergeable resources. After a successful call to add_memory_resource()
68*4882a593Smuzhiyun  * with this flag set, the resource pointer must no longer be used as it
69*4882a593Smuzhiyun  * might be stale, or the resource might have changed.
70*4882a593Smuzhiyun  */
71*4882a593Smuzhiyun #define MEMHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /*
74*4882a593Smuzhiyun  * Extended parameters for memory hotplug:
75*4882a593Smuzhiyun  * altmap: alternative allocator for memmap array (optional)
76*4882a593Smuzhiyun  * pgprot: page protection flags to apply to newly created page tables
77*4882a593Smuzhiyun  *	(required)
78*4882a593Smuzhiyun  */
79*4882a593Smuzhiyun struct mhp_params {
80*4882a593Smuzhiyun 	struct vmem_altmap *altmap;
81*4882a593Smuzhiyun 	pgprot_t pgprot;
82*4882a593Smuzhiyun };
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun /*
85*4882a593Smuzhiyun  * Zone resizing functions
86*4882a593Smuzhiyun  *
87*4882a593Smuzhiyun  * Note: any attempt to resize a zone should has pgdat_resize_lock()
88*4882a593Smuzhiyun  * zone_span_writelock() both held. This ensure the size of a zone
89*4882a593Smuzhiyun  * can't be changed while pgdat_resize_lock() held.
90*4882a593Smuzhiyun  */
zone_span_seqbegin(struct zone * zone)91*4882a593Smuzhiyun static inline unsigned zone_span_seqbegin(struct zone *zone)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun 	return read_seqbegin(&zone->span_seqlock);
94*4882a593Smuzhiyun }
zone_span_seqretry(struct zone * zone,unsigned iv)95*4882a593Smuzhiyun static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
96*4882a593Smuzhiyun {
97*4882a593Smuzhiyun 	return read_seqretry(&zone->span_seqlock, iv);
98*4882a593Smuzhiyun }
zone_span_writelock(struct zone * zone)99*4882a593Smuzhiyun static inline void zone_span_writelock(struct zone *zone)
100*4882a593Smuzhiyun {
101*4882a593Smuzhiyun 	write_seqlock(&zone->span_seqlock);
102*4882a593Smuzhiyun }
zone_span_writeunlock(struct zone * zone)103*4882a593Smuzhiyun static inline void zone_span_writeunlock(struct zone *zone)
104*4882a593Smuzhiyun {
105*4882a593Smuzhiyun 	write_sequnlock(&zone->span_seqlock);
106*4882a593Smuzhiyun }
zone_seqlock_init(struct zone * zone)107*4882a593Smuzhiyun static inline void zone_seqlock_init(struct zone *zone)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun 	seqlock_init(&zone->span_seqlock);
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
112*4882a593Smuzhiyun extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
113*4882a593Smuzhiyun extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
114*4882a593Smuzhiyun /* VM interface that may be used by firmware interface */
115*4882a593Smuzhiyun extern int online_pages(unsigned long pfn, unsigned long nr_pages,
116*4882a593Smuzhiyun 			int online_type, int nid);
117*4882a593Smuzhiyun extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
118*4882a593Smuzhiyun 					 unsigned long end_pfn);
119*4882a593Smuzhiyun extern void __offline_isolated_pages(unsigned long start_pfn,
120*4882a593Smuzhiyun 				     unsigned long end_pfn);
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun extern void generic_online_page(struct page *page, unsigned int order);
125*4882a593Smuzhiyun extern int set_online_page_callback(online_page_callback_t callback);
126*4882a593Smuzhiyun extern int restore_online_page_callback(online_page_callback_t callback);
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun extern int try_online_node(int nid);
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun extern int arch_add_memory(int nid, u64 start, u64 size,
131*4882a593Smuzhiyun 			   struct mhp_params *params);
132*4882a593Smuzhiyun extern u64 max_mem_size;
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun extern int memhp_online_type_from_str(const char *str);
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun /* Default online_type (MMOP_*) when new memory blocks are added. */
137*4882a593Smuzhiyun extern int memhp_default_online_type;
138*4882a593Smuzhiyun /* If movable_node boot option specified */
139*4882a593Smuzhiyun extern bool movable_node_enabled;
movable_node_is_enabled(void)140*4882a593Smuzhiyun static inline bool movable_node_is_enabled(void)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun 	return movable_node_enabled;
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun extern void arch_remove_memory(int nid, u64 start, u64 size,
146*4882a593Smuzhiyun 			       struct vmem_altmap *altmap);
147*4882a593Smuzhiyun extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
148*4882a593Smuzhiyun 			   struct vmem_altmap *altmap);
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun /* reasonably generic interface to expand the physical pages */
151*4882a593Smuzhiyun extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
152*4882a593Smuzhiyun 		       struct mhp_params *params);
153*4882a593Smuzhiyun 

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
/* Default: no arch-specific hook, go straight to the generic implementation. */
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun #ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
166*4882a593Smuzhiyun /*
167*4882a593Smuzhiyun  * For supporting node-hotadd, we have to allocate a new pgdat.
168*4882a593Smuzhiyun  *
169*4882a593Smuzhiyun  * If an arch has generic style NODE_DATA(),
170*4882a593Smuzhiyun  * node_data[nid] = kzalloc() works well. But it depends on the architecture.
171*4882a593Smuzhiyun  *
172*4882a593Smuzhiyun  * In general, generic_alloc_nodedata() is used.
173*4882a593Smuzhiyun  * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
174*4882a593Smuzhiyun  *
175*4882a593Smuzhiyun  */
176*4882a593Smuzhiyun extern pg_data_t *arch_alloc_nodedata(int nid);
177*4882a593Smuzhiyun extern void arch_free_nodedata(pg_data_t *pgdat);
178*4882a593Smuzhiyun extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun #else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun #define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
183*4882a593Smuzhiyun #define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun #ifdef CONFIG_NUMA
186*4882a593Smuzhiyun /*
187*4882a593Smuzhiyun  * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
188*4882a593Smuzhiyun  * XXX: kmalloc_node() can't work well to get new node's memory at this time.
189*4882a593Smuzhiyun  *	Because, pgdat for the new node is not allocated/initialized yet itself.
190*4882a593Smuzhiyun  *	To use new node's memory, more consideration will be necessary.
191*4882a593Smuzhiyun  */
192*4882a593Smuzhiyun #define generic_alloc_nodedata(nid)				\
193*4882a593Smuzhiyun ({								\
194*4882a593Smuzhiyun 	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
195*4882a593Smuzhiyun })
196*4882a593Smuzhiyun /*
197*4882a593Smuzhiyun  * This definition is just for error path in node hotadd.
198*4882a593Smuzhiyun  * For node hotremove, we have to replace this.
199*4882a593Smuzhiyun  */
200*4882a593Smuzhiyun #define generic_free_nodedata(pgdat)	kfree(pgdat)
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun extern pg_data_t *node_data[];
arch_refresh_nodedata(int nid,pg_data_t * pgdat)203*4882a593Smuzhiyun static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
204*4882a593Smuzhiyun {
205*4882a593Smuzhiyun 	node_data[nid] = pgdat;
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun #else /* !CONFIG_NUMA */
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun /* never called */
generic_alloc_nodedata(int nid)211*4882a593Smuzhiyun static inline pg_data_t *generic_alloc_nodedata(int nid)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun 	BUG();
214*4882a593Smuzhiyun 	return NULL;
215*4882a593Smuzhiyun }
generic_free_nodedata(pg_data_t * pgdat)216*4882a593Smuzhiyun static inline void generic_free_nodedata(pg_data_t *pgdat)
217*4882a593Smuzhiyun {
218*4882a593Smuzhiyun }
arch_refresh_nodedata(int nid,pg_data_t * pgdat)219*4882a593Smuzhiyun static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
220*4882a593Smuzhiyun {
221*4882a593Smuzhiyun }
222*4882a593Smuzhiyun #endif /* CONFIG_NUMA */
223*4882a593Smuzhiyun #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun #else /* ! CONFIG_MEMORY_HOTPLUG */
243*4882a593Smuzhiyun #define pfn_to_online_page(pfn)			\
244*4882a593Smuzhiyun ({						\
245*4882a593Smuzhiyun 	struct page *___page = NULL;		\
246*4882a593Smuzhiyun 	if (pfn_valid(pfn))			\
247*4882a593Smuzhiyun 		___page = pfn_to_page(pfn);	\
248*4882a593Smuzhiyun 	___page;				\
249*4882a593Smuzhiyun  })
250*4882a593Smuzhiyun 
zone_span_seqbegin(struct zone * zone)251*4882a593Smuzhiyun static inline unsigned zone_span_seqbegin(struct zone *zone)
252*4882a593Smuzhiyun {
253*4882a593Smuzhiyun 	return 0;
254*4882a593Smuzhiyun }
zone_span_seqretry(struct zone * zone,unsigned iv)255*4882a593Smuzhiyun static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
256*4882a593Smuzhiyun {
257*4882a593Smuzhiyun 	return 0;
258*4882a593Smuzhiyun }
zone_span_writelock(struct zone * zone)259*4882a593Smuzhiyun static inline void zone_span_writelock(struct zone *zone) {}
zone_span_writeunlock(struct zone * zone)260*4882a593Smuzhiyun static inline void zone_span_writeunlock(struct zone *zone) {}
zone_seqlock_init(struct zone * zone)261*4882a593Smuzhiyun static inline void zone_seqlock_init(struct zone *zone) {}
262*4882a593Smuzhiyun 
register_page_bootmem_info_node(struct pglist_data * pgdat)263*4882a593Smuzhiyun static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun }
266*4882a593Smuzhiyun 
try_online_node(int nid)267*4882a593Smuzhiyun static inline int try_online_node(int nid)
268*4882a593Smuzhiyun {
269*4882a593Smuzhiyun 	return 0;
270*4882a593Smuzhiyun }
271*4882a593Smuzhiyun 
get_online_mems(void)272*4882a593Smuzhiyun static inline void get_online_mems(void) {}
put_online_mems(void)273*4882a593Smuzhiyun static inline void put_online_mems(void) {}
274*4882a593Smuzhiyun 
mem_hotplug_begin(void)275*4882a593Smuzhiyun static inline void mem_hotplug_begin(void) {}
mem_hotplug_done(void)276*4882a593Smuzhiyun static inline void mem_hotplug_done(void) {}
277*4882a593Smuzhiyun 
movable_node_is_enabled(void)278*4882a593Smuzhiyun static inline bool movable_node_is_enabled(void)
279*4882a593Smuzhiyun {
280*4882a593Smuzhiyun 	return false;
281*4882a593Smuzhiyun }
282*4882a593Smuzhiyun #endif /* ! CONFIG_MEMORY_HOTPLUG */

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun #ifdef CONFIG_MEMORY_HOTREMOVE
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun extern void try_offline_node(int nid);
315*4882a593Smuzhiyun extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
316*4882a593Smuzhiyun extern int remove_memory(int nid, u64 start, u64 size);
317*4882a593Smuzhiyun extern int remove_memory_subsection(int nid, u64 start, u64 size);
318*4882a593Smuzhiyun extern void __remove_memory(int nid, u64 start, u64 size);
319*4882a593Smuzhiyun extern int offline_and_remove_memory(int nid, u64 start, u64 size);
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun #else
try_offline_node(int nid)322*4882a593Smuzhiyun static inline void try_offline_node(int nid) {}
323*4882a593Smuzhiyun 
offline_pages(unsigned long start_pfn,unsigned long nr_pages)324*4882a593Smuzhiyun static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
325*4882a593Smuzhiyun {
326*4882a593Smuzhiyun 	return -EINVAL;
327*4882a593Smuzhiyun }
328*4882a593Smuzhiyun 
remove_memory(int nid,u64 start,u64 size)329*4882a593Smuzhiyun static inline int remove_memory(int nid, u64 start, u64 size)
330*4882a593Smuzhiyun {
331*4882a593Smuzhiyun 	return -EBUSY;
332*4882a593Smuzhiyun }
333*4882a593Smuzhiyun 
__remove_memory(int nid,u64 start,u64 size)334*4882a593Smuzhiyun static inline void __remove_memory(int nid, u64 start, u64 size) {}
335*4882a593Smuzhiyun #endif /* CONFIG_MEMORY_HOTREMOVE */
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun extern void set_zone_contiguous(struct zone *zone);
338*4882a593Smuzhiyun extern void clear_zone_contiguous(struct zone *zone);
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun #ifdef CONFIG_MEMORY_HOTPLUG
341*4882a593Smuzhiyun extern void __ref free_area_init_core_hotplug(int nid);
342*4882a593Smuzhiyun extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
343*4882a593Smuzhiyun extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
344*4882a593Smuzhiyun extern int add_memory_subsection(int nid, u64 start, u64 size);
345*4882a593Smuzhiyun extern int add_memory_resource(int nid, struct resource *resource,
346*4882a593Smuzhiyun 			       mhp_t mhp_flags);
347*4882a593Smuzhiyun extern int add_memory_driver_managed(int nid, u64 start, u64 size,
348*4882a593Smuzhiyun 				     const char *resource_name,
349*4882a593Smuzhiyun 				     mhp_t mhp_flags);
350*4882a593Smuzhiyun extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
351*4882a593Smuzhiyun 				   unsigned long nr_pages,
352*4882a593Smuzhiyun 				   struct vmem_altmap *altmap, int migratetype);
353*4882a593Smuzhiyun extern void remove_pfn_range_from_zone(struct zone *zone,
354*4882a593Smuzhiyun 				       unsigned long start_pfn,
355*4882a593Smuzhiyun 				       unsigned long nr_pages);
356*4882a593Smuzhiyun extern bool is_memblock_offlined(struct memory_block *mem);
357*4882a593Smuzhiyun extern int sparse_add_section(int nid, unsigned long pfn,
358*4882a593Smuzhiyun 		unsigned long nr_pages, struct vmem_altmap *altmap);
359*4882a593Smuzhiyun extern void sparse_remove_section(struct mem_section *ms,
360*4882a593Smuzhiyun 		unsigned long pfn, unsigned long nr_pages,
361*4882a593Smuzhiyun 		unsigned long map_offset, struct vmem_altmap *altmap);
362*4882a593Smuzhiyun extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
363*4882a593Smuzhiyun 					  unsigned long pnum);
364*4882a593Smuzhiyun extern struct zone *zone_for_pfn_range(int online_type, int nid,
365*4882a593Smuzhiyun 		unsigned long start_pfn, unsigned long nr_pages);
366*4882a593Smuzhiyun #endif /* CONFIG_MEMORY_HOTPLUG */
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun #endif /* __LINUX_MEMORY_HOTPLUG_H */
369