/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);
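
/*
 * Example (an illustrative sketch, not a real caller): a typical
 * migrate_pages() user isolates pages onto a private list, passes an
 * allocation callback, and puts back anything that could not be migrated.
 * The migration_target_control initializer (members live in mm/internal.h)
 * and the MR_MEMORY_HOTPLUG reason are assumptions for illustration; see
 * mm/migrate.c and mm/memory_hotplug.c for real callers.
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = { .nid = NUMA_NO_NODE };
 *	int ret;
 *
 *	(isolate pages and add them to &pagelist here)
 *
 *	ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC,
 *			    MR_MEMORY_HOTPLUG);
 *	if (ret)
 *		putback_movable_pages(&pagelist);
 */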

extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif
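
/*
 * Example (an illustrative sketch): a driver that wants its pages handled by
 * compaction registers address_space_operations carrying the migration hooks
 * and marks each page movable while holding the page lock, roughly as
 * described in Documentation/vm/page_migration.rst. The balloon_* names and
 * the balloon_mapping address_space are assumptions for illustration.
 *
 *	static const struct address_space_operations balloon_aops = {
 *		.isolate_page	= balloon_isolate_page,
 *		.migratepage	= balloon_migratepage,
 *		.putback_page	= balloon_putback_page,
 *	};
 *
 *	lock_page(page);
 *	__SetPageMovable(page, balloon_mapping);
 *	unlock_page(page);
 */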

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_fault *vmf, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_fault *vmf, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */


#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where unsigned long is only 32 bits wide:
 * it might not have enough bits to store both the full physical address and
 * the flags below. So far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
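
/*
 * Example: encoding/decoding round-trip. A caller filling the src/dst arrays
 * below stores a pfn plus flag bits in one unsigned long, ORing in
 * MIGRATE_PFN_WRITE (or other flags) as needed. The variables here are
 * hypothetical.
 *
 *	unsigned long mpfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_WRITE;
 *	struct page *p = migrate_pfn_to_page(mpfn);	(p == page)
 *	bool writable = mpfn & MIGRATE_PFN_WRITE;	(true)
 */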

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The caller must not modify the src array after migrate_vma_setup()
	 * returns, and must not change the dst array after
	 * migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
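
/*
 * Example (an illustrative sketch): device-driver migration follows the
 * setup -> pages -> finalize sequence described in Documentation/vm/hmm.rst.
 * Error handling, dst-page allocation, and the copy step are elided; the
 * array size, vma/start variables, and driver_private_owner pointer are
 * assumptions for illustration.
 *
 *	unsigned long src[64], dst[64];
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.src		= src,
 *		.dst		= dst,
 *		.start		= start,
 *		.end		= start + 64 * PAGE_SIZE,
 *		.pgmap_owner	= driver_private_owner,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *	(allocate dst pages, copy data, fill args.dst with migrate_pfn() values)
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);
 */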

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */