xref: /OK3568_Linux_fs/kernel/mm/swap_slots.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Manage a cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate swap slots from the global pool and put
 * them into local per-CPU caches.  This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return a slot
 * to the local cache without needing to acquire the swap_info
 * lock.  We do not reuse the returned slots directly but
 * move them back to the global pool in a batch.  This
 * allows the slots to coalesce and reduces fragmentation.
 *
 * An allocated swap entry is marked with the SWAP_HAS_CACHE
 * flag in swap_map, which prevents it from being allocated
 * again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock because we may sleep when searching for slots
 * with scan_swap_map().
 */

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <trace/hooks/mm.h>

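/*
 * Each CPU owns one struct swap_slots_cache (declared in
 * <linux/swap_slots.h>).  The allocation side (slots[], cur, nr) is
 * serialized by alloc_lock and refilled in batches of
 * SWAP_SLOTS_CACHE_SIZE; the free side (slots_ret[], n_ret) is
 * serialized by free_lock and flushed back to the global pool once it
 * fills up.
 */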
static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool	swap_slot_cache_active;
bool	swap_slot_cache_enabled;
static bool	swap_slot_cache_initialized;
static DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2
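/*
 * SLOTS_CACHE and SLOTS_CACHE_RET select which half of a per-CPU cache
 * drain_slots_cache_cpu() releases: the allocation array (cache->slots)
 * and/or the return array (cache->slots_ret).
 */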

static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	trace_android_vh_swap_slot_cache_active(false);
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	trace_android_vh_swap_slot_cache_active(true);
	mutex_unlock(&swap_slots_cache_mutex);
}

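/*
 * disable_swap_slots_cache_lock() returns with
 * swap_slots_cache_enable_mutex held; the matching
 * reenable_swap_slots_cache_unlock() below drops it, so the two are
 * meant to be used as a pair (e.g. bracketing swapon/swapoff work).
 */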
/* Must not be called with cpu hot plug lock */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		get_online_cpus();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		put_online_cpus();
	}
}

static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

bool is_swap_slot_cache_enabled(void)
{
	return swap_slot_cache_enabled;
}
EXPORT_SYMBOL_GPL(is_swap_slot_cache_enabled);

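/*
 * check_cache_active() toggles the cache based on how much free swap is
 * left per online CPU.  As an illustration only, assuming the defaults
 * from <linux/swap_slots.h> (SWAP_SLOTS_CACHE_SIZE == SWAP_BATCH == 64,
 * with activation and deactivation thresholds of 5x and 2x that size),
 * a 4-CPU system would reactivate the cache above 4 * 320 = 1280 free
 * swap pages and deactivate it below 4 * 128 = 512.
 */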
bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if the global pool of free slots is too low, deactivate the cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}
EXPORT_SYMBOL_GPL(check_cache_active);

static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;
	bool skip = false;
	int ret = 0;

	/*
	 * Do the allocation outside swap_slots_cache_mutex,
	 * as kvcalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	trace_android_rvh_alloc_swap_slot_cache(&per_cpu(swp_slots, cpu),
		&ret, &skip);
	trace_android_vh_alloc_swap_slot_cache(&per_cpu(swp_slots, cpu),
		&ret, &skip);
	if (skip)
		return ret;
	slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret) {
		/* cache already allocated */
		mutex_unlock(&swap_slots_cache_mutex);

		kvfree(slots);
		kvfree(slots_ret);

		return 0;
	}

	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	/*
	 * We initialized alloc_lock and free_lock earlier.  We use
	 * !cache->slots or !cache->slots_ret to know if it is safe to acquire
	 * the corresponding lock and use the cache.  The memory barrier below
	 * ensures that ordering.
	 */
	mb();
	cache->slots = slots;
	cache->slots_ret = slots_ret;
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

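/*
 * Flush (and optionally free) one CPU's cache.  __drain_swap_slots_cache()
 * calls this with free_slots == false to only push cached entries back to
 * the global pool; the CPU hotplug teardown path (free_slot_cache()) also
 * frees the backing arrays by passing free_slots == true.
 */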
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;
	bool skip = false;

	cache = &per_cpu(swp_slots, cpu);
	trace_android_rvh_drain_slots_cache_cpu(cache, type,
		free_slots, &skip);
	trace_android_vh_drain_slots_cache_cpu(cache, type,
		free_slots, &skip);
	if (skip)
		return;
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		if (slots)
			kvfree(slots);
	}
}

static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   leftover slots are in the cache when we remove
	 *	   a swap device;
	 *	2) disabling of the swap slot cache, when we run low
	 *	   on swap slots while allocating memory and need
	 *	   to return swap slots to the global pool.
	 *
	 * We cannot acquire the cpu hotplug lock here as
	 * this function can be invoked in the cpu
	 * hotplug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *   -> memory allocation -> direct reclaim -> get_swap_page
	 *   -> drain_swap_slots_cache
	 *
	 * Hence the loop over currently online cpus below could miss a cpu
	 * that is being brought online but is not yet marked as online.
	 * That is okay, as we do not schedule and run anything on a
	 * cpu before it has been marked online.  Hence, we will not
	 * fill any swap slots in the slots cache of such a cpu.
	 * There are no slots on such a cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

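/*
 * Register the CPU hotplug callbacks that set up and tear down the
 * per-CPU caches, then enable the cache if there is usable swap.
 * cpuhp_setup_state() also runs alloc_swap_slot_cache() for every CPU
 * that is already online.
 */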
void enable_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	if (!swap_slot_cache_initialized) {
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
					alloc_swap_slot_cache, free_slot_cache);
		if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
				       "without swap slots cache.\n", __func__))
			goto out_unlock;

		swap_slot_cache_initialized = true;
	}

	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
					   cache->slots, 1);

	return cache->nr;
}

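/*
 * Stash a freed swap entry in this CPU's return cache.  Entries are only
 * pushed back to the global pool in batches of SWAP_SLOTS_CACHE_SIZE via
 * swapcache_free_entries(); if the cache is inactive or its return array
 * has not been allocated, the entry is freed directly.
 */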
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;
	bool skip = false;

	cache = raw_cpu_ptr(&swp_slots);
	trace_android_rvh_free_swap_slot(entry, cache, &skip);
	trace_android_vh_free_swap_slot(entry, cache, &skip);
	if (skip)
		return 0;
	if (likely(use_swap_slot_cache && cache->slots_ret)) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in global pool
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}

	return 0;
}

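/*
 * Allocate one swap entry for @page.  THP pages bypass the per-CPU cache
 * and ask the global pool for HPAGE_PMD_NR contiguous slots; order-0
 * pages are served from the per-CPU cache, which is refilled in batches
 * under cache->alloc_lock.  As a rough, hypothetical sketch of a caller
 * on the swap-out path (illustrative only, not lifted from a specific
 * caller):
 *
 *	swp_entry_t entry = get_swap_page(page);
 *
 *	if (!entry.val)
 *		return 0;	// no swap space or memcg swap charge failed
 *	... add the page to the swap cache and write it out ...
 *	put_swap_page(page, entry);	// give the slot back on failure
 */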
swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	struct swap_slots_cache *cache;
	bool found = false;

	entry.val = 0;

	trace_android_rvh_get_swap_page(page, &entry, raw_cpu_ptr(&swp_slots), &found);
	trace_android_vh_get_swap_page(page, &entry, raw_cpu_ptr(&swp_slots), &found);
	if (found)
		goto out;

	if (PageTransHuge(page)) {
		if (IS_ENABLED(CONFIG_THP_SWAP))
			get_swap_pages(1, &entry, HPAGE_PMD_NR);
		goto out;
	}

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (likely(check_cache_active() && cache->slots)) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				entry = cache->slots[cache->cur];
				cache->slots[cache->cur++].val = 0;
				cache->nr--;
			} else if (refill_swap_slots_cache(cache)) {
				goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			goto out;
	}

	get_swap_pages(1, &entry, 1);
out:
	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);
		entry.val = 0;
	}
	return entry;
}