Lines Matching +full:cpu +full:-nr (mm/swap_slots.c, Android common kernel)
1 // SPDX-License-Identifier: GPL-2.0
11 * it into local per cpu caches. This has the advantage
31 #include <linux/cpu.h>
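
For orientation: every match below manipulates the per-CPU struct swap_slots_cache. Its definition lives in include/linux/swap_slots.h; reproduced here as a reading aid (the field comments are mine):

	struct swap_slots_cache {
		bool		lock_initialized;
		struct mutex	alloc_lock;	/* protects slots, cur, nr */
		swp_entry_t	*slots;		/* slots cached for allocation */
		int		nr;		/* allocatable slots remaining */
		int		cur;		/* next slot to hand out */
		spinlock_t	free_lock;	/* protects slots_ret, n_ret */
		swp_entry_t	*slots_ret;	/* slots cached for freeing */
		int		n_ret;		/* entries in slots_ret */
	};
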
71 /* Must not be called with cpu hot plug lock */
77 /* serialize with cpu hotplug operations */ in disable_swap_slots_cache_lock()
124 static int alloc_swap_slot_cache(unsigned int cpu) in alloc_swap_slot_cache() argument
136 trace_android_rvh_alloc_swap_slot_cache(&per_cpu(swp_slots, cpu), in alloc_swap_slot_cache()
138 trace_android_vh_alloc_swap_slot_cache(&per_cpu(swp_slots, cpu), in alloc_swap_slot_cache()
145 return -ENOMEM; in alloc_swap_slot_cache()
151 return -ENOMEM; in alloc_swap_slot_cache()
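
The two -ENOMEM returns at file lines 145 and 151 sit right after allocations that the query did not match. A sketch of what they guard, assuming the kvcalloc()-based shape of recent kernels (the exact allocator varies by version):

	slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);	/* don't leak the first array */
		return -ENOMEM;
	}
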
155 cache = &per_cpu(swp_slots, cpu); in alloc_swap_slot_cache()
156 if (cache->slots || cache->slots_ret) { in alloc_swap_slot_cache()
166 if (!cache->lock_initialized) { in alloc_swap_slot_cache()
167 mutex_init(&cache->alloc_lock); in alloc_swap_slot_cache()
168 spin_lock_init(&cache->free_lock); in alloc_swap_slot_cache()
169 cache->lock_initialized = true; in alloc_swap_slot_cache()
171 cache->nr = 0; in alloc_swap_slot_cache()
172 cache->cur = 0; in alloc_swap_slot_cache()
173 cache->n_ret = 0; in alloc_swap_slot_cache()
176 * !cache->slots or !cache->slots_ret to know if it is safe to acquire in alloc_swap_slot_cache()
181 cache->slots = slots; in alloc_swap_slot_cache()
182 cache->slots_ret = slots_ret; in alloc_swap_slot_cache()
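
The comment fragment at file line 176 refers to lockless readers that test cache->slots / cache->slots_ret before taking the corresponding lock; the two stores above must therefore be ordered after the lock initialization. Upstream this is a full memory barrier immediately before the assignments, roughly:

	mb();	/* publish initialized locks before the pointers */
	cache->slots = slots;
	cache->slots_ret = slots_ret;
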
187 static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type, in drain_slots_cache_cpu() argument
194 cache = &per_cpu(swp_slots, cpu); in drain_slots_cache_cpu()
201 if ((type & SLOTS_CACHE) && cache->slots) { in drain_slots_cache_cpu()
202 mutex_lock(&cache->alloc_lock); in drain_slots_cache_cpu()
203 swapcache_free_entries(cache->slots + cache->cur, cache->nr); in drain_slots_cache_cpu()
204 cache->cur = 0; in drain_slots_cache_cpu()
205 cache->nr = 0; in drain_slots_cache_cpu()
206 if (free_slots && cache->slots) { in drain_slots_cache_cpu()
207 kvfree(cache->slots); in drain_slots_cache_cpu()
208 cache->slots = NULL; in drain_slots_cache_cpu()
210 mutex_unlock(&cache->alloc_lock); in drain_slots_cache_cpu()
212 if ((type & SLOTS_CACHE_RET) && cache->slots_ret) { in drain_slots_cache_cpu()
213 spin_lock_irq(&cache->free_lock); in drain_slots_cache_cpu()
214 swapcache_free_entries(cache->slots_ret, cache->n_ret); in drain_slots_cache_cpu()
215 cache->n_ret = 0; in drain_slots_cache_cpu()
216 if (free_slots && cache->slots_ret) { in drain_slots_cache_cpu()
217 slots = cache->slots_ret; in drain_slots_cache_cpu()
218 cache->slots_ret = NULL; in drain_slots_cache_cpu()
220 spin_unlock_irq(&cache->free_lock); in drain_slots_cache_cpu()
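
The match stops at the unlock, but the pointer saved at file lines 217-218 exists precisely so the buffer can be released afterwards: kvfree() may sleep when the buffer was vmalloc()ed, so it must not run under the irq-disabled free_lock. The elided tail is roughly:

	if (slots)
		kvfree(slots);	/* may sleep, hence outside free_lock */
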
228 unsigned int cpu; in __drain_swap_slots_cache() local
239 * We cannot acquire cpu hot plug lock here as in __drain_swap_slots_cache()
240 * this function can be invoked in the cpu in __drain_swap_slots_cache()
242 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback in __drain_swap_slots_cache()
243 * -> memory allocation -> direct reclaim -> get_swap_page in __drain_swap_slots_cache()
244 * -> drain_swap_slots_cache in __drain_swap_slots_cache()
246 * Hence the loop over current online cpu below could miss cpu that in __drain_swap_slots_cache()
249 * cpu before it has been marked online. Hence, we will not in __drain_swap_slots_cache()
250 * fill any swap slots in slots cache of such cpu. in __drain_swap_slots_cache()
251 * There are no slots on such cpu that need to be drained. in __drain_swap_slots_cache()
253 for_each_online_cpu(cpu) in __drain_swap_slots_cache()
254 drain_slots_cache_cpu(cpu, type, false); in __drain_swap_slots_cache()
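
alloc_swap_slot_cache() and free_slot_cache() (file lines 124 and 257) have the int (*)(unsigned int cpu) shape of CPU-hotplug callbacks. A minimal sketch of registering such a pair; the dynamic state constant and the state name here are illustrative assumptions, not necessarily what this file uses:

	#include <linux/cpuhotplug.h>

	static int __init swap_slots_cache_hotplug_init(void)
	{
		int ret;

		/* CPUHP_AP_ONLINE_DYN allocates a state slot dynamically;
		 * the startup callback runs as each CPU comes online and
		 * the teardown callback as it goes offline. */
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"swap_slots_cache",
					alloc_swap_slot_cache,
					free_slot_cache);
		return ret < 0 ? ret : 0;
	}
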
257 static int free_slot_cache(unsigned int cpu) in free_slot_cache() argument
260 drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true); in free_slot_cache()
288 if (!use_swap_slot_cache || cache->nr) in refill_swap_slots_cache()
291 cache->cur = 0; in refill_swap_slots_cache()
293 cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, in refill_swap_slots_cache()
294 cache->slots, 1); in refill_swap_slots_cache()
296 return cache->nr; in refill_swap_slots_cache()
309 if (likely(use_swap_slot_cache && cache->slots_ret)) { in free_swap_slot()
310 spin_lock_irq(&cache->free_lock); in free_swap_slot()
312 if (!use_swap_slot_cache || !cache->slots_ret) { in free_swap_slot()
313 spin_unlock_irq(&cache->free_lock); in free_swap_slot()
316 if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) { in free_swap_slot()
323 swapcache_free_entries(cache->slots_ret, cache->n_ret); in free_swap_slot()
324 cache->n_ret = 0; in free_swap_slot()
326 cache->slots_ret[cache->n_ret++] = entry; in free_swap_slot()
327 spin_unlock_irq(&cache->free_lock); in free_swap_slot()
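
When the return cache cannot be used (disabled, or slots_ret torn down by the time free_lock is taken; see file line 312), the entry is freed straight to the global pool instead. In the upstream function that fallback is simply:

	swapcache_free_entries(&entry, 1);	/* bypass the per-CPU cache */
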
357 * accesses to the per-CPU data structure are protected by the in get_swap_page()
358 * mutex cache->alloc_lock. in get_swap_page()
360 * The alloc path here does not touch cache->slots_ret in get_swap_page()
361 * so cache->free_lock is not taken. in get_swap_page()
365 if (likely(check_cache_active() && cache->slots)) { in get_swap_page()
366 mutex_lock(&cache->alloc_lock); in get_swap_page()
367 if (cache->slots) { in get_swap_page()
369 if (cache->nr) { in get_swap_page()
370 entry = cache->slots[cache->cur]; in get_swap_page()
371 cache->slots[cache->cur++].val = 0; in get_swap_page()
372 cache->nr--; in get_swap_page()
377 mutex_unlock(&cache->alloc_lock); in get_swap_page()
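
For context: in the upstream function, the branch paired with the hit path at file lines 369-372 refills an empty cache and retries, roughly:

	repeat:
		if (cache->nr) {
			entry = cache->slots[cache->cur];
			cache->slots[cache->cur++].val = 0;
			cache->nr--;
		} else if (refill_swap_slots_cache(cache)) {
			goto repeat;	/* retry with the refilled cache */
		}

After the unlock at file line 377, a nonzero entry.val is returned; otherwise the function falls back to get_swap_pages(1, &entry, 1) against the global pool.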