Lines matching full:cache in mm/swap_slots.c (Android common kernel)

3  * Manage cache of swap slots to be used for and returned from
25 * The swap slots cache is protected by a mutex instead of
43 /* Serialize swap slots cache enable/disable operations */
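These fragments come from mm/swap_slots.c, which keeps a small per-CPU cache of swap slots so that most allocations and frees avoid the global swap allocator locks. The header comment at line 25 notes that the allocation side is guarded by a mutex rather than a spin lock because finding free slots can sleep. As a reading aid, here is a minimal sketch of the per-CPU structure these lines operate on; the field names all appear in the fragments below, the types and layout are recalled from include/linux/swap_slots.h and should be treated as illustrative:

/* Sketch of the per-CPU cache (layout recalled, not quoted from this tree). */
struct swap_slots_cache {
	bool		lock_initialized;
	struct mutex	alloc_lock;	/* protects slots, nr, cur; alloc side may sleep */
	swp_entry_t	*slots;		/* batch of pre-allocated swap entries */
	int		nr;		/* entries remaining in slots[] */
	int		cur;		/* index of the next entry to hand out */
	spinlock_t	free_lock;	/* protects slots_ret, n_ret; taken with IRQs off */
	swp_entry_t	*slots_ret;	/* batch of entries waiting to be returned */
	int		n_ret;		/* entries accumulated in slots_ret[] */
};

static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);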
116 /* if global pool of slot caches too low, deactivate cache */ in check_cache_active()
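The check_cache_active() fragment refers to a hysteresis: the cache is deactivated when free swap drops below a per-CPU threshold and reactivated once it grows back above a higher one. A hedged sketch of that logic follows; the helper names and THRESHOLD_* constants are recalled from the upstream source and are illustrative here:

/* Sketch of the activate/deactivate hysteresis in check_cache_active(). */
long pages = get_nr_swap_pages();

if (!swap_slot_cache_active) {
	if (pages > num_online_cpus() * THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE *
		    SWAP_SLOTS_CACHE_SIZE)
		reactivate_swap_slots_cache();
} else if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE *
		   SWAP_SLOTS_CACHE_SIZE) {
	/* if global pool of slot caches too low, deactivate cache */
	deactivate_swap_slots_cache();
}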
126 struct swap_slots_cache *cache; in alloc_swap_slot_cache() local
155 cache = &per_cpu(swp_slots, cpu); in alloc_swap_slot_cache()
156 if (cache->slots || cache->slots_ret) { in alloc_swap_slot_cache()
157 /* cache already allocated */ in alloc_swap_slot_cache()
166 if (!cache->lock_initialized) { in alloc_swap_slot_cache()
167 mutex_init(&cache->alloc_lock); in alloc_swap_slot_cache()
168 spin_lock_init(&cache->free_lock); in alloc_swap_slot_cache()
169 cache->lock_initialized = true; in alloc_swap_slot_cache()
171 cache->nr = 0; in alloc_swap_slot_cache()
172 cache->cur = 0; in alloc_swap_slot_cache()
173 cache->n_ret = 0; in alloc_swap_slot_cache()
176 * !cache->slots or !cache->slots_ret to know if it is safe to acquire in alloc_swap_slot_cache()
177 * the corresponding lock and use the cache. Memory barrier below in alloc_swap_slot_cache()
181 cache->slots = slots; in alloc_swap_slot_cache()
182 cache->slots_ret = slots_ret; in alloc_swap_slot_cache()
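Lines 166 to 182 show why alloc_swap_slot_cache() initializes the locks and counters before publishing the slot arrays: readers such as get_swap_page() and free_swap_slot() test cache->slots and cache->slots_ret without holding any lock, so the arrays must only become visible after everything they guard is set up. The "memory barrier below" mentioned at line 177 does not contain the search term and is therefore missing from this listing; a condensed sketch of the ordering, written from memory of the upstream code (which uses a full mb()), looks like this:

/* Publish the arrays only after locks and counters are initialized. */
if (!cache->lock_initialized) {
	mutex_init(&cache->alloc_lock);
	spin_lock_init(&cache->free_lock);
	cache->lock_initialized = true;
}
cache->nr = cache->cur = cache->n_ret = 0;

mb();				/* order the init above against the stores below */
cache->slots = slots;		/* lock-free readers test these pointers ...   */
cache->slots_ret = slots_ret;	/* ... to decide whether the cache is usable   */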
190 struct swap_slots_cache *cache; in drain_slots_cache_cpu() local
194 cache = &per_cpu(swp_slots, cpu); in drain_slots_cache_cpu()
195 trace_android_rvh_drain_slots_cache_cpu(cache, type, in drain_slots_cache_cpu()
197 trace_android_vh_drain_slots_cache_cpu(cache, type, in drain_slots_cache_cpu()
201 if ((type & SLOTS_CACHE) && cache->slots) { in drain_slots_cache_cpu()
202 mutex_lock(&cache->alloc_lock); in drain_slots_cache_cpu()
203 swapcache_free_entries(cache->slots + cache->cur, cache->nr); in drain_slots_cache_cpu()
204 cache->cur = 0; in drain_slots_cache_cpu()
205 cache->nr = 0; in drain_slots_cache_cpu()
206 if (free_slots && cache->slots) { in drain_slots_cache_cpu()
207 kvfree(cache->slots); in drain_slots_cache_cpu()
208 cache->slots = NULL; in drain_slots_cache_cpu()
210 mutex_unlock(&cache->alloc_lock); in drain_slots_cache_cpu()
212 if ((type & SLOTS_CACHE_RET) && cache->slots_ret) { in drain_slots_cache_cpu()
213 spin_lock_irq(&cache->free_lock); in drain_slots_cache_cpu()
214 swapcache_free_entries(cache->slots_ret, cache->n_ret); in drain_slots_cache_cpu()
215 cache->n_ret = 0; in drain_slots_cache_cpu()
216 if (free_slots && cache->slots_ret) { in drain_slots_cache_cpu()
217 slots = cache->slots_ret; in drain_slots_cache_cpu()
218 cache->slots_ret = NULL; in drain_slots_cache_cpu()
220 spin_unlock_irq(&cache->free_lock); in drain_slots_cache_cpu()
233 * left over slots are in cache when we remove in __drain_swap_slots_cache()
235 * 2) disabling of swap slot cache, when we run low in __drain_swap_slots_cache()
250 * fill any swap slots in slots cache of such cpu. in __drain_swap_slots_cache()
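The comments around lines 233 to 250 describe the two callers of __drain_swap_slots_cache(): swapoff, where left-over slots must be returned before a swap device is removed, and disabling of the cache when free swap runs low. The function itself is essentially a walk over the online CPUs; a minimal sketch (signature recalled from upstream, the third argument keeps the arrays allocated):

/* Sketch: drain every online CPU's cache; 'type' selects which half to flush. */
static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}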
273 if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating " in enable_swap_slots_cache()
274 "without swap slots cache.\n", __func__)) in enable_swap_slots_cache()
285 /* called with swap slot cache's alloc lock held */
286 static int refill_swap_slots_cache(struct swap_slots_cache *cache) in refill_swap_slots_cache() argument
288 if (!use_swap_slot_cache || cache->nr) in refill_swap_slots_cache()
291 cache->cur = 0; in refill_swap_slots_cache()
293 cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, in refill_swap_slots_cache()
294 cache->slots, 1); in refill_swap_slots_cache()
296 return cache->nr; in refill_swap_slots_cache()
301 struct swap_slots_cache *cache; in free_swap_slot() local
304 cache = raw_cpu_ptr(&swp_slots); in free_swap_slot()
305 trace_android_rvh_free_swap_slot(entry, cache, &skip); in free_swap_slot()
306 trace_android_vh_free_swap_slot(entry, cache, &skip); in free_swap_slot()
309 if (likely(use_swap_slot_cache && cache->slots_ret)) { in free_swap_slot()
310 spin_lock_irq(&cache->free_lock); in free_swap_slot()
311 /* Swap slots cache may be deactivated before acquiring lock */ in free_swap_slot()
312 if (!use_swap_slot_cache || !cache->slots_ret) { in free_swap_slot()
313 spin_unlock_irq(&cache->free_lock); in free_swap_slot()
316 if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) { in free_swap_slot()
323 swapcache_free_entries(cache->slots_ret, cache->n_ret); in free_swap_slot()
324 cache->n_ret = 0; in free_swap_slot()
326 cache->slots_ret[cache->n_ret++] = entry; in free_swap_slot()
327 spin_unlock_irq(&cache->free_lock); in free_swap_slot()
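Lines 309 to 327 show the batching on the free side: under free_lock, the entry is appended to slots_ret, and once SWAP_SLOTS_CACHE_SIZE entries have accumulated they are flushed back to the global pool with one swapcache_free_entries() call. What the listing does not show is the fallback taken when the cache is unusable (not yet allocated, or deactivated as at line 312): the entry is freed directly. A hedged sketch of that tail, with the label name as recalled from upstream:

	} else {
direct_free:
		/* Cache missing or disabled: return the single entry right away. */
		swapcache_free_entries(&entry, 1);
	}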
339 struct swap_slots_cache *cache; in get_swap_page() local
358 * mutex cache->alloc_lock. in get_swap_page()
360 * The alloc path here does not touch cache->slots_ret in get_swap_page()
361 * so cache->free_lock is not taken. in get_swap_page()
363 cache = raw_cpu_ptr(&swp_slots); in get_swap_page()
365 if (likely(check_cache_active() && cache->slots)) { in get_swap_page()
366 mutex_lock(&cache->alloc_lock); in get_swap_page()
367 if (cache->slots) { in get_swap_page()
369 if (cache->nr) { in get_swap_page()
370 entry = cache->slots[cache->cur]; in get_swap_page()
371 cache->slots[cache->cur++].val = 0; in get_swap_page()
372 cache->nr--; in get_swap_page()
373 } else if (refill_swap_slots_cache(cache)) { in get_swap_page()
377 mutex_unlock(&cache->alloc_lock); in get_swap_page()
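The comment at lines 358 to 361 is about lock ordering in get_swap_page(): the allocation fast path only ever takes cache->alloc_lock and never cache->free_lock, so the two halves of the cache cannot deadlock against each other. When the fast path finds nothing, because the cache is inactive, not yet allocated, or refill_swap_slots_cache() returns 0, the function falls back to the global allocator. A hedged sketch of that tail; the upstream code structures it with a goto and also charges the entry to the memcg (mem_cgroup_try_charge_swap) before returning, which is omitted here:

	/* Fast path above found nothing: take one entry from the global pool. */
	if (!entry.val)
		get_swap_pages(1, &entry, 1);
	return entry;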