Lines matching refs: gfp_mask. Each entry below gives the source line number, the matching code fragment, and the enclosing function; a trailing "argument" or "member" marks a line where gfp_mask is declared (as a parameter or struct field) rather than used. All hits fall in the kernel's page-reclaim path.
142 gfp_t gfp_mask; member
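The first hit (line 142) is the field declaration itself: reclaim bundles its parameters, including the caller's allocation context, into one control struct. A minimal sketch of that shape, with a mocked gfp_t and a heavily trimmed struct (the real scan_control carries many more fields):

```c
typedef unsigned int gfp_t;               /* kernel typedef, mocked here */
struct mem_cgroup;                        /* opaque in this sketch */

/* Trimmed illustration of the control struct behind line 142. */
struct scan_control {
	gfp_t gfp_mask;                       /* allocation context of the reclaim caller */
	int order;                            /* allocation order being satisfied */
	int reclaim_idx;                      /* highest zone index eligible for reclaim */
	struct mem_cgroup *target_mem_cgroup; /* NULL for global reclaim */
};
```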
571 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, in shrink_slab_memcg() argument
591 .gfp_mask = gfp_mask, in shrink_slab_memcg()
646 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, in shrink_slab_memcg() argument
673 unsigned long shrink_slab(gfp_t gfp_mask, int nid, in shrink_slab() argument
681 trace_android_vh_shrink_slab_bypass(gfp_mask, nid, memcg, priority, &bypass); in shrink_slab()
693 return shrink_slab_memcg(gfp_mask, nid, memcg, priority); in shrink_slab()
700 .gfp_mask = gfp_mask, in shrink_slab()
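The shrink_slab() hits above all show the same hand-off: the reclaim entry point packs its gfp_mask argument, unchanged, into a per-call shrink_control. A hedged sketch of that pattern, using a mocked gfp_t, a trimmed shrink_control, and a hypothetical function name:

```c
typedef unsigned int gfp_t;   /* mocked kernel typedef */
struct mem_cgroup;            /* opaque here */

/* Trimmed stand-in for the kernel's struct shrink_control. */
struct shrink_control {
	gfp_t gfp_mask;           /* forwarded so shrinkers can honour the caller's context */
	int nid;
	struct mem_cgroup *memcg;
};

/* The hand-off visible at the ".gfp_mask = gfp_mask" initializers above. */
static unsigned long shrink_slab_sketch(gfp_t gfp_mask, int nid,
					struct mem_cgroup *memcg)
{
	struct shrink_control shrinkctl = {
		.gfp_mask = gfp_mask, /* unchanged, end to end */
		.nid = nid,
		.memcg = memcg,
	};

	(void)shrinkctl;          /* a real caller would pass &shrinkctl to each shrinker */
	return 0;
}
```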
1163 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
1164 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list()
1290 if (!(sc->gfp_mask & __GFP_IO)) in shrink_page_list()
1453 if (!try_to_release_page(page, sc->gfp_mask)) in shrink_page_list()
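Lines 1163-1164 show shrink_page_list() deriving a may_enter_fs predicate from the mask. A minimal sketch of that gating, with mock flag values (the real __GFP_IO/__GFP_FS bits differ):

```c
#include <stdbool.h>

typedef unsigned int gfp_t;
#define __GFP_IO 0x40u        /* mock values for illustration only */
#define __GFP_FS 0x80u

/* Sketch of the gating at lines 1163-1164: filesystem callbacks are safe
 * only when the caller allowed __GFP_FS, but a swap-cache page may still
 * be written out if plain IO (__GFP_IO) is allowed. */
static bool may_enter_fs(gfp_t gfp_mask, bool page_is_swapcache)
{
	return (gfp_mask & __GFP_FS) ||
	       (page_is_swapcache && (gfp_mask & __GFP_IO));
}
```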
1552 .gfp_mask = GFP_KERNEL, in reclaim_clean_pages_from_list()
1872 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in too_many_isolated()
2182 .gfp_mask = GFP_KERNEL, in reclaim_pages()
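The too_many_isolated() hit at line 1872 is worth pausing on: the masked compare demands that both bits be set, which is not the same as testing that either is. A sketch of the distinction, again with mock flag values:

```c
#include <stdbool.h>

typedef unsigned int gfp_t;
#define __GFP_IO 0x40u        /* mock values for illustration only */
#define __GFP_FS 0x80u

/* Sketch of the test at line 1872: the compare requires BOTH bits.
 * Contrast with (gfp_mask & (__GFP_IO | __GFP_FS)) != 0, which either
 * bit alone would satisfy. */
static bool may_do_io_and_fs(gfp_t gfp_mask)
{
	return (gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
}
```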
2751 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node_memcgs()
2755 vmpressure(sc->gfp_mask, memcg, false, in shrink_node_memcgs()
2874 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, in shrink_node()
3008 orig_mask = sc->gfp_mask; in shrink_zones()
3010 sc->gfp_mask |= __GFP_HIGHMEM; in shrink_zones()
3011 sc->reclaim_idx = gfp_zone(sc->gfp_mask); in shrink_zones()
3058 sc->order, sc->gfp_mask, in shrink_zones()
3076 sc->gfp_mask = orig_mask; in shrink_zones()
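Lines 3008-3076 show shrink_zones() temporarily widening the mask and then restoring it. A sketch of that widen-then-restore pattern, assuming a simplified gfp_zone() stand-in and a hypothetical trigger parameter (in mainline the trigger is buffer_heads_over_limit):

```c
#include <stdbool.h>

typedef unsigned int gfp_t;
#define __GFP_HIGHMEM 0x02u   /* mock value */

struct scan_control {
	gfp_t gfp_mask;
	int reclaim_idx;
};

/* Stand-in for gfp_zone(): maps an allocation mask to its zone ceiling. */
static int gfp_zone(gfp_t gfp)
{
	return (gfp & __GFP_HIGHMEM) ? 2 : 1;
}

/* The pattern at lines 3008-3076: temporarily allow highmem reclaim,
 * recompute the zone ceiling, then put the caller's original mask back
 * so later code sees the real constraints. */
static void shrink_zones_sketch(struct scan_control *sc, bool want_highmem)
{
	gfp_t orig_mask = sc->gfp_mask;

	if (want_highmem) {
		sc->gfp_mask |= __GFP_HIGHMEM;
		sc->reclaim_idx = gfp_zone(sc->gfp_mask);
	}

	/* ... per-zone shrink_node() calls would run here ... */

	sc->gfp_mask = orig_mask; /* restore before returning */
}
```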
3122 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, in do_try_to_free_pages()
3245 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, in throttle_direct_reclaim() argument
3284 gfp_zone(gfp_mask), nodemask) { in throttle_direct_reclaim()
3310 if (!(gfp_mask & __GFP_FS)) { in throttle_direct_reclaim()
3330 gfp_t gfp_mask, nodemask_t *nodemask) in try_to_free_pages() argument
3335 .gfp_mask = current_gfp_context(gfp_mask), in try_to_free_pages()
3336 .reclaim_idx = gfp_zone(gfp_mask), in try_to_free_pages()
3358 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) in try_to_free_pages()
3362 trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask); in try_to_free_pages()
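Lines 3330-3362 show the direct-reclaim entry point sanitizing the caller's mask once, via current_gfp_context(), and then keying decisions off the result; the throttle check at line 3310 treats !__GFP_FS callers specially, since they may hold fs locks that writeback progress would need. A sketch under those assumptions, with mock flag values and a simplified stand-in for current_gfp_context() (the real helper also handles NOIO scopes):

```c
#include <stdbool.h>

typedef unsigned int gfp_t;
#define __GFP_FS         0x80u   /* mock values for illustration only */
#define PF_MEMALLOC_NOFS 0x01u   /* mock task flag */

/* Stand-in for current_gfp_context(): drop __GFP_FS while the task is
 * inside a memalloc_nofs_save() scope. */
static gfp_t current_gfp_context_sketch(unsigned int task_flags, gfp_t gfp)
{
	if (task_flags & PF_MEMALLOC_NOFS)
		gfp &= ~__GFP_FS;
	return gfp;
}

/* Sanitize once, then decide: only FS-capable callers are left to sleep
 * until kswapd makes progress; others must not be parked indefinitely. */
static bool may_sleep_until_kswapd(unsigned int task_flags, gfp_t gfp)
{
	gfp = current_gfp_context_sketch(task_flags, gfp);
	return (gfp & __GFP_FS) != 0;
}
```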
3376 gfp_t gfp_mask, bool noswap, in mem_cgroup_shrink_node() argument
3392 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | in mem_cgroup_shrink_node()
3396 sc.gfp_mask); in mem_cgroup_shrink_node()
3416 gfp_t gfp_mask, in try_to_free_mem_cgroup_pages() argument
3423 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | in try_to_free_mem_cgroup_pages()
3437 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); in try_to_free_mem_cgroup_pages()
3440 trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask); in try_to_free_mem_cgroup_pages()
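Lines 3392 and 3423 begin a mask combination that the listing truncates; in mainline the continuation ORs in (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK). A sketch of the idea, with mock bit ranges standing in for the real mask values:

```c
typedef unsigned int gfp_t;
/* Mock split: low bits steer reclaim behaviour, high bits steer placement. */
#define GFP_RECLAIM_MASK     0x0ffu
#define GFP_HIGHUSER_MOVABLE 0xf00u

/* Keep the caller's reclaim-relevant bits (IO/FS/atomicity), but reclaim
 * as if on behalf of a highmem, movable user allocation. */
static gfp_t memcg_reclaim_gfp(gfp_t caller_mask)
{
	return (caller_mask & GFP_RECLAIM_MASK) |
	       (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
}
```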
3650 .gfp_mask = GFP_KERNEL, in balance_pgdat()
3761 sc.gfp_mask, &nr_soft_scanned); in balance_pgdat()
4142 .gfp_mask = GFP_HIGHUSER_MOVABLE, in shrink_all_memory()
4150 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); in shrink_all_memory()
4154 fs_reclaim_acquire(sc.gfp_mask); in shrink_all_memory()
4162 fs_reclaim_release(sc.gfp_mask); in shrink_all_memory()
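The fs_reclaim_acquire()/fs_reclaim_release() pair at lines 4154/4162 (and again at 4327/4350 below) are lockdep annotations: they tell the lock validator that an FS-capable reclaim context is entered and left, so lock-vs-reclaim deadlocks can be caught. A sketch of the pairing discipline only, with no-op stand-ins:

```c
typedef unsigned int gfp_t;

/* No-op stand-ins for the lockdep annotations. */
static void fs_reclaim_acquire_sketch(gfp_t gfp) { (void)gfp; }
static void fs_reclaim_release_sketch(gfp_t gfp) { (void)gfp; }

/* The pattern: always bracket the reclaim work, always with the same
 * (already sanitized) mask on both sides. */
static unsigned long reclaim_bracketed(gfp_t gfp)
{
	unsigned long nr_reclaimed = 0;

	fs_reclaim_acquire_sketch(gfp);
	/* ... do_try_to_free_pages()-style work would run here ... */
	fs_reclaim_release_sketch(gfp);

	return nr_reclaimed;
}
```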
4306 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) in __node_reclaim() argument
4314 .gfp_mask = current_gfp_context(gfp_mask), in __node_reclaim()
4320 .reclaim_idx = gfp_zone(gfp_mask), in __node_reclaim()
4324 sc.gfp_mask); in __node_reclaim()
4327 fs_reclaim_acquire(sc.gfp_mask); in __node_reclaim()
4350 fs_reclaim_release(sc.gfp_mask); in __node_reclaim()
4357 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) in node_reclaim() argument
4379 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) in node_reclaim()
4394 ret = __node_reclaim(pgdat, gfp_mask, order); in node_reclaim()
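The guard at line 4379 refuses node reclaim both for callers whose mask forbids blocking and for tasks already inside reclaim (PF_MEMALLOC), which would otherwise recurse. A sketch with mock flag values; gfpflags_allow_blocking() in the kernel tests __GFP_DIRECT_RECLAIM:

```c
#include <stdbool.h>

typedef unsigned int gfp_t;
#define __GFP_DIRECT_RECLAIM 0x400u   /* mock values for illustration only */
#define PF_MEMALLOC          0x800u   /* mock task flag */

/* Stand-in for gfpflags_allow_blocking(). */
static bool gfpflags_allow_blocking_sketch(gfp_t gfp)
{
	return (gfp & __GFP_DIRECT_RECLAIM) != 0;
}

/* The guard at line 4379: skip node reclaim for callers that cannot
 * sleep, and for tasks already inside reclaim, which would recurse. */
static bool node_reclaim_allowed(gfp_t gfp, unsigned int task_flags)
{
	return gfpflags_allow_blocking_sketch(gfp) && !(task_flags & PF_MEMALLOC);
}
```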