Lines matching references to gfp (SLOB allocator, mm/slob.c):
191 static void *slob_new_pages(gfp_t gfp, int order, int node) in slob_new_pages() argument
197 page = __alloc_pages_node(node, gfp, order); in slob_new_pages()
200 page = alloc_pages(gfp, order); in slob_new_pages()
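
For context, a minimal sketch of how slob_new_pages() selects between the node-aware and node-agnostic page allocators, assembled around the two fragments above (lines 197 and 200). The CONFIG_NUMA guard, the NULL check, and the page_address() conversion are assumptions about the omitted lines, not part of the listing.

	/* Sketch only: reconstructs the control flow around lines 197 and 200. */
	static void *slob_new_pages(gfp_t gfp, int order, int node)
	{
		struct page *page;

	#ifdef CONFIG_NUMA
		if (node != NUMA_NO_NODE)
			/* Caller asked for a specific NUMA node. */
			page = __alloc_pages_node(node, gfp, order);
		else
	#endif
			/* No node preference: let the page allocator decide. */
			page = alloc_pages(gfp, order);

		if (!page)
			return NULL;

		return page_address(page);	/* hand back the linear mapping */
	}

In both branches the caller's gfp flags are passed through to the page allocator unchanged.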
301 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node, in slob_alloc() argument
358 b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node); in slob_alloc()
374 if (unlikely(gfp & __GFP_ZERO)) in slob_alloc()
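
A sketch of the __GFP_ZERO handling visible at lines 358 and 374: the flag is stripped before requesting a fresh page, because only the returned object needs zeroing (the rest of the page holds the SLOB free list), and the object is memset() after a successful allocation. Everything other than those two statements, including the name of the fifth parameter, is an assumption about the surrounding code.

	static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
				int align_offset)
	{
		void *b = NULL;

		/* ... try to carve the object out of an existing partial page ... */

		if (!b) {
			/*
			 * No partial page could satisfy the request: get a fresh
			 * page.  __GFP_ZERO is masked off so the page allocator
			 * does not zero the whole page; only the object needs it.
			 */
			b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
			if (!b)
				return NULL;
			/* ... set up the page's free list and carve the object ... */
		}

		if (unlikely(gfp & __GFP_ZERO))
			memset(b, 0, size);
		return b;
	}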
469 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) in __do_kmalloc_node() argument
477 gfp &= gfp_allowed_mask; in __do_kmalloc_node()
479 fs_reclaim_acquire(gfp); in __do_kmalloc_node()
480 fs_reclaim_release(gfp); in __do_kmalloc_node()
495 m = slob_alloc(size + minalign, gfp, align, node, minalign); in __do_kmalloc_node()
503 size, size + minalign, gfp, node); in __do_kmalloc_node()
508 gfp |= __GFP_COMP; in __do_kmalloc_node()
509 ret = slob_new_pages(gfp, order, node); in __do_kmalloc_node()
512 size, PAGE_SIZE << order, gfp, node); in __do_kmalloc_node()
515 kmemleak_alloc(ret, size, 1, gfp); in __do_kmalloc_node()
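
Pulling the __do_kmalloc_node() fragments together, a sketch of how gfp flows through the function: the mask against gfp_allowed_mask (line 477), the fs_reclaim lockdep annotation (lines 479-480), the small-object path through slob_alloc() with minalign bytes reserved in front of the object for a size header (line 495), the large-object fallback to whole pages with __GFP_COMP (lines 508-509), and the kmemleak registration (line 515). The size threshold, the order computation, and the size header are assumptions about the omitted lines; the trace_kmalloc_node() calls behind lines 503 and 512 are left out.

	static __always_inline void *
	__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
	{
		int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m;
		void *ret;

		/* Drop flags that are globally disallowed (e.g. during early boot). */
		gfp &= gfp_allowed_mask;

		/* Lockdep annotation: this allocation may recurse into reclaim. */
		fs_reclaim_acquire(gfp);
		fs_reclaim_release(gfp);

		if (size < PAGE_SIZE - minalign) {
			/*
			 * Small object: allocate size + minalign bytes and stash
			 * the size in the minalign-sized header before the object.
			 */
			m = slob_alloc(size + minalign, gfp, minalign, node, minalign);
			if (!m)
				return NULL;
			*m = size;
			ret = (void *)m + minalign;
		} else {
			/* Large object: fall back to whole pages. */
			unsigned int order = get_order(size);

			if (order)
				gfp |= __GFP_COMP;	/* allocate a compound page */
			ret = slob_new_pages(gfp, order, node);
		}

		/* Register the object with kmemleak under the same gfp constraints. */
		kmemleak_alloc(ret, size, 1, gfp);
		return ret;
	}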
519 void *__kmalloc(size_t size, gfp_t gfp) in __kmalloc() argument
521 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_); in __kmalloc()
525 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) in __kmalloc_track_caller() argument
527 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller); in __kmalloc_track_caller()
532 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, in __kmalloc_node_track_caller() argument
535 return __do_kmalloc_node(size, gfp, node, caller); in __kmalloc_node_track_caller()
637 void *__kmalloc_node(size_t size, gfp_t gfp, int node) in __kmalloc_node() argument
639 return __do_kmalloc_node(size, gfp, node, _RET_IP_); in __kmalloc_node()
643 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node) in kmem_cache_alloc_node() argument
645 return slob_alloc_node(cachep, gfp, node); in kmem_cache_alloc_node()
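
The remaining hits are thin wrappers that forward gfp unchanged. A sketch of how they delegate, based on the call fragments above; the EXPORT_SYMBOL lines are omitted and the trailing parameters of __kmalloc_node_track_caller() are inferred from the call on line 535.

	void *__kmalloc(size_t size, gfp_t gfp)
	{
		/* Default node; record the immediate caller for tracing. */
		return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
	}

	void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
	{
		return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
	}

	void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
					  int node, unsigned long caller)
	{
		return __do_kmalloc_node(size, gfp, node, caller);
	}

	void *__kmalloc_node(size_t size, gfp_t gfp, int node)
	{
		return __do_kmalloc_node(size, gfp, node, _RET_IP_);
	}

	void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
	{
		/* Cache-backed allocation: gfp goes straight to slob_alloc_node(). */
		return slob_alloc_node(cachep, gfp, node);
	}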