
Searched refs:gfp_mask (Results 1 – 25 of 245) sorted by relevance


/OK3568_Linux_fs/kernel/include/linux/
gfp.h
523 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
527 __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid) in __alloc_pages() argument
529 return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL); in __alloc_pages()
537 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node() argument
540 VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid)); in __alloc_pages_node()
542 return __alloc_pages(gfp_mask, order, nid); in __alloc_pages_node()
550 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, in alloc_pages_node() argument
556 return __alloc_pages_node(nid, gfp_mask, order); in alloc_pages_node()
560 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
563 alloc_pages(gfp_t gfp_mask, unsigned int order) in alloc_pages() argument
[all …]
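These are the core allocator entry points that every gfp_mask eventually funnels into. As a minimal usage sketch (not from this tree; the alloc_buf/free_buf helper names are illustrative), allocating and freeing an order-2 block looks like:

/* sketch: allocate four contiguous pages (order 2), then free them */
#include <linux/gfp.h>
#include <linux/mm.h>

static void *alloc_buf(void)
{
	/* GFP_KERNEL may sleep; atomic contexts would use GFP_ATOMIC */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	return page ? page_address(page) : NULL;
}

static void free_buf(void *buf)
{
	if (buf)
		__free_pages(virt_to_page(buf), 2);
}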
cpuset.h
67 extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
69 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed() argument
72 return __cpuset_node_allowed(node, gfp_mask); in cpuset_node_allowed()
76 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed() argument
78 return __cpuset_node_allowed(zone_to_nid(z), gfp_mask); in __cpuset_zone_allowed()
81 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
84 return __cpuset_zone_allowed(z, gfp_mask); in cpuset_zone_allowed()
208 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed() argument
213 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed() argument
218 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
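These helpers are how the page allocator honors cpuset memory placement. A simplified sketch of the zonelist walk that consults them (abridged from mm/page_alloc.c; not a complete, compilable function):

	/* inside the allocator's zone selection loop */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, highest_zoneidx,
					nodemask) {
		if (cpusets_enabled() &&
		    (alloc_flags & ALLOC_CPUSET) &&
		    !__cpuset_zone_allowed(zone, gfp_mask))
			continue;	/* zone outside the task's cpuset */
		/* ... watermark checks, then try this zone ... */
	}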
mempool.h
13 typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
36 gfp_t gfp_mask, int node_id);
44 gfp_t gfp_mask, int nid);
48 extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
56 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
77 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
96 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
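A mempool wraps one of these allocator pairs and keeps a minimum number of elements in reserve. A sketch of creating a slab-backed pool with the helpers declared above (struct io_unit and the io_unit_* names are illustrative):

#include <linux/mempool.h>
#include <linux/slab.h>

struct io_unit {
	struct list_head list;
	void *payload;
};

static struct kmem_cache *io_unit_cache;
static mempool_t *io_unit_pool;

static int __init io_pool_init(void)
{
	io_unit_cache = KMEM_CACHE(io_unit, 0);
	if (!io_unit_cache)
		return -ENOMEM;

	/* keep at least 4 elements in reserve for forward progress */
	io_unit_pool = mempool_create(4, mempool_alloc_slab,
				      mempool_free_slab, io_unit_cache);
	if (!io_unit_pool) {
		kmem_cache_destroy(io_unit_cache);
		return -ENOMEM;
	}
	return 0;
}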
swap.h
371 gfp_t gfp_mask, nodemask_t *mask);
375 gfp_t gfp_mask,
378 gfp_t gfp_mask, bool noswap,
539 static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) in add_swap_count_continuation() argument
562 gfp_t gfp_mask, struct vm_fault *vmf) in swap_cluster_readahead() argument
567 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, in swapin_readahead() argument
602 gfp_t gfp_mask, void **shadowp) in add_to_swap_cache() argument
688 extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
689 static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) in cgroup_throttle_swaprate() argument
693 __cgroup_throttle_swaprate(page, gfp_mask); in cgroup_throttle_swaprate()
[all …]
page_owner.h
17 unsigned int order, gfp_t gfp_mask);
32 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
35 __set_page_owner(page, order, gfp_mask); in set_page_owner()
63 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
/OK3568_Linux_fs/kernel/block/
blk-lib.c
26 sector_t nr_sects, gfp_t gfp_mask, int flags, in __blkdev_issue_discard() argument
97 bio = blk_next_bio(bio, 0, gfp_mask); in __blkdev_issue_discard()
132 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) in blkdev_issue_discard() argument
139 ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags, in blkdev_issue_discard()
166 sector_t nr_sects, gfp_t gfp_mask, struct page *page, in __blkdev_issue_write_same() argument
191 bio = blk_next_bio(bio, 1, gfp_mask); in __blkdev_issue_write_same()
227 sector_t nr_sects, gfp_t gfp_mask, in blkdev_issue_write_same() argument
235 ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page, in blkdev_issue_write_same()
247 sector_t sector, sector_t nr_sects, gfp_t gfp_mask, in __blkdev_issue_write_zeroes() argument
267 bio = blk_next_bio(bio, 0, gfp_mask); in __blkdev_issue_write_zeroes()
[all …]
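blkdev_issue_discard() is the public entry point built on the __blkdev_issue_discard() helper above. A sketch of how a filesystem's FITRIM-style path might call it (discard_range is an illustrative name; byte offsets are converted to 512-byte sectors):

#include <linux/blkdev.h>

static int discard_range(struct block_device *bdev, u64 start, u64 len)
{
	/* GFP_KERNEL: discard is issued from sleepable context */
	return blkdev_issue_discard(bdev, start >> 9, len >> 9,
				    GFP_KERNEL, 0);
}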
blk-map.c
22 gfp_t gfp_mask) in bio_alloc_map_data() argument
29 bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask); in bio_alloc_map_data()
131 struct iov_iter *iter, gfp_t gfp_mask) in bio_copy_user_iov() argument
141 bmd = bio_alloc_map_data(iter, gfp_mask); in bio_copy_user_iov()
158 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_copy_user_iov()
186 page = alloc_page(rq->q->bounce_gfp | gfp_mask); in bio_copy_user_iov()
244 gfp_t gfp_mask) in bio_map_user_iov() argument
254 bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES)); in bio_map_user_iov()
382 unsigned int len, gfp_t gfp_mask) in bio_map_kern() argument
393 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_map_kern()
[all …]
blk-crypto.c
82 const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask) in bio_crypt_set_ctx() argument
90 WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM)); in bio_crypt_set_ctx()
92 bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); in bio_crypt_set_ctx()
107 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) in __bio_crypt_clone() argument
109 dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); in __bio_crypt_clone()
296 gfp_t gfp_mask) in __blk_crypto_rq_bio_prep() argument
299 rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); in __blk_crypto_rq_bio_prep()
/OK3568_Linux_fs/kernel/mm/
mempool.c
182 gfp_t gfp_mask, int node_id) in mempool_init_node() argument
192 gfp_mask, node_id); in mempool_init_node()
202 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_init_node()
263 gfp_t gfp_mask, int node_id) in mempool_create_node() argument
267 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id); in mempool_create_node()
272 gfp_mask, node_id)) { in mempool_create_node()
375 void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) in mempool_alloc() argument
382 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); in mempool_alloc()
383 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); in mempool_alloc()
385 gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ in mempool_alloc()
[all …]
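The flag fixups above (__GFP_NOMEMALLOC and friends) support mempool_alloc()'s guarantee: with a gfp_mask that allows direct reclaim, it waits for an element to be returned to the pool rather than failing. A sketch on the I/O path, reusing the illustrative io_unit_pool from the mempool.h sketch above:

	struct io_unit *iu;

	/* GFP_NOIO allows blocking but not recursing into the I/O stack;
	 * with a blocking mask, mempool_alloc() cannot return NULL */
	iu = mempool_alloc(io_unit_pool, GFP_NOIO);

	/* ... describe and submit the I/O via iu ... */

	mempool_free(iu, io_unit_pool);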
page_owner.c
26 gfp_t gfp_mask; member
191 unsigned int order, gfp_t gfp_mask) in __set_page_owner_handle() argument
200 page_owner->gfp_mask = gfp_mask; in __set_page_owner_handle()
212 gfp_t gfp_mask) in __set_page_owner() argument
217 handle = save_stack(gfp_mask); in __set_page_owner()
222 __set_page_owner_handle(page, page_ext, handle, order, gfp_mask); in __set_page_owner()
275 new_page_owner->gfp_mask = old_page_owner->gfp_mask; in __copy_page_owner()
360 page_mt = gfp_migratetype(page_owner->gfp_mask); in pagetypeinfo_showmixedcount_print()
401 page_owner->order, page_owner->gfp_mask, in print_page_owner()
402 &page_owner->gfp_mask, page_owner->pid, in print_page_owner()
[all …]
page_alloc.c
3671 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3675 if (gfp_mask & __GFP_NOFAIL) in __should_fail_alloc_page()
3677 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in __should_fail_alloc_page()
3680 (gfp_mask & __GFP_DIRECT_RECLAIM)) in __should_fail_alloc_page()
3711 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3718 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
3720 return __should_fail_alloc_page(gfp_mask, order); in should_fail_alloc_page()
3835 unsigned int alloc_flags, gfp_t gfp_mask) in zone_watermark_fast() argument
3867 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost in zone_watermark_fast()
3912 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
[all …]
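The fault-injection and watermark code above keys off individual bits in the caller's mask. A context sketch of the two most common composite masks (not from this tree; function names are illustrative):

#include <linux/gfp.h>

static struct page *get_page_process_ctx(void)
{
	/* GFP_KERNEL includes __GFP_DIRECT_RECLAIM: may sleep and reclaim */
	return alloc_pages(GFP_KERNEL, 0);
}

static struct page *get_page_irq_ctx(void)
{
	/* GFP_ATOMIC includes __GFP_ATOMIC: never sleeps, and may use the
	 * watermark_boost path checked in zone_watermark_fast() above */
	return alloc_pages(GFP_ATOMIC, 0);
}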
vmscan.c
142 gfp_t gfp_mask; member
571 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, in shrink_slab_memcg() argument
591 .gfp_mask = gfp_mask, in shrink_slab_memcg()
646 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, in shrink_slab_memcg() argument
673 unsigned long shrink_slab(gfp_t gfp_mask, int nid, in shrink_slab() argument
681 trace_android_vh_shrink_slab_bypass(gfp_mask, nid, memcg, priority, &bypass); in shrink_slab()
693 return shrink_slab_memcg(gfp_mask, nid, memcg, priority); in shrink_slab()
700 .gfp_mask = gfp_mask, in shrink_slab()
1163 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
1164 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list()
[all …]
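The may_enter_fs test above is why filesystems scope nested allocations with the memalloc API instead of threading GFP_NOFS through every callee. A sketch (fs_critical_section is an illustrative name, standing in for code that holds a lock reclaim could otherwise re-enter):

#include <linux/sched/mm.h>

static void fs_critical_section(void)
{
	unsigned int nofs_flags = memalloc_nofs_save();

	/* every allocation in this scope behaves as if GFP_NOFS were
	 * passed: __GFP_FS is cleared, so may_enter_fs is false and
	 * reclaim will not call back into the filesystem */

	memalloc_nofs_restore(nofs_flags);
}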
swap_state.c
452 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in __read_swap_cache_async() argument
494 page = alloc_page_vma(gfp_mask, vma, addr); in __read_swap_cache_async()
527 if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) { in __read_swap_cache_async()
532 if (mem_cgroup_charge(page, NULL, gfp_mask)) { in __read_swap_cache_async()
558 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async() argument
562 struct page *retpage = __read_swap_cache_async(entry, gfp_mask, in read_swap_cache_async()
654 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, in swap_cluster_readahead() argument
693 gfp_mask, vma, addr, &page_allocated); in swap_cluster_readahead()
709 return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll); in swap_cluster_readahead()
837 static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, in swap_vma_readahead() argument
[all …]
/OK3568_Linux_fs/kernel/drivers/dma-buf/heaps/
page_pool.c
32 return alloc_pages(pool->gfp_mask, pool->order); in dmabuf_page_pool_alloc_pages()
127 struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order) in dmabuf_page_pool_create() argument
144 pool->gfp_mask = gfp_mask | __GFP_COMP; in dmabuf_page_pool_create()
179 static int dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask, in dmabuf_page_pool_do_shrink() argument
188 high = !!(gfp_mask & __GFP_HIGHMEM); in dmabuf_page_pool_do_shrink()
211 static int dmabuf_page_pool_shrink(gfp_t gfp_mask, int nr_to_scan) in dmabuf_page_pool_shrink() argument
225 gfp_mask, in dmabuf_page_pool_shrink()
229 gfp_mask, in dmabuf_page_pool_shrink()
245 return dmabuf_page_pool_shrink(sc->gfp_mask, 0); in dmabuf_page_pool_shrink_count()
253 return dmabuf_page_pool_shrink(sc->gfp_mask, sc->nr_to_scan); in dmabuf_page_pool_shrink_scan()
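This pool (and the ion pool below) follows the standard shrinker pattern: reclaim hands its gfp_mask to the callbacks via struct shrink_control, so a pool can skip highmem pages that the triggering allocation could not use anyway. A sketch with illustrative my_pool_* helpers:

#include <linux/shrinker.h>

static unsigned long my_pool_count(struct shrinker *s,
				   struct shrink_control *sc)
{
	return my_pool_total_pages();		/* illustrative helper */
}

static unsigned long my_pool_scan(struct shrinker *s,
				  struct shrink_control *sc)
{
	/* only free highmem pages if the reclaiming allocation can use them */
	bool may_free_highmem = !!(sc->gfp_mask & __GFP_HIGHMEM);

	return my_pool_trim(sc->nr_to_scan, may_free_highmem); /* illustrative */
}

static struct shrinker my_pool_shrinker = {
	.count_objects = my_pool_count,
	.scan_objects  = my_pool_scan,
	.seeks         = DEFAULT_SEEKS,
};

/* register_shrinker(&my_pool_shrinker) at init,
 * unregister_shrinker() at teardown */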
/OK3568_Linux_fs/kernel/fs/nfs/blocklayout/
dev.c
231 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
236 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_simple() argument
242 dev = bl_resolve_deviceid(server, v, gfp_mask); in bl_parse_simple()
353 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_scsi() argument
402 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_slice() argument
407 ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask); in bl_parse_slice()
418 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_concat() argument
431 volumes, v->concat.volumes[i], gfp_mask); in bl_parse_concat()
447 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_stripe() argument
460 volumes, v->stripe.volumes[i], gfp_mask); in bl_parse_stripe()
[all …]
/OK3568_Linux_fs/kernel/fs/btrfs/
ulist.h
48 struct ulist *ulist_alloc(gfp_t gfp_mask);
50 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
52 u64 *old_aux, gfp_t gfp_mask);
57 void **old_aux, gfp_t gfp_mask) in ulist_add_merge_ptr() argument
61 int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask); in ulist_add_merge_ptr()
65 return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask); in ulist_add_merge_ptr()
ulist.c
92 struct ulist *ulist_alloc(gfp_t gfp_mask) in ulist_alloc() argument
94 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask); in ulist_alloc()
186 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask) in ulist_add() argument
188 return ulist_add_merge(ulist, val, aux, NULL, gfp_mask); in ulist_add()
192 u64 *old_aux, gfp_t gfp_mask) in ulist_add_merge() argument
203 node = kmalloc(sizeof(*node), gfp_mask); in ulist_add_merge()
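ulist is btrfs's small "visited set" abstraction (fs/btrfs/ulist.h, kernel-internal). A usage sketch; process_ref and bytenr are illustrative:

	struct ulist *seen;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	seen = ulist_alloc(GFP_NOFS);
	if (!seen)
		return -ENOMEM;

	/* ulist_add() returns 1 if newly inserted, 0 if already present,
	 * -ENOMEM on allocation failure */
	ret = ulist_add(seen, bytenr, 0, GFP_NOFS);
	if (ret < 0)
		goto out;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(seen, &uiter)))
		process_ref(node->val, node->aux);
out:
	ulist_free(seen);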
/OK3568_Linux_fs/kernel/lib/
generic-radix-tree.c
79 static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask) in genradix_alloc_node() argument
83 node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO); in genradix_alloc_node()
90 kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask); in genradix_alloc_node()
105 gfp_t gfp_mask) in __genradix_ptr_alloc() argument
122 new_node = genradix_alloc_node(gfp_mask); in __genradix_ptr_alloc()
145 new_node = genradix_alloc_node(gfp_mask); in __genradix_ptr_alloc()
218 gfp_t gfp_mask) in __genradix_prealloc() argument
223 if (!__genradix_ptr_alloc(radix, offset, gfp_mask)) in __genradix_prealloc()
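The public wrappers in <linux/generic-radix-tree.h> route through __genradix_ptr_alloc() above; note that nodes are zeroed at allocation time (__GFP_ZERO is OR'd in). A sketch (struct foo and add_foo are illustrative):

#include <linux/generic-radix-tree.h>

struct foo {
	u64 id;
};

/* a static instance is zero-initialized, i.e. already empty */
static GENRADIX(struct foo) foo_radix;

static int add_foo(size_t idx, u64 id)
{
	/* allocates intermediate nodes with the given gfp_mask as needed */
	struct foo *f = genradix_ptr_alloc(&foo_radix, idx, GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	f->id = id;
	return 0;
}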
scatterlist.c
149 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) in sg_kmalloc() argument
161 void *ptr = (void *) __get_free_page(gfp_mask); in sg_kmalloc()
162 kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask); in sg_kmalloc()
166 gfp_mask); in sg_kmalloc()
268 unsigned int nents_first_chunk, gfp_t gfp_mask, in __sg_alloc_table() argument
302 sg = alloc_fn(alloc_size, gfp_mask); in __sg_alloc_table()
355 int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) in sg_alloc_table() argument
360 NULL, 0, gfp_mask, sg_kmalloc); in sg_alloc_table()
371 gfp_t gfp_mask) in get_next_sg() argument
384 new_sg = sg_kmalloc(alloc_size, gfp_mask); in get_next_sg()
[all …]
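sg_kmalloc() above is the per-chunk allocator behind sg_alloc_table(). A sketch that builds a table over existing pages (the dma_map_sg() call is elided; map_pages is an illustrative name):

#include <linux/scatterlist.h>

static int map_pages(struct page **pages, unsigned int n)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, n, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(table.sgl, sg, table.nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... dma_map_sg(dev, table.sgl, table.nents, dir) ... */

	sg_free_table(&table);
	return 0;
}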
/OK3568_Linux_fs/kernel/include/trace/hooks/
mm.h
63 unsigned int align, gfp_t gfp_mask, s64 ts),
64 TP_ARGS(cma, page, count, align, gfp_mask, ts));
73 int fgp_flags, gfp_t gfp_mask, struct page *page),
74 TP_ARGS(mapping, index, fgp_flags, gfp_mask, page));
102 TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long *pdata),
103 TP_ARGS(gfp_mask, order, pdata));
105 TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long data),
106 TP_ARGS(gfp_mask, order, data));
158 TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long alloc_flags,
161 TP_ARGS(gfp_mask, order, alloc_flags, migratetype, did_some_progress, bypass));
[all …]
/OK3568_Linux_fs/kernel/net/sunrpc/auth_gss/
gss_krb5_mech.c
312 context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask) in context_derive_keys_des3() argument
339 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_des3()
357 context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask) in context_derive_keys_new() argument
373 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
388 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
403 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
413 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
423 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
433 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
469 gfp_t gfp_mask) in gss_import_v2_context() argument
[all …]
/OK3568_Linux_fs/kernel/drivers/net/ethernet/mellanox/mlx4/
icm.c
99 gfp_t gfp_mask, int node) in mlx4_alloc_icm_pages() argument
103 page = alloc_pages_node(node, gfp_mask, order); in mlx4_alloc_icm_pages()
105 page = alloc_pages(gfp_mask, order); in mlx4_alloc_icm_pages()
115 int order, gfp_t gfp_mask) in mlx4_alloc_icm_coherent() argument
118 &buf->dma_addr, gfp_mask); in mlx4_alloc_icm_coherent()
133 gfp_t gfp_mask, int coherent) in mlx4_alloc_icm() argument
142 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); in mlx4_alloc_icm()
145 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN), in mlx4_alloc_icm()
149 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); in mlx4_alloc_icm()
162 gfp_mask & ~(__GFP_HIGHMEM | in mlx4_alloc_icm()
[all …]
/OK3568_Linux_fs/kernel/drivers/staging/android/ion/heaps/
ion_page_pool.c
19 return alloc_pages(pool->gfp_mask, pool->order); in ion_page_pool_alloc_pages()
114 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, in ion_page_pool_shrink() argument
123 high = !!(gfp_mask & __GFP_HIGHMEM); in ion_page_pool_shrink()
149 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) in ion_page_pool_create() argument
159 pool->gfp_mask = gfp_mask | __GFP_COMP; in ion_page_pool_create()
/OK3568_Linux_fs/kernel/drivers/connector/
connector.c
62 gfp_t gfp_mask) in cn_netlink_send_mult() argument
96 skb = nlmsg_new(size, gfp_mask); in cn_netlink_send_mult()
114 gfp_mask); in cn_netlink_send_mult()
116 !gfpflags_allow_blocking(gfp_mask)); in cn_netlink_send_mult()
122 gfp_t gfp_mask) in cn_netlink_send() argument
124 return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask); in cn_netlink_send()
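cn_netlink_send() broadcasts one message to a connector group, and cn_netlink_send_mult() asserts that atomic callers pass a non-blocking mask (the gfpflags_allow_blocking() check above). A sketch using the proc-connector ids for illustration (notify_event is an illustrative name):

#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/string.h>

static int notify_event(const void *data, u16 len)
{
	struct cn_msg *m;
	int ret;

	m = kzalloc(sizeof(*m) + len, GFP_ATOMIC);
	if (!m)
		return -ENOMEM;

	m->id.idx = CN_IDX_PROC;
	m->id.val = CN_VAL_PROC;
	m->len = len;
	memcpy(m->data, data, len);

	/* GFP_ATOMIC: safe to call from non-sleepable context */
	ret = cn_netlink_send(m, 0, CN_IDX_PROC, GFP_ATOMIC);
	kfree(m);
	return ret;
}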
/OK3568_Linux_fs/kernel/kernel/power/
snapshot.c
155 static void *get_image_page(gfp_t gfp_mask, int safe_needed) in get_image_page() argument
159 res = (void *)get_zeroed_page(gfp_mask); in get_image_page()
165 res = (void *)get_zeroed_page(gfp_mask); in get_image_page()
174 static void *__get_safe_page(gfp_t gfp_mask) in __get_safe_page() argument
183 return get_image_page(gfp_mask, PG_SAFE); in __get_safe_page()
186 unsigned long get_safe_page(gfp_t gfp_mask) in get_safe_page() argument
188 return (unsigned long)__get_safe_page(gfp_mask); in get_safe_page()
191 static struct page *alloc_image_page(gfp_t gfp_mask) in alloc_image_page() argument
195 page = alloc_page(gfp_mask); in alloc_image_page()
261 gfp_t gfp_mask; /* mask for allocating pages */ member
[all …]
