/OK3568_Linux_fs/kernel/mm/

page_counter.c
   50  void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)  in page_counter_cancel() argument
   54  new = atomic_long_sub_return(nr_pages, &counter->usage);  in page_counter_cancel()
   67  void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)  in page_counter_charge() argument
   74  new = atomic_long_add_return(nr_pages, &c->usage);  in page_counter_charge()
   95  unsigned long nr_pages,  in page_counter_try_charge() argument
  116  new = atomic_long_add_return(nr_pages, &c->usage);  in page_counter_try_charge()
  118  atomic_long_sub(nr_pages, &c->usage);  in page_counter_try_charge()
  141  page_counter_cancel(c, nr_pages);  in page_counter_try_charge()
  151  void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)  in page_counter_uncharge() argument
  156  page_counter_cancel(c, nr_pages);  in page_counter_uncharge()
  [all …]
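
The page_counter.c hits trace the whole charge protocol: charge adds unconditionally, try_charge speculatively adds with atomic_long_add_return and backs the charge out with atomic_long_sub when a limit is exceeded, and uncharge/cancel subtract. A minimal userspace sketch of that speculative add-then-undo pattern, assuming a flat counter (the kernel version also walks a parent hierarchy and records watermarks):

    /*
     * Minimal userspace model of the charge protocol above.  The struct is
     * a hypothetical flat counter; the real page_counter also chains to a
     * parent and tracks high-water marks.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct page_counter {
        atomic_long usage;
        long max;
    };

    static bool page_counter_try_charge(struct page_counter *c, long nr_pages)
    {
        long new_usage = atomic_fetch_add(&c->usage, nr_pages) + nr_pages;

        if (new_usage > c->max) {
            /* Over the limit: back out the speculative charge. */
            atomic_fetch_sub(&c->usage, nr_pages);
            return false;
        }
        return true;
    }

    static void page_counter_uncharge(struct page_counter *c, long nr_pages)
    {
        atomic_fetch_sub(&c->usage, nr_pages);
    }

    int main(void)
    {
        struct page_counter c = { .max = 100 };

        printf("charge 64: %d\n", page_counter_try_charge(&c, 64)); /* 1 */
        printf("charge 64: %d\n", page_counter_try_charge(&c, 64)); /* 0: 128 > 100 */
        page_counter_uncharge(&c, 64);
        printf("usage: %ld\n", atomic_load(&c.usage));              /* 0 */
        return 0;
    }

The undo on failure is what keeps concurrent chargers honest: usage may transiently overshoot max, but never stays above it.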
|
memory_hotplug.c
  233  unsigned long i, pfn, end_pfn, nr_pages;  in register_page_bootmem_info_node() local
  237  nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;  in register_page_bootmem_info_node()
  240  for (i = 0; i < nr_pages; i++, page++)  in register_page_bootmem_info_node()
  260  static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,  in check_pfn_span() argument
  279  || !IS_ALIGNED(nr_pages, min_align)) {  in check_pfn_span()
  281  reason, pfn, pfn + nr_pages - 1);  in check_pfn_span()
  288  unsigned long nr_pages)  in check_hotplug_memory_addressable() argument
  290  const u64 max_addr = PFN_PHYS(pfn + nr_pages) - 1;  in check_hotplug_memory_addressable()
  309  int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,  in __add_pages() argument
  312  const unsigned long end_pfn = pfn + nr_pages;  in __add_pages()
  [all …]
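
check_pfn_span() and check_hotplug_memory_addressable() gate hotplug on two properties: both pfn and nr_pages must be aligned to the minimum (un)plug granularity, and the last byte, PFN_PHYS(pfn + nr_pages) - 1, must be addressable. A standalone sketch of those checks; MIN_ALIGN and MAX_PHYSMEM_BITS are illustrative values, not the kernel's:

    /* Sketch of the hotplug span checks; constants are assumptions. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT        12
    #define MIN_ALIGN         (1UL << 9)   /* e.g. a 2 MiB subsection */
    #define MAX_PHYSMEM_BITS  46

    #define PFN_PHYS(pfn)     ((uint64_t)(pfn) << PAGE_SHIFT)
    #define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

    static bool check_pfn_span(unsigned long pfn, unsigned long nr_pages)
    {
        if (!IS_ALIGNED(pfn, MIN_ALIGN) || !IS_ALIGNED(nr_pages, MIN_ALIGN)) {
            fprintf(stderr, "misaligned range [%lx, %lx]\n",
                    pfn, pfn + nr_pages - 1);
            return false;
        }
        /* The last byte of the range must be addressable. */
        return PFN_PHYS(pfn + nr_pages) - 1 < (1ULL << MAX_PHYSMEM_BITS);
    }

    int main(void)
    {
        printf("%d\n", check_pfn_span(0x200, 0x200));  /* 1: aligned, in range */
        printf("%d\n", check_pfn_span(0x201, 0x200));  /* 0: misaligned start */
        return 0;
    }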
|
sparse.c
  212  unsigned long nr_pages)  in subsection_mask_set() argument
  215  int end = subsection_map_index(pfn + nr_pages - 1);  in subsection_mask_set()
  220  void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)  in subsection_map_init()
  222  int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);  in subsection_map_init()
  225  if (!nr_pages)  in subsection_map_init()
  232  pfns = min(nr_pages, PAGES_PER_SECTION  in subsection_map_init()
  242  nr_pages -= pfns;  in subsection_map_init()
  246  void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)  in subsection_map_init() argument
  446  unsigned long nr_pages, int nid, struct vmem_altmap *altmap)  in __populate_section_memmap() argument
  652  unsigned long nr_pages, int nid, struct vmem_altmap *altmap)  in populate_section_memmap() argument
  [all …]
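
subsection_map_init() consumes a pfn range one memory section at a time, clamping each chunk with pfns = min(nr_pages, pages left in the current section) and decrementing nr_pages until it reaches zero. A self-contained walk of that loop with an assumed section size:

    /* Section-by-section walk, mirroring subsection_map_init()'s loop. */
    #include <stdio.h>

    #define PAGES_PER_SECTION (1UL << 15)  /* assumed: 128 MiB sections, 4 KiB pages */

    int main(void)
    {
        unsigned long pfn = 0x7000;        /* starts mid-section */
        unsigned long nr_pages = 0x12000;

        while (nr_pages) {
            /* Pages remaining in the section containing pfn. */
            unsigned long room = PAGES_PER_SECTION - (pfn & (PAGES_PER_SECTION - 1));
            unsigned long pfns = nr_pages < room ? nr_pages : room;

            printf("section %lu: pfn %#lx, %#lx pages\n",
                   pfn / PAGES_PER_SECTION, pfn, pfns);
            pfn += pfns;
            nr_pages -= pfns;
        }
        return 0;
    }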
|
percpu-km.c
   50  const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;  in pcpu_create_chunk() local
   60  pages = alloc_pages(gfp, order_base_2(nr_pages));  in pcpu_create_chunk()
   66  for (i = 0; i < nr_pages; i++)  in pcpu_create_chunk()
   73  pcpu_chunk_populated(chunk, 0, nr_pages);  in pcpu_create_chunk()
   84  const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;  in pcpu_destroy_chunk() local
   93  __free_pages(chunk->data, order_base_2(nr_pages));  in pcpu_destroy_chunk()
  104  size_t nr_pages, alloc_pages;  in pcpu_verify_alloc_info() local
  112  nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;  in pcpu_verify_alloc_info()
  113  alloc_pages = roundup_pow_of_two(nr_pages);  in pcpu_verify_alloc_info()
  115  if (alloc_pages > nr_pages)  in pcpu_verify_alloc_info()
  [all …]
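
pcpu_verify_alloc_info() (lines 112-115) warns when the chunk size is not a power of two, because percpu-km backs each chunk with a single alloc_pages() call of order order_base_2(nr_pages), which rounds up. A sketch of that waste computation with a hypothetical group size:

    /* Power-of-two waste check, as in pcpu_verify_alloc_info(). */
    #include <stdio.h>

    static unsigned long roundup_pow_of_two(unsigned long n)
    {
        unsigned long p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned long nr_pages = 12;  /* hypothetical group size in pages */
        unsigned long alloc_pages = roundup_pow_of_two(nr_pages);

        if (alloc_pages > nr_pages)
            printf("wasting %lu pages per chunk\n", alloc_pages - nr_pages);
        return 0;
    }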
|
hugetlb_cgroup.c
  164  unsigned int nr_pages;  in hugetlb_cgroup_move_parent() local
  178  nr_pages = compound_nr(page);  in hugetlb_cgroup_move_parent()
  182  page_counter_charge(&parent->hugepage[idx], nr_pages);  in hugetlb_cgroup_move_parent()
  186  page_counter_cancel(counter, nr_pages);  in hugetlb_cgroup_move_parent()
  231  static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,  in __hugetlb_cgroup_charge_cgroup() argument
  258  nr_pages, &counter)) {  in __hugetlb_cgroup_charge_cgroup()
  274  int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,  in hugetlb_cgroup_charge_cgroup() argument
  277  return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);  in hugetlb_cgroup_charge_cgroup()
  280  int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,  in hugetlb_cgroup_charge_cgroup_rsvd() argument
  283  return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);  in hugetlb_cgroup_charge_cgroup_rsvd()
  [all …]
|
gup_benchmark.c
   25  unsigned long nr_pages)  in put_back_pages() argument
   32  for (i = 0; i < nr_pages; i++)  in put_back_pages()
   39  unpin_user_pages(pages, nr_pages);  in put_back_pages()
   45  unsigned long nr_pages)  in verify_dma_pinned() argument
   54  for (i = 0; i < nr_pages; i++) {  in verify_dma_pinned()
   71  unsigned long i, nr_pages, addr, next;  in __gup_benchmark_ioctl() local
   81  nr_pages = gup->size / PAGE_SIZE;  in __gup_benchmark_ioctl()
   82  pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);  in __gup_benchmark_ioctl()
  141  nr_pages = i;  in __gup_benchmark_ioctl()
  150  verify_dma_pinned(cmd, pages, nr_pages);  in __gup_benchmark_ioctl()
  [all …]
|
gup.c
  1062  unsigned long start, unsigned long nr_pages,  in __get_user_pages() argument
  1070  if (!nr_pages)  in __get_user_pages()
  1113  &start, &nr_pages, i,  in __get_user_pages()
  1178  if (page_increm > nr_pages)  in __get_user_pages()
  1179  page_increm = nr_pages;  in __get_user_pages()
  1182  nr_pages -= page_increm;  in __get_user_pages()
  1183  } while (nr_pages);  in __get_user_pages()
  1293  unsigned long nr_pages,  in __get_user_pages_locked() argument
  1327  ret = __get_user_pages(mm, start, nr_pages, flags, pages,  in __get_user_pages_locked()
  1336  BUG_ON(ret >= nr_pages);  in __get_user_pages_locked()
  [all …]
|
mlock.c
   62  int nr_pages;  in clear_page_mlock() local
   67  nr_pages = thp_nr_pages(page);  in clear_page_mlock()
   68  mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);  in clear_page_mlock()
   69  count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);  in clear_page_mlock()
   83  count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);  in clear_page_mlock()
  100  int nr_pages = thp_nr_pages(page);  in mlock_vma_page() local
  102  mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);  in mlock_vma_page()
  103  count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);  in mlock_vma_page()
  162  int nr_pages = thp_nr_pages(page);  in __munlock_isolation_failed() local
  165  __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);  in __munlock_isolation_failed()
  [all …]
|
process_vm_access.c
   81  unsigned long nr_pages;  in process_vm_rw_single_vec() local
   90  nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;  in process_vm_rw_single_vec()
   95  while (!rc && nr_pages && iov_iter_count(iter)) {  in process_vm_rw_single_vec()
   96  int pinned_pages = min(nr_pages, max_pages_per_loop);  in process_vm_rw_single_vec()
  123  nr_pages -= pinned_pages;  in process_vm_rw_single_vec()
  162  unsigned long nr_pages = 0;  in process_vm_rw_core() local
  178  nr_pages = max(nr_pages, nr_pages_iov);  in process_vm_rw_core()
  182  if (nr_pages == 0)  in process_vm_rw_core()
  185  if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {  in process_vm_rw_core()
  189  sizeof(struct pages *)*nr_pages),  in process_vm_rw_core()
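
Line 90 is the standard page-span formula: the number of pages touched by [addr, addr+len) is the index of the last byte's page minus the index of the first byte's page, plus one. A worked standalone example:

    /* Page-span arithmetic from process_vm_rw_single_vec(), line 90. */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long addr = 0x1ff8;  /* 8 bytes before a page boundary */
        unsigned long len = 16;       /* ...so the span crosses into the next page */
        unsigned long nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

        printf("nr_pages = %lu\n", nr_pages);  /* 2 */
        return 0;
    }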
|
memcontrol.c
  261  unsigned int nr_pages;  in obj_cgroup_release() local
  286  nr_pages = nr_bytes >> PAGE_SHIFT;  in obj_cgroup_release()
  290  if (nr_pages)  in obj_cgroup_release()
  291  __memcg_kmem_uncharge(memcg, nr_pages);  in obj_cgroup_release()
  670  unsigned long nr_pages = page_counter_read(&memcg->memory);  in soft_limit_excess() local
  674  if (nr_pages > soft_limit)  in soft_limit_excess()
  675  excess = nr_pages - soft_limit;  in soft_limit_excess()
  944  int nr_pages)  in mem_cgroup_charge_statistics() argument
  947  if (nr_pages > 0)  in mem_cgroup_charge_statistics()
  951  nr_pages = -nr_pages; /* for event */  in mem_cgroup_charge_statistics()
  [all …]
|
/OK3568_Linux_fs/kernel/include/linux/

hugetlb_cgroup.h
  128  extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
  130  extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
  132  extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
  135  extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
  138  extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
  140  extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
  143  extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
  145  extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
  153  unsigned long nr_pages,
  163  unsigned long nr_pages,  in hugetlb_cgroup_uncharge_file_region() argument
  [all …]
|
page_counter.h
  51  void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
  52  void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
  54  unsigned long nr_pages,
  56  void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
  57  void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
  58  void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
  61  unsigned long nr_pages)  in page_counter_set_high() argument
  63  WRITE_ONCE(counter->high, nr_pages);  in page_counter_set_high()
  66  int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
  68  unsigned long *nr_pages);
|
memory_hotplug.h
  112  extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
  115  extern int online_pages(unsigned long pfn, unsigned long nr_pages,
  147  extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
  151  extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
  156  unsigned long nr_pages, struct mhp_params *params)  in add_pages() argument
  158  return __add_pages(nid, start_pfn, nr_pages, params);  in add_pages()
  161  int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
  315  extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
  324  static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)  in offline_pages() argument
  351  unsigned long nr_pages,
  [all …]
|
/OK3568_Linux_fs/kernel/drivers/media/v4l2-core/

videobuf-dma-sg.c
   63  int nr_pages)  in videobuf_vmalloc_to_sg() argument
   69  sglist = vzalloc(array_size(nr_pages, sizeof(*sglist)));  in videobuf_vmalloc_to_sg()
   72  sg_init_table(sglist, nr_pages);  in videobuf_vmalloc_to_sg()
   73  for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {  in videobuf_vmalloc_to_sg()
   93  int nr_pages, int offset, size_t size)  in videobuf_pages_to_sg() argument
  100  sglist = vmalloc(array_size(nr_pages, sizeof(*sglist)));  in videobuf_pages_to_sg()
  103  sg_init_table(sglist, nr_pages);  in videobuf_pages_to_sg()
  111  for (i = 1; i < nr_pages; i++) {  in videobuf_pages_to_sg()
  174  dma->nr_pages = last-first+1;  in videobuf_dma_init_user_locked()
  175  dma->pages = kmalloc_array(dma->nr_pages, sizeof(struct page *),  in videobuf_dma_init_user_locked()
  [all …]
|
/OK3568_Linux_fs/kernel/fs/iomap/

swapfile.c
   19  unsigned long nr_pages; /* number of pages collected */  member
   32  unsigned long nr_pages;  in iomap_swapfile_add_extent() local
   39  if (unlikely(isi->nr_pages >= isi->sis->max))  in iomap_swapfile_add_extent()
   41  max_pages = isi->sis->max - isi->nr_pages;  in iomap_swapfile_add_extent()
   54  nr_pages = next_ppage - first_ppage;  in iomap_swapfile_add_extent()
   55  nr_pages = min(nr_pages, max_pages);  in iomap_swapfile_add_extent()
   71  error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);  in iomap_swapfile_add_extent()
   75  isi->nr_pages += nr_pages;  in iomap_swapfile_add_extent()
  184  if (isi.nr_pages == 0) {  in iomap_swapfile_activate()
  190  sis->max = isi.nr_pages;  in iomap_swapfile_activate()
  [all …]
|
/OK3568_Linux_fs/kernel/drivers/dma-buf/heaps/

deferred-free-helper.c
  28  size_t nr_pages)  in deferred_free() argument
  33  item->nr_pages = nr_pages;  in deferred_free()
  38  list_nr_pages += nr_pages;  in deferred_free()
  47  size_t nr_pages;  in free_one_item() local
  57  nr_pages = item->nr_pages;  in free_one_item()
  58  list_nr_pages -= nr_pages;  in free_one_item()
  62  return nr_pages;  in free_one_item()
  67  unsigned long nr_pages;  in get_freelist_nr_pages() local
  71  nr_pages = list_nr_pages;  in get_freelist_nr_pages()
  73  return nr_pages;  in get_freelist_nr_pages()
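
All three functions above touch list_nr_pages only under a lock, so even the read-only get_freelist_nr_pages() locks purely to take a consistent snapshot. A userspace sketch of the same discipline with a pthread mutex standing in for the kernel lock (the item queueing itself is elided):

    /* Lock-protected counter snapshot, as in deferred-free-helper.c. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long list_nr_pages;

    static void deferred_free(size_t nr_pages)
    {
        pthread_mutex_lock(&free_lock);
        list_nr_pages += nr_pages;   /* the item would also be queued here */
        pthread_mutex_unlock(&free_lock);
    }

    static unsigned long get_freelist_nr_pages(void)
    {
        unsigned long nr_pages;

        pthread_mutex_lock(&free_lock);
        nr_pages = list_nr_pages;    /* consistent snapshot under the lock */
        pthread_mutex_unlock(&free_lock);
        return nr_pages;
    }

    int main(void)
    {
        deferred_free(32);
        printf("%lu pages pending\n", get_freelist_nr_pages());
        return 0;
    }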
|
/OK3568_Linux_fs/kernel/arch/arm64/kvm/hyp/

reserved_mem.c
  57  u64 nr_pages, prev, hyp_mem_pages = 0;  in kvm_hyp_reserve() local
  82  nr_pages = 0;  in kvm_hyp_reserve()
  84  prev = nr_pages;  in kvm_hyp_reserve()
  85  nr_pages = hyp_mem_pages + prev;  in kvm_hyp_reserve()
  86  nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE);  in kvm_hyp_reserve()
  87  nr_pages += __hyp_pgtable_max_pages(nr_pages);  in kvm_hyp_reserve()
  88  } while (nr_pages != prev);  in kvm_hyp_reserve()
  89  hyp_mem_pages += nr_pages;  in kvm_hyp_reserve()
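
Lines 82-88 are a fixed-point iteration: the struct hyp_page metadata covering the reservation itself occupies pages, which in turn need page-table pages, so the count is recomputed until it stops changing. A runnable sketch, with sizeof(struct hyp_page) and the page-table overhead as stated assumptions (__hyp_pgtable_max_pages() is replaced by a stand-in):

    /* Fixed-point metadata sizing, mirroring kvm_hyp_reserve(). */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct hyp_page { unsigned int refcount, order; };  /* illustrative layout */

    /* Assumed stand-in for __hyp_pgtable_max_pages(). */
    static unsigned long pgtable_max_pages(unsigned long nr_pages)
    {
        return DIV_ROUND_UP(nr_pages, 512);  /* one table per 512 entries */
    }

    int main(void)
    {
        unsigned long hyp_mem_pages = 1UL << 18;  /* base reservation: 1 GiB */
        unsigned long nr_pages = 0, prev;

        do {
            prev = nr_pages;
            nr_pages = hyp_mem_pages + prev;
            nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE);
            nr_pages += pgtable_max_pages(nr_pages);
        } while (nr_pages != prev);  /* converges in a handful of passes */

        printf("metadata pages: %lu on top of %lu\n", nr_pages, hyp_mem_pages);
        return 0;
    }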
|
/OK3568_Linux_fs/kernel/drivers/xen/

balloon.c
  402  static enum bp_state increase_reservation(unsigned long nr_pages)  in increase_reservation() argument
  408  if (nr_pages > ARRAY_SIZE(frame_list))  in increase_reservation()
  409  nr_pages = ARRAY_SIZE(frame_list);  in increase_reservation()
  412  for (i = 0; i < nr_pages; i++) {  in increase_reservation()
  414  nr_pages = i;  in increase_reservation()
  422  rc = xenmem_reservation_increase(nr_pages, frame_list);  in increase_reservation()
  441  static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)  in decrease_reservation() argument
  449  if (nr_pages > ARRAY_SIZE(frame_list))  in decrease_reservation()
  450  nr_pages = ARRAY_SIZE(frame_list);  in decrease_reservation()
  452  for (i = 0; i < nr_pages; i++) {  in decrease_reservation()
  [all …]
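
increase_reservation() clamps every pass to the fixed frame_list batch (lines 408-409) and, if the allocator runs dry mid-batch, shrinks nr_pages to the count actually gathered (line 414) before handing the list to Xen. A sketch of that clamp-and-shrink flow with a faked allocator:

    /* Batch clamp and mid-batch shrink, as in increase_reservation(). */
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static unsigned long frame_list[64];

    /* Pretend the allocator fails after 40 pages. */
    static int alloc_frame(unsigned long i, unsigned long *frame)
    {
        if (i >= 40)
            return -1;
        *frame = 0x1000 + i;
        return 0;
    }

    int main(void)
    {
        unsigned long nr_pages = 1000, i;

        if (nr_pages > ARRAY_SIZE(frame_list))
            nr_pages = ARRAY_SIZE(frame_list);  /* one batch per pass */

        for (i = 0; i < nr_pages; i++) {
            if (alloc_frame(i, &frame_list[i])) {
                nr_pages = i;  /* only hand over what we actually got */
                break;
            }
        }
        printf("reserving %lu frames this pass\n", nr_pages);  /* 40 */
        return 0;
    }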
|
/OK3568_Linux_fs/kernel/include/trace/hooks/

memory.h
  15  TP_PROTO(unsigned long addr, int nr_pages),
  16  TP_ARGS(addr, nr_pages));
  19  TP_PROTO(unsigned long addr, int nr_pages),
  20  TP_ARGS(addr, nr_pages));
  23  TP_PROTO(unsigned long addr, int nr_pages),
  24  TP_ARGS(addr, nr_pages));
  27  TP_PROTO(unsigned long addr, int nr_pages),
  28  TP_ARGS(addr, nr_pages));
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/

mali_kbase_mem_pool.c
   84  struct list_head *page_list, size_t nr_pages)  in kbase_mem_pool_add_list_locked() argument
   89  pool->cur_size += nr_pages;  in kbase_mem_pool_add_list_locked()
   91  pool_dbg(pool, "added %zu pages\n", nr_pages);  in kbase_mem_pool_add_list_locked()
   95  struct list_head *page_list, size_t nr_pages)  in kbase_mem_pool_add_list() argument
   98  kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);  in kbase_mem_pool_add_list()
  441  int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,  in kbase_mem_pool_alloc_pages() argument
  449  pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages);  in kbase_mem_pool_alloc_pages()
  453  nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool));  in kbase_mem_pool_alloc_pages()
  460  if (i != nr_pages && pool->next_pool) {  in kbase_mem_pool_alloc_pages()
  463  nr_pages - i, pages + i);  in kbase_mem_pool_alloc_pages()
  [all …]
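
kbase_mem_pool_alloc_pages() takes min(nr_pages, pool size) from the current pool and forwards the shortfall to pool->next_pool (lines 453-463). A heavily simplified sketch of that tiered fallback, with pools reduced to bare counters:

    /* Tiered pool fallback, loosely after kbase_mem_pool_alloc_pages(). */
    #include <stdio.h>

    struct mem_pool {
        size_t cur_size;             /* pages currently held by this pool */
        struct mem_pool *next_pool;  /* fallback tier, or NULL */
    };

    static size_t pool_alloc(struct mem_pool *pool, size_t nr_pages)
    {
        size_t i = nr_pages < pool->cur_size ? nr_pages : pool->cur_size;

        pool->cur_size -= i;
        if (i != nr_pages && pool->next_pool)
            i += pool_alloc(pool->next_pool, nr_pages - i);
        return i;  /* pages actually obtained; caller handles any shortfall */
    }

    int main(void)
    {
        struct mem_pool lvl2 = { .cur_size = 100 };
        struct mem_pool lvl1 = { .cur_size = 16, .next_pool = &lvl2 };

        printf("got %zu of 64 pages\n", pool_alloc(&lvl1, 64));
        return 0;
    }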
|
mali_kbase_mem.c
  133  struct kbase_context *kctx, u64 start_pfn, size_t nr_pages)  in kbase_region_tracker_find_region_enclosing_range_free() argument
  139  u64 end_pfn = start_pfn + nr_pages;  in kbase_region_tracker_find_region_enclosing_range_free()
  150  tmp_end_pfn = reg->start_pfn + reg->nr_pages;  in kbase_region_tracker_find_region_enclosing_range_free()
  186  tmp_end_pfn = reg->start_pfn + reg->nr_pages;  in kbase_region_tracker_find_region_enclosing_address()
  236  …g_reqs(struct kbase_context *kctx, struct kbase_va_region *reg_reqs, size_t nr_pages, size_t align)  in kbase_region_tracker_find_region_meeting_reqs() argument
  251  if ((reg->nr_pages >= nr_pages) &&  in kbase_region_tracker_find_region_meeting_reqs()
  257  (start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) &&  in kbase_region_tracker_find_region_meeting_reqs()
  258  ((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1))  in kbase_region_tracker_find_region_meeting_reqs()
  298  prev->nr_pages += reg->nr_pages;  in kbase_remove_va_region()
  315  next->nr_pages += reg->nr_pages;  in kbase_remove_va_region()
  [all …]
|
/OK3568_Linux_fs/kernel/arch/arm64/kvm/hyp/nvhe/

setup.c
   33  unsigned long vstart, vend, nr_pages;  in divide_memory_pool() local
   38  nr_pages = (vend - vstart) >> PAGE_SHIFT;  in divide_memory_pool()
   39  vmemmap_base = hyp_early_alloc_contig(nr_pages);  in divide_memory_pool()
   43  nr_pages = hyp_s1_pgtable_pages();  in divide_memory_pool()
   44  hyp_pgt_base = hyp_early_alloc_contig(nr_pages);  in divide_memory_pool()
   48  nr_pages = host_s2_mem_pgtable_pages();  in divide_memory_pool()
   49  host_s2_mem_pgt_base = hyp_early_alloc_contig(nr_pages);  in divide_memory_pool()
   53  nr_pages = host_s2_dev_pgtable_pages();  in divide_memory_pool()
   54  host_s2_dev_pgt_base = hyp_early_alloc_contig(nr_pages);  in divide_memory_pool()
  150  unsigned long nr_pages, reserved_pages, pfn;  in __pkvm_init_finalise() local
  [all …]
|
/OK3568_Linux_fs/kernel/arch/powerpc/platforms/powernv/

memtrace.c
   72  unsigned long nr_pages)  in memtrace_clear_range() argument
   81  for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {  in memtrace_clear_range()
   89  static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)  in memtrace_offline_pages() argument
   92  const unsigned long size = PFN_PHYS(nr_pages);  in memtrace_offline_pages()
  100  if (offline_pages(start_pfn, nr_pages)) {  in memtrace_offline_pages()
  115  u64 start_pfn, end_pfn, nr_pages, pfn;  in memtrace_alloc_node() local
  124  nr_pages = size >> PAGE_SHIFT;  in memtrace_alloc_node()
  127  end_pfn = round_down(end_pfn - nr_pages, nr_pages);  in memtrace_alloc_node()
  130  for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {  in memtrace_alloc_node()
  131  if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {  in memtrace_alloc_node()
  [all …]
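
memtrace_alloc_node() searches for an offlinable block from the top of the node downwards: the first candidate is the highest naturally aligned base (line 127), then it steps down one block at a time (line 130). A standalone sketch; the offline predicate is faked, and this round_down only works because nr_pages is a power of two:

    /* Top-down aligned block search, as in memtrace_alloc_node(). */
    #include <stdbool.h>
    #include <stdio.h>

    #define round_down(x, a) ((x) & ~((a) - 1))  /* a must be a power of two */

    static bool try_offline(unsigned long base, unsigned long nr_pages)
    {
        (void)nr_pages;              /* size unused in this toy predicate */
        return base == 0x40000;      /* pretend only this block can offline */
    }

    int main(void)
    {
        unsigned long start_pfn = 0x10000, end_pfn = 0x80000;
        unsigned long nr_pages = 0x8000;  /* block size, power of two here */
        unsigned long base_pfn;

        end_pfn = round_down(end_pfn - nr_pages, nr_pages);
        for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
            if (try_offline(base_pfn, nr_pages)) {
                printf("offlined [%#lx, %#lx)\n", base_pfn, base_pfn + nr_pages);
                break;
            }
        }
        return 0;
    }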
|
/OK3568_Linux_fs/kernel/tools/testing/selftests/vm/

userfaultfd.c
   64  static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;  variable
  206  if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED))  in anon_release_pages()
  212  *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,  in anon_allocate_area()
  225  rel_area == huge_fd_off0 ? 0 : nr_pages * page_size,  in hugetlb_release_pages()
  226  nr_pages * page_size))  in hugetlb_release_pages()
  235  *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,  in hugetlb_allocate_area()
  239  nr_pages * page_size);  in hugetlb_allocate_area()
  244  area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,  in hugetlb_allocate_area()
  247  nr_pages * page_size);  in hugetlb_allocate_area()
  278  if (madvise(rel_area, nr_pages * page_size, MADV_REMOVE))  in shmem_release_pages()
  [all …]
|
/OK3568_Linux_fs/kernel/net/rds/

info.c
  163  unsigned long nr_pages = 0;  in rds_info_getsockopt() local
  187  nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))  in rds_info_getsockopt()
  190  pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);  in rds_info_getsockopt()
  195  ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);  in rds_info_getsockopt()
  196  if (ret != nr_pages) {  in rds_info_getsockopt()
  198  nr_pages = ret;  in rds_info_getsockopt()
  200  nr_pages = 0;  in rds_info_getsockopt()
  205  rdsdebug("len %d nr_pages %lu\n", len, nr_pages);  in rds_info_getsockopt()
  238  unpin_user_pages(pages, nr_pages);  in rds_info_getsockopt()
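
Line 187 computes the page span by aligning the end up and the start down, then shifting the difference to pages; it is equivalent to the first/last-page-index form used in process_vm_access.c above. A standalone check:

    /* Align-based page-span arithmetic from rds_info_getsockopt(). */
    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        unsigned long start = 0x1ff8, len = 16;  /* same span as earlier example */
        unsigned long nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))
                                 >> PAGE_SHIFT;

        printf("nr_pages = %lu\n", nr_pages);  /* 2, matching the other formula */
        return 0;
    }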
|