Searched refs: new_pages (Results 1 – 15 of 15), sorted by relevance

/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/
mali_linux_trace.h
225 size_t new_pages),
226 TP_ARGS(reg, fault, new_pages),
231 __field(size_t, new_pages)
238 __entry->new_pages = new_pages;
243 __entry->fault_extra_addr, __entry->new_pages,
497 size_t old_pages, size_t available_pages, size_t new_pages),
498 TP_ARGS(reg, freed_pages, old_pages, available_pages, new_pages),
504 __field(size_t, new_pages)
511 __entry->new_pages = new_pages;
515 __entry->available_pages, __entry->new_pages)
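
The TP_ARGS/__field/__entry fragments above come from tracepoint definitions that record how many pages a GPU region grew by. As a reference for reading them, here is a minimal sketch of the TRACE_EVENT pattern they follow; the event name, TRACE_SYSTEM and fields are invented for illustration and are not the actual Mali trace events.

/* Sketch only: illustrative event, not mali_linux_trace.h's definitions. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM example

#if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EXAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(example_region_grow,
	/* prototype and arguments of the trace_example_region_grow() call */
	TP_PROTO(u64 start_pfn, size_t new_pages),
	TP_ARGS(start_pfn, new_pages),

	/* layout of one trace record */
	TP_STRUCT__entry(
		__field(u64, start_pfn)
		__field(size_t, new_pages)
	),

	/* copy the arguments into the record at the trace point */
	TP_fast_assign(
		__entry->start_pfn = start_pfn;
		__entry->new_pages = new_pages;
	),

	/* textual rendering shown in the trace buffer */
	TP_printk("start_pfn=%llu new_pages=%zu",
		  (unsigned long long)__entry->start_pfn, __entry->new_pages)
);

#endif /* _TRACE_EXAMPLE_H */

/* Must be outside the include guard; the usual TRACE_INCLUDE_PATH/
 * TRACE_INCLUDE_FILE defines are omitted from this sketch. */
#include <trace/define_trace.h>
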
mali_kbase_mem_linux.h
127 int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
139 struct kbase_va_region *reg, u64 new_pages);
184 struct kbase_va_region *reg, u64 new_pages,
265 u64 new_pages, u64 old_pages);
412 u64 new_pages, u64 old_pages);
mali_kbase_mem_linux.c
2188 struct kbase_va_region *reg, u64 new_pages, in kbase_mem_grow_gpu_mapping() argument
2193 u64 delta = new_pages - old_pages; in kbase_mem_grow_gpu_mapping()
2209 u64 new_pages, u64 old_pages) in kbase_mem_shrink_cpu_mapping() argument
2213 if (new_pages == old_pages) in kbase_mem_shrink_cpu_mapping()
2218 (gpu_va_start + new_pages)<<PAGE_SHIFT, in kbase_mem_shrink_cpu_mapping()
2219 (old_pages - new_pages)<<PAGE_SHIFT, 1); in kbase_mem_shrink_cpu_mapping()
2223 struct kbase_va_region *const reg, u64 const new_pages, in kbase_mem_shrink_gpu_mapping() argument
2226 u64 delta = old_pages - new_pages; in kbase_mem_shrink_gpu_mapping()
2230 ret = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn + new_pages, in kbase_mem_shrink_gpu_mapping()
2231 alloc->pages + new_pages, delta, delta, kctx->as_nr, false); in kbase_mem_shrink_gpu_mapping()
[all …]
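
The kbase_mem_grow_gpu_mapping()/kbase_mem_shrink_*_mapping() hits above all reduce to the same page-count arithmetic: a grow maps delta = new_pages - old_pages additional pages at the end of the region, while a shrink unmaps old_pages - new_pages pages starting at page offset new_pages. Below is a minimal, self-contained sketch of the shrink-side arithmetic; the helper name and the 4 KiB PAGE_SHIFT are assumptions for illustration, not driver code.

/* Sketch: byte range a shrink from old_pages to new_pages has to unmap,
 * mirroring the (gpu_va_start + new_pages) << PAGE_SHIFT arithmetic in
 * kbase_mem_shrink_cpu_mapping(). Assumes 4 KiB pages. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static void shrink_unmap_range(uint64_t gpu_va_start_pfn, uint64_t new_pages,
			       uint64_t old_pages, uint64_t *off, uint64_t *len)
{
	/* Everything past the first new_pages pages goes away. */
	*off = (gpu_va_start_pfn + new_pages) << PAGE_SHIFT;
	*len = (old_pages - new_pages) << PAGE_SHIFT;
}

int main(void)
{
	uint64_t off, len;

	/* Shrinking a region that starts at PFN 0x1000 from 64 pages down
	 * to 16 unmaps the trailing 48 pages. */
	shrink_unmap_range(0x1000, 16, 64, &off, &len);
	printf("unmap offset=0x%llx length=0x%llx\n",
	       (unsigned long long)off, (unsigned long long)len);
	return 0;
}
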
mali_kbase_mem.c
2707 struct tagged_addr *new_pages = NULL; in kbase_alloc_phy_pages_helper_locked() local
2741 new_pages = tp; in kbase_alloc_phy_pages_helper_locked()
2844 return new_pages; in kbase_alloc_phy_pages_helper_locked()
3933 size_t new_pages = old_pages; in kbase_mem_jit_trim_pages_from_region() local
4027 new_pages -= to_free; in kbase_mem_jit_trim_pages_from_region()
4029 err = kbase_mem_shrink(kctx, reg, new_pages); in kbase_mem_jit_trim_pages_from_region()
4033 available_pages, new_pages); in kbase_mem_jit_trim_pages_from_region()
/OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/
mali_kbase_mmu.c
116 size_t new_pages; in page_fault_worker() local
269 new_pages = make_multiple(fault_rel_pfn - in page_fault_worker()
274 if (new_pages + kbase_reg_current_backed_size(region) > in page_fault_worker()
276 new_pages = region->nr_pages - in page_fault_worker()
279 if (0 == new_pages) { in page_fault_worker()
297 if (kbase_alloc_phy_pages_helper(region->gpu_alloc, new_pages) == 0) { in page_fault_worker()
300 region->cpu_alloc, new_pages) == 0) { in page_fault_worker()
304 new_pages); in page_fault_worker()
320 pfn_offset = kbase_reg_current_backed_size(region) - new_pages; in page_fault_worker()
332 new_pages, region->flags); in page_fault_worker()
[all …]
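
page_fault_worker() above grows the faulting region on demand: it works out how many extra pages are needed to back the faulting PFN, rounds that up to the region's grow granularity with make_multiple(), clamps the result so the region never exceeds its reserved nr_pages, and treats zero as "nothing left to grow". A stand-alone sketch of that calculation follows; the helper and parameter names are illustrative, and it assumes the caller already knows the fault lies beyond the currently backed pages.

/* Sketch of the growth sizing in page_fault_worker(); not driver code. */
#include <stddef.h>
#include <stdio.h>

/* Round val up to the next multiple of `multiple` (multiple > 0). */
static size_t make_multiple(size_t val, size_t multiple)
{
	size_t rem = val % multiple;

	return rem ? val + (multiple - rem) : val;
}

/* fault_rel_pfn: faulting page, relative to the region start
 * backed_pages:  pages currently backed (assumed <= fault_rel_pfn)
 * extent:        grow granularity of the region
 * nr_pages:      total pages reserved for the region */
static size_t calc_new_pages(size_t fault_rel_pfn, size_t backed_pages,
			     size_t extent, size_t nr_pages)
{
	size_t new_pages = make_multiple(fault_rel_pfn - backed_pages + 1,
					 extent);

	/* Never grow past the region's reserved size. */
	if (new_pages + backed_pages > nr_pages)
		new_pages = nr_pages - backed_pages;

	return new_pages; /* 0: region is already fully backed */
}

int main(void)
{
	/* Fault at relative PFN 130 in a 256-page region backed to 128,
	 * grow granularity 64: the region grows by 64 pages. */
	printf("%zu\n", calc_new_pages(130, 128, 64, 256));
	return 0;
}
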
mali_kbase_mem_linux.c
65 u64 new_pages, u64 old_pages);
82 u64 new_pages, u64 old_pages);
1441 u64 new_pages, u64 old_pages) argument
1444 u64 delta = new_pages - old_pages;
1459 u64 new_pages, u64 old_pages) argument
1463 if (new_pages == old_pages)
1468 (gpu_va_start + new_pages)<<PAGE_SHIFT,
1469 (old_pages - new_pages)<<PAGE_SHIFT, 1);
1474 u64 new_pages, u64 old_pages) argument
1476 u64 delta = old_pages - new_pages;
[all …]
mali_kbase_mem_linux.h
54 int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
90 u64 new_pages, u64 old_pages);
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/mmu/
mali_kbase_mmu.c
815 struct kbase_va_region *region, size_t new_pages, in page_fault_try_alloc() argument
836 if (kctx->kbdev->pagesize_2mb && new_pages >= (SZ_2M / SZ_4K)) { in page_fault_try_alloc()
845 new_pages *= 2; in page_fault_try_alloc()
850 pages_still_required = estimate_pool_space_required(root_pool, new_pages); in page_fault_try_alloc()
864 pages_still_required = new_pages; in page_fault_try_alloc()
936 new_pages, total_gpu_pages_alloced + total_cpu_pages_alloced, in page_fault_try_alloc()
947 pages_still_required = estimate_pool_space_required(root_pool, new_pages); in page_fault_try_alloc()
973 size_t new_pages; in kbase_mmu_page_fault_worker() local
1218 new_pages = reg_grow_calc_extra_pages(kbdev, region, fault_rel_pfn); in kbase_mmu_page_fault_worker()
1221 new_pages = min(new_pages, region->nr_pages - current_backed_size); in kbase_mmu_page_fault_worker()
[all …]
/OK3568_Linux_fs/kernel/drivers/block/drbd/
drbd_bitmap.c
381 struct page **new_pages, *page; in bm_realloc_pages() local
397 new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN); in bm_realloc_pages()
398 if (!new_pages) { in bm_realloc_pages()
399 new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO); in bm_realloc_pages()
400 if (!new_pages) in bm_realloc_pages()
406 new_pages[i] = old_pages[i]; in bm_realloc_pages()
410 bm_free_pages(new_pages + have, i - have); in bm_realloc_pages()
411 bm_vk_free(new_pages); in bm_realloc_pages()
417 new_pages[i] = page; in bm_realloc_pages()
421 new_pages[i] = old_pages[i]; in bm_realloc_pages()
[all …]
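
bm_realloc_pages() above shows a common reallocation shape for large pointer arrays: try kzalloc() first, fall back quietly to __vmalloc(), carry over the pages that already exist, and on a partial failure free only the pages added by this call. Here is a reduced sketch of that shape; the function name, parameters and simplified error handling are illustrative, not DRBD's.

/* Sketch, not DRBD code: grow an array of page pointers from `have` to
 * `want` entries, reusing existing pages and unwinding cleanly on failure. */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static struct page **realloc_page_array(struct page **old_pages,
					unsigned long have, unsigned long want)
{
	struct page **new_pages;
	struct page *page;
	size_t bytes = want * sizeof(struct page *);
	unsigned long i;

	/* Prefer a contiguous kmalloc array; fall back to vmalloc without
	 * a warning when the allocation is large or memory is tight. */
	new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
	if (!new_pages) {
		new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO);
		if (!new_pages)
			return NULL;
	}

	/* Carry over the pages we already have (up to the new size). */
	for (i = 0; i < have && i < want; i++)
		new_pages[i] = old_pages[i];

	/* Allocate the rest; on failure, free only what this call added. */
	for (; i < want; i++) {
		page = alloc_page(GFP_NOIO | __GFP_NOWARN | __GFP_ZERO);
		if (!page) {
			while (i-- > have)
				__free_page(new_pages[i]);
			kvfree(new_pages);
			return NULL;
		}
		new_pages[i] = page;
	}

	/* Freeing old_pages itself (and any surplus old pages when
	 * shrinking) is left to the caller, as in bm_realloc_pages(). */
	return new_pages;
}
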
/OK3568_Linux_fs/kernel/drivers/gpu/drm/ttm/
ttm_page_alloc.c
592 struct list_head new_pages; in ttm_page_pool_fill_locked() local
601 INIT_LIST_HEAD(&new_pages); in ttm_page_pool_fill_locked()
602 r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, in ttm_page_pool_fill_locked()
607 list_splice(&new_pages, &pool->list); in ttm_page_pool_fill_locked()
613 list_for_each_entry(p, &new_pages, lru) { in ttm_page_pool_fill_locked()
616 list_splice(&new_pages, &pool->list); in ttm_page_pool_fill_locked()
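
ttm_page_pool_fill_locked() above collects the freshly allocated pages on a private new_pages list head and splices the whole batch into the pool's list in one step (ring_buffer_resize() further down stages its new buffer pages the same way). Below is a minimal sketch of that staging-list pattern using the standard list helpers; the structure and function names are invented for illustration.

/* Sketch of the staging-list pattern; illustrative types, not TTM's. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct buf_page {
	struct list_head lru;
};

static int stage_and_splice_pages(struct list_head *live, unsigned long nr)
{
	LIST_HEAD(new_pages);		/* built up privately, not yet visible */
	struct buf_page *bpage, *tmp;
	unsigned long i;

	for (i = 0; i < nr; i++) {
		bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
		if (!bpage)
			goto free_staged;
		list_add(&bpage->lru, &new_pages);
	}

	/* Success: publish the whole batch at once. */
	list_splice(&new_pages, live);
	return 0;

free_staged:
	/* Nothing was published yet, so just free the staged entries. */
	list_for_each_entry_safe(bpage, tmp, &new_pages, lru) {
		list_del(&bpage->lru);
		kfree(bpage);
	}
	return -ENOMEM;
}
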
/OK3568_Linux_fs/kernel/drivers/base/firmware_loader/
main.c
323 struct page **new_pages; in fw_grow_paged_buf() local
325 new_pages = kvmalloc_array(new_array_size, sizeof(void *), in fw_grow_paged_buf()
327 if (!new_pages) in fw_grow_paged_buf()
329 memcpy(new_pages, fw_priv->pages, in fw_grow_paged_buf()
331 memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) * in fw_grow_paged_buf()
334 fw_priv->pages = new_pages; in fw_grow_paged_buf()
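
fw_grow_paged_buf() above grows the firmware page-pointer array with kvmalloc_array(), copies the existing entries, zeroes the newly added slots and swaps the array in. A condensed sketch of that pattern follows; the structure and its fields are invented, and the real code tracks more state (such as how many pages are actually allocated).

/* Sketch only: grow a zero-tailed array of page pointers, kvmalloc-style. */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

struct paged_buf {
	struct page **pages;
	int page_array_size;
};

static int grow_page_array(struct paged_buf *buf, int new_array_size)
{
	struct page **new_pages;

	new_pages = kvmalloc_array(new_array_size, sizeof(void *), GFP_KERNEL);
	if (!new_pages)
		return -ENOMEM;

	/* Keep the existing page pointers, clear the newly added slots. */
	memcpy(new_pages, buf->pages,
	       buf->page_array_size * sizeof(void *));
	memset(&new_pages[buf->page_array_size], 0,
	       (new_array_size - buf->page_array_size) * sizeof(void *));

	kvfree(buf->pages);
	buf->pages = new_pages;
	buf->page_array_size = new_array_size;
	return 0;
}
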
/OK3568_Linux_fs/kernel/drivers/virtio/
virtio_mem.c
263 int new_pages = PFN_UP(new_bytes); in virtio_mem_mb_state_prepare_next_mb() local
266 if (vm->mb_state && old_pages == new_pages) in virtio_mem_mb_state_prepare_next_mb()
269 new_mb_state = vzalloc(new_pages * PAGE_SIZE); in virtio_mem_mb_state_prepare_next_mb()
375 int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long)); in virtio_mem_sb_bitmap_prepare_next_mb() local
378 if (vm->sb_bitmap && old_pages == new_pages) in virtio_mem_sb_bitmap_prepare_next_mb()
381 new_sb_bitmap = vzalloc(new_pages * PAGE_SIZE); in virtio_mem_sb_bitmap_prepare_next_mb()
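
Both virtio_mem helpers above size their metadata in whole pages with PFN_UP() and skip the reallocation when the rounded-up page count is unchanged. The arithmetic, as a tiny stand-alone example; PAGE_SIZE and PFN_UP are redefined locally for illustration.

/* Sketch of the PFN_UP() sizing check; constants redefined locally. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

int main(void)
{
	unsigned long old_pages = PFN_UP(4096);	/* 1 page  */
	unsigned long new_pages = PFN_UP(4097);	/* 2 pages */

	if (new_pages != old_pages)
		printf("reallocate: %lu -> %lu page(s)\n",
		       old_pages, new_pages);
	return 0;
}
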
/OK3568_Linux_fs/kernel/kernel/trace/
ring_buffer.c
531 struct list_head new_pages; /* new pages to add */ member
1642 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1929 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
1994 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2094 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2096 &cpu_buffer->new_pages, cpu)) { in ring_buffer_resize()
2159 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2162 &cpu_buffer->new_pages, cpu_id)) { in ring_buffer_resize()
2216 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
2219 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
[all …]
/OK3568_Linux_fs/kernel/arch/s390/kernel/
debug.c
1263 int rc, new_pages; in debug_input_pages_fn() local
1277 new_pages = debug_get_uint(str); in debug_input_pages_fn()
1278 if (new_pages < 0) { in debug_input_pages_fn()
1282 rc = debug_set_size(id, id->nr_areas, new_pages); in debug_input_pages_fn()
/OK3568_Linux_fs/kernel/io_uring/
io_uring.c
8651 unsigned long page_limit, cur_pages, new_pages; in __io_account_mem() local
8658 new_pages = cur_pages + nr_pages; in __io_account_mem()
8659 if (new_pages > page_limit) in __io_account_mem()
8662 new_pages) != cur_pages); in __io_account_mem()
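
__io_account_mem() above charges nr_pages against a per-user page limit without taking a lock: it reads the current count, refuses the request if adding nr_pages would exceed the limit, and retries the compare-and-swap until no other task has raced in between. A sketch of that loop against an atomic_long_t counter; the function name and parameters are illustrative.

/* Sketch of the lockless accounting loop; not io_uring's exact code. */
#include <linux/atomic.h>
#include <linux/errno.h>

static int account_pages(atomic_long_t *acct, unsigned long nr_pages,
			 unsigned long page_limit)
{
	unsigned long cur_pages, new_pages;

	do {
		cur_pages = atomic_long_read(acct);
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;	/* would exceed the page limit */
	} while (atomic_long_cmpxchg(acct, cur_pages, new_pages) != cur_pages);

	return 0;
}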