| /OK3568_Linux_fs/kernel/include/linux/ |
| H A D | page-flags.h | Macros for manipulating and testing page->flags. Various page->flags bits: PG_reserved is set for special pages. The "struct page" of such a page should in general not be touched (e.g. set dirty) except by its owner. Pages marked PG_reserved include pages reserved or allocated early during boot (before the page allocator was initialized), among them the initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much more; once (if ever) freed, PG_reserved is cleared and they will be given to the page allocator. Also the zero page(s), and pages not added to the page allocator when onlining a section because they were excluded via the online_page_callback() or because they are PG_hwpoison. Consequently, PG_reserved for a page mapped into user space can indicate the zero page, the vDSO, MMIO pages or device memory. [all …]
|
| H A D | pagemap.h | void release_pages(struct page **pages, int nr); … how to speculatively take a reference to a page: if the page is free (_refcount == 0), then _refcount is untouched and 0 is returned. The call must be made inside the same RCU read section that has been used to look up the page in the pagecache radix-tree (or page table), and put_page() must be able to do the right thing when the page has been finished with, no matter what it is subsequently allocated for. The lookup side follows: 1. find page in radix tree; 2. conditionally increment refcount; 3. check the page is still in pagecache (if no, goto 1). The remove side (with the i_pages lock held) does: … B. remove page from pagecache; C. free the page. If the remove side wins the race, B will complete and 1 will find no page, causing the lookup to fail. [all …]
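The lockless pattern that comment describes is worth seeing end to end. Below is a minimal sketch of the lookup side only, assuming a simplified pagecache with no shadow (value) entries and no compound-page handling; the helper names are the kernel's, but this is illustrative, not the actual find_get_page():

/*
 * Minimal sketch of the speculative lookup loop described above.
 * Simplified: ignores shadow (value) entries and THP tail pages.
 */
static struct page *speculative_lookup(struct address_space *mapping,
                                       pgoff_t index)
{
        struct page *page;

        rcu_read_lock();
repeat:
        page = xa_load(&mapping->i_pages, index);        /* 1. find page */
        if (!page)
                goto out;
        if (!get_page_unless_zero(page))                 /* 2. conditional refcount inc */
                goto repeat;                             /* page was being freed */
        if (unlikely(page->mapping != mapping || page->index != index)) {
                put_page(page);                          /* 3. recheck failed: raced with remove */
                goto repeat;
        }
out:
        rcu_read_unlock();
        return page;
}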
|
| H A D | page_ref.h | #include <linux/page-flags.h> … tracepoint hooks fired on refcount changes: extern void __page_ref_set(struct page *page, int v); extern void __page_ref_mod(struct page *page, int v); extern void __page_ref_mod_and_test(struct page *page, int v, int ret); extern void __page_ref_mod_and_return(struct page *page, int v, int ret); extern void __page_ref_mod_unless(struct page *page, int v, int u); extern void __page_ref_freeze(struct page *page, int v, int ret); extern void __page_ref_unfreeze(struct page *page, int v); … plus empty static inline stubs for the same hooks when the tracepoints are compiled out: static inline void __page_ref_set(struct page *page, int v) { } static inline void __page_ref_mod(struct page *page, int v) { } … [all …]
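These hooks exist so every refcount change can fire a tracepoint. The public wrappers in the same header pair the atomic update with the matching hook; a simplified sketch of that shape, using an IS_ENABLED() check as a stand-in for the static-key tracepoint guard the real header uses:

static inline void page_ref_inc_sketch(struct page *page)
{
        atomic_inc(&page->_refcount);           /* the actual refcount update */
        if (IS_ENABLED(CONFIG_DEBUG_PAGE_REF))  /* stand-in for the static-key check */
                __page_ref_mod(page, 1);        /* emit the page_ref_mod tracepoint */
}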
|
| H A D | balloon_compaction.h | Balloon page migration makes use of the general non-lru movable page feature … page->private is used to reference the responsible balloon device; page->mapping is used in the context of non-lru page migration to reference the address space operations for page isolation/migration/compaction. As the page isolation scanning step a compaction thread does is a lockless procedure (from a page standpoint), it might bring some racy situations while performing balloon page compaction. In order to sort out these racy scenarios and safely perform balloon's page compaction and migration we must, always: (i) when updating a balloon's page ->mapping element, strictly do it under page_lock(page); … [all …]
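A sketch of rule (i) in practice, assuming CONFIG_BALLOON_COMPACTION and modeled loosely on balloon_page_insert(); the helper is hypothetical, the caller is assumed to hold b_dev_info->pages_lock, and the page lock protects the ->mapping update as the comment demands:

/* Hypothetical helper: tag a locked page as belonging to 'balloon'. */
static void balloon_tag_page_sketch(struct balloon_dev_info *balloon,
                                    struct page *page)
{
        BUG_ON(!PageLocked(page));                         /* rule (i): ->mapping changes under page lock */
        __SetPageOffline(page);
        __SetPageMovable(page, balloon->inode->i_mapping); /* aops for isolation/migration/compaction */
        set_page_private(page, (unsigned long)balloon);    /* page->private -> balloon device */
        list_add(&page->lru, &balloon->pages);             /* caller holds pages_lock */
}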
|
| H A D | mm.h | #include <linux/page-flags.h> … #include <asm/page.h> … notes on returning a zero page mapping on a read fault and on data related to the physical page in case of virtualization … /* This function must be updated when the size of struct page grows above 80 or reduces below 56; the idea is that the compiler optimizes the switch() out, leaving only move/store instructions */ static inline void __mm_zero_struct_page(struct page *page) { unsigned long *_pp = (void *)page; /* Check that struct page is either 56, 64, 72, or 80 bytes */ BUILD_BUG_ON(sizeof(struct page) & 7); BUILD_BUG_ON(sizeof(struct page) < 56); … [all …]
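The remainder of __mm_zero_struct_page() is elided above; conceptually it zeroes the structure with 8-byte stores driven by a fallthrough switch that the compiler flattens into plain moves. A sketch of the idea, not a verbatim copy:

static inline void mm_zero_struct_page_sketch(struct page *page)
{
        unsigned long *_pp = (void *)page;

        /* One 8-byte store per word; smaller sizes fall through to the rest. */
        switch (sizeof(struct page)) {
        case 80:
                _pp[9] = 0;
                fallthrough;
        case 72:
                _pp[8] = 0;
                fallthrough;
        case 64:
                _pp[7] = 0;
                fallthrough;
        case 56:
                _pp[6] = 0;
                _pp[5] = 0;
                _pp[4] = 0;
                _pp[3] = 0;
                _pp[2] = 0;
                _pp[1] = 0;
                _pp[0] = 0;
        }
}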
|
| H A D | mm_inline.h | page_is_file_lru - should the page be on a file LRU or anon LRU? @page: the page to test. Returns 1 if @page is a regular filesystem backed page cache page or a lazily freed anonymous page (e.g. via MADV_FREE); returns 0 if @page is a normal anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by functions that manipulate the LRU lists, to sort a page onto the right LRU list. We would like to get this info without a page flag, but the state needs to survive until the page is last deleted from the LRU, which could be as far down as __page_cache_release. static inline int page_is_file_lru(struct page *page) { return !PageSwapBacked(page); } [all …]
|
| H A D | migrate.h | typedef struct page *new_page_t(struct page *page, unsigned long private); typedef void free_page_t(struct page *page, unsigned long private); … return values: negative errno on page migration failure, zero on page migration success … declarations including … struct page *newpage, struct page *page, … extern struct page *alloc_migration_target(struct page *page, unsigned long private); extern int isolate_movable_page(struct page *page, isolate_mode_t mode); extern void putback_movable_page(struct page *page); extern void migrate_page_states(struct page *newpage, struct page *page); extern void migrate_page_copy(struct page *newpage, struct page *page); [all …]
|
| H A D | page_idle.h | #include <linux/page-flags.h> … static inline bool page_is_young(struct page *page) { return PageYoung(page); } static inline void set_page_young(struct page *page) { SetPageYoung(page); } static inline bool test_and_clear_page_young(struct page *page) { return TestClearPageYoung(page); } static inline bool page_is_idle(struct page *page) { return PageIdle(page); } static inline void set_page_idle(struct page *page) … [all …]
|
| H A D | highmem.h | static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma…) { } static inline void flush_kernel_dcache_page(struct page *page) { } … extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot); … void *kmap_high(struct page *page); static inline void *kmap(struct page *page) { void *addr; … if (!PageHighMem(page)) addr = page_address(page); else addr = kmap_high(page); … return addr; } void kunmap_high(struct page *page); static inline void kunmap(struct page *page) … [all …]
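kmap() hands back the direct-map address for lowmem pages and a temporary kernel mapping (from kmap_high()) for highmem pages, and every call must be paired with kunmap(). A minimal illustrative caller, not taken from this header:

/* Zero a possibly-highmem page from process context (kmap() may sleep). */
static void zero_page_sketch(struct page *page)
{
        void *addr = kmap(page);        /* lowmem: page_address(); highmem: kmap_high() */

        memset(addr, 0, PAGE_SIZE);
        kunmap(page);                   /* drops the temporary mapping, if any */
}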
|
| H A D | huge_mm.h | struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, … @pgprot: page protection to use (from the vmf_insert_pfn_* kernel-doc) … void prep_transhuge_page(struct page *page); void free_transhuge_page(struct page *page); bool is_transparent_hugepage(struct page *page); … bool can_split_huge_page(struct page *page, int *pextra_pins); int split_huge_page_to_list(struct page *page, struct list_head *list); static inline int split_huge_page(struct page *page) { return split_huge_page_to_list(page, NULL); } [all …]
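split_huge_page() is simply the list-less form of split_huge_page_to_list(); the callee expects the page to be locked and returns 0 only when the compound page was fully split. An illustrative, hypothetical caller:

static int try_split_sketch(struct page *page)
{
        int ret = -EBUSY;

        if (trylock_page(page)) {               /* split requires PG_locked */
                ret = split_huge_page(page);    /* 0 on success, -EBUSY etc. otherwise */
                unlock_page(page);
        }
        return ret;
}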
|
| /OK3568_Linux_fs/kernel/mm/ |
| H A D | migrate.c | Page migration was first developed in the context of the memory hotplug project … int isolate_movable_page(struct page *page, isolate_mode_t mode) { … /* In case we 'win' a race for a movable page being freed under us, the put_page() at the end of this function will release this page, thus avoiding a nasty leakage. */ if (unlikely(!get_page_unless_zero(page))) … /* Check PageMovable before holding a PG_lock because page's owner assumes anybody doesn't touch PG_lock of newly allocated page, so unconditionally grabbing the lock ruins page's owner side. */ if (unlikely(!__PageMovable(page))) … /* As movable pages are not isolated from LRU lists, compaction threads can race against page migration functions … */ [all …]
|
| H A D | swap.c | /* How many pages do we try to swap or page in/out together? */ … static void __page_cache_release(struct page *page) { if (PageLRU(page)) { pg_data_t *pgdat = page_pgdat(page); … lruvec = mem_cgroup_page_lruvec(page, pgdat); VM_BUG_ON_PAGE(!PageLRU(page), page); __ClearPageLRU(page); del_page_from_lru_list(page, lruvec, page_off_lru(page)); … } __ClearPageWaiters(page); } static void __put_single_page(struct page *page) … [all …]
|
| H A D | filemap.c | … finished 'unifying' the page and buffer cache and SMP-threaded the page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com> … static void page_cache_delete(struct address_space *mapping, struct page *page, void *shadow) { XA_STATE(xas, &mapping->i_pages, page->index); … if (!PageHuge(page)) { xas_set_order(&xas, page->index, compound_order(page)); nr = compound_nr(page); } VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageTail(page), page); VM_BUG_ON_PAGE(nr != 1 && shadow, page); … [all …]
|
| H A D | rmap.c | Provides methods for unmapping each kind of mapped page … lock ordering: page->flags PG_locked (lock_page) (see hugetlbfs below) … hugetlb_fault_mutex (hugetlbfs specific page fault mutex) … page->flags PG_locked (lock_page) … searches where page is mapped … Getting a lock on a stable anon_vma from a page off the LRU is tricky! The best we can do is return an anon_vma that might have been relevant to this page. The page might have been remapped to a different anon_vma or the anon_vma returned may already be freed … the rmap code must ensure that any anon_vma obtained from the page will still be valid for as long as … [all …]
|
| H A D | memory-failure.c | Handles page cache pages in various states. The tricky part here is that we can access any page asynchronously in respect to other VM users … The case actually shows up as a frequent (top 10) page state in tools/vm/page-types when running a real workload. … #include <linux/page-flags.h> #include <linux/kernel-page-flags.h> … #include <linux/page-isolation.h> … static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release) { … if (dissolve_free_huge_page(page) || !take_page_off_buddy(page)) /* We could fail to take off the target page from buddy … */ … [all …]
|
| H A D | balloon_compaction.c | static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info, struct page *page) { /* Block others from accessing the 'page' when we get around to establishing the balloon mapping. Caller must ensure it is already holding a reference to the 'page' at this point. If we are not, then memory corruption is possible and we should stop execution. */ BUG_ON(!trylock_page(page)); balloon_page_insert(b_dev_info, page); unlock_page(page); … } /* balloon_page_list_enqueue() - inserts a list of pages into the balloon page list. @b_dev_info: balloon device descriptor where we will insert a new page … */ … struct page *page, *tmp; … list_for_each_entry_safe(page, tmp, pages, lru) { … [all …]
|
| H A D | truncate.c | Regular page slots are stabilized by the page lock even without the tree itself locked … struct page *page = pvec->pages[i]; … if (!xa_is_value(page)) { pvec->pages[j++] = page; continue; } … __clear_shadow_entry(mapping, index, page); … do_invalidatepage - invalidate part or all of a page. @page: the page which is affected … do_invalidatepage() is called when all or part of the page has become invalidated … void do_invalidatepage(struct page *page, unsigned int offset, … { void (*invalidatepage)(struct page *, unsigned int, unsigned int); … [all …]
|
| H A D | page_io.c | Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie … static struct bio *get_swap_bio(gfp_t gfp_flags, struct page *page, bio_end_io_t end_io) { … bio->bi_iter.bi_sector = map_swap_page(page, &bdev); … bio_add_page(bio, page, thp_size(page), 0); … } static void end_swap_bio_write(struct bio *bio) { struct page *page = bio_first_page_all(bio); … SetPageError(page); /* We failed to write the page out to swap-space. Re-dirty the page in order to avoid it being reclaimed. … */ set_page_dirty(page); … ClearPageReclaim(page); … [all …]
|
| H A D | mlock.c | An mlocked page [PageMlocked(page)] is unevictable. As such, it will be placed on the LRU "unevictable" list rather than the [in]active lists … may have mlocked a page that is being munlocked, so lazy mlock must take … void clear_page_mlock(struct page *page) { … if (!TestClearPageMlocked(page)) return; nr_pages = thp_nr_pages(page); mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); … if (!isolate_lru_page(page)) { putback_lru_page(page); } else { /* We lost the race: the page already moved to the evictable list. */ if (PageUnevictable(page)) … } [all …]
|
| /OK3568_Linux_fs/kernel/Documentation/vm/ |
| H A D | page_migration.rst | Page migration … Page migration allows moving the physical location of pages between nodes in a NUMA system while the process is running. … The main intent of page migration is to reduce the latency of memory accesses by moving pages near to the processor where the process accessing that memory is running. … Page migration allows a process to manually relocate the node on which its pages are located. … Page migration functions are provided by the numactl package by Andi Kleen; numactl provides libnuma, which provides an interface similar to other NUMA functionality for page migration. See also the numa_maps documentation in the proc(5) man page. … manual page migration support. Automatic page migration may be implemented … Page migration allows the preservation of the relative location of pages within a group of nodes for all migration techniques. … Page migration occurs in several steps. First a high level … [all …]
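For the userspace side this document points at, the same machinery is reachable through the move_pages(2) system call from libnuma. A minimal sketch (link with -lnuma; the destination node 1 and the single-page count are arbitrary choices for illustration):

#include <numaif.h>     /* move_pages(), MPOL_MF_MOVE */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        void *page = aligned_alloc(4096, 4096);
        int node = 1;           /* illustrative destination node */
        int status = -1;

        *(volatile char *)page = 0;     /* touch it so a physical page exists */
        /* pid 0 means the calling process; one page, move to 'node' */
        if (move_pages(0, 1, &page, &node, &status, MPOL_MF_MOVE))
                perror("move_pages");
        else
                printf("page now on node %d\n", status);
        free(page);
        return 0;
}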
|
| /OK3568_Linux_fs/kernel/fs/jfs/ |
| H A D | jfs_metapage.c | uint pagealloc; /* # of page allocations */ uint pagefree; /* # of page frees */ … unlock_page(mp->page); … lock_page(mp->page); … /* Must have mp->page locked */ … #define mp_anchor(page) ((struct meta_anchor *)page_private(page)) static inline struct metapage *page_to_mp(struct page *page, int offset) { if (!PagePrivate(page)) return NULL; return mp_anchor(page)->mp[offset >> L2PSIZE]; } static inline int insert_metapage(struct page *page, struct metapage *mp) … [all …]
|
| /OK3568_Linux_fs/kernel/fs/sysv/ |
| H A D | dir.c | static inline void dir_put_page(struct page *page) { kunmap(page); put_page(page); } static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) { struct address_space *mapping = page->mapping; … block_write_end(NULL, mapping, pos, len, len, page, NULL); … err = write_one_page(page); … unlock_page(page); … } static struct page *dir_get_page(struct inode *dir, unsigned long n) { … struct page *page = read_mapping_page(mapping, n, NULL); … [all …]
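dir_get_page() and dir_put_page() bracket every access to a directory page: the page comes back read and kmap()ed, and must be released with kunmap() plus put_page(). A hypothetical reader built on that contract, not part of the original file:

/* Peek at directory page n; caller must hand the page back to dir_put_page(). */
static void *dir_peek_page_sketch(struct inode *dir, unsigned long n,
                                  struct page **p)
{
        struct page *page = dir_get_page(dir, n);   /* read_mapping_page() + kmap() */

        if (IS_ERR(page))
                return NULL;
        *p = page;
        return page_address(page);  /* valid until dir_put_page(*p) */
}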
|
| /OK3568_Linux_fs/kernel/net/core/ |
| H A D | page_pool.c | #include <linux/page-flags.h> … in page_pool_init(): /* DMA_BIDIRECTIONAL is for allowing page used for DMA sending … */ … /* In order to request DMA-sync-for-device the page … */ … static void page_pool_return_page(struct page_pool *pool, struct page *page); … static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) { struct page *page; … page = __ptr_ring_consume(r); if (unlikely(!page)) … if (likely(page_to_nid(page) == pref_nid)) { pool->alloc.cache[pool->alloc.count++] = page; … [all …]
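page_pool_refill_alloc_cache() above pulls recycled pages out of the pool's ptr_ring, preferring the pool's configured NUMA node. For context, a driver-side sketch of creating such a pool; the field values are illustrative, not taken from this file:

#include <net/page_pool.h>

static struct page_pool *rx_pool_create_sketch(struct device *dev)
{
        struct page_pool_params pp_params = {
                .order     = 0,                 /* single pages */
                .pool_size = 256,               /* size of the backing ptr_ring */
                .nid       = NUMA_NO_NODE,      /* pref_nid resolved at refill time */
                .dev       = dev,
                .dma_dir   = DMA_FROM_DEVICE,   /* RX: device writes, CPU reads */
        };

        return page_pool_create(&pp_params);    /* ERR_PTR() on failure */
}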
|
| /OK3568_Linux_fs/kernel/sound/pci/trident/ |
| H A D | trident_memory.c | Trident 4DWave-NX memory page allocation (TLB area) … /* page arguments of these two macros are Trident pages (4096 bytes), not kernel pages */ #define __set_tlb_bus(trident,page,ptr,addr) do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); (trident)->tlb.shadow_entries[page] = (ptr); } while (0) #define __tlb_to_ptr(trident,page) (void*)((trident)->tlb.shadow_entries[page]) #define __tlb_to_addr(trident,page) (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1)) … /* page size == SNDRV_TRIDENT_PAGE_SIZE */ [all …]
|
| /OK3568_Linux_fs/kernel/fs/nilfs2/ |
| H A D | page.c | page.c - buffer/page management specific to NILFS … #include <linux/page-flags.h> … #include "page.h" … __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, …) { … if (!page_has_buffers(page)) create_empty_buffers(page, 1 << blkbits, b_state); … bh = nilfs_page_get_nth_block(page, block - first_block); … } … in nilfs_grab_buffer(): struct page *page; … page = grab_cache_page(mapping, index); if (unlikely(!page)) … [all …]
|