
Searched full:pmd (Results 1 – 25 of 456) sorted by relevance


/OK3568_Linux_fs/kernel/drivers/md/
dm-thin-metadata.c
230 struct dm_pool_metadata *pmd; member
390 static inline void pmd_write_lock_in_core(struct dm_pool_metadata *pmd) in pmd_write_lock_in_core() argument
391 __acquires(pmd->root_lock) in pmd_write_lock_in_core()
393 down_write(&pmd->root_lock); in pmd_write_lock_in_core()
396 static inline void pmd_write_lock(struct dm_pool_metadata *pmd) in pmd_write_lock() argument
398 pmd_write_lock_in_core(pmd); in pmd_write_lock()
399 if (unlikely(!pmd->in_service)) in pmd_write_lock()
400 pmd->in_service = true; in pmd_write_lock()
403 static inline void pmd_write_unlock(struct dm_pool_metadata *pmd) in pmd_write_unlock() argument
404 __releases(pmd->root_lock) in pmd_write_unlock()
[all …]
dm-thin-metadata.h
48 int dm_pool_metadata_close(struct dm_pool_metadata *pmd);
61 int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev);
69 int dm_pool_create_snap(struct dm_pool_metadata *pmd, dm_thin_id dev,
77 int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
84 int dm_pool_commit_metadata(struct dm_pool_metadata *pmd);
94 int dm_pool_abort_metadata(struct dm_pool_metadata *pmd);
99 int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
103 int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
115 int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd);
116 int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd);
[all …]
/OK3568_Linux_fs/kernel/arch/arc/include/asm/
hugepage.h
13 static inline pte_t pmd_pte(pmd_t pmd) in pmd_pte() argument
15 return __pte(pmd_val(pmd)); in pmd_pte()
23 #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) argument
24 #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) argument
25 #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) argument
26 #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) argument
27 #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) argument
28 #define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd))) argument
29 #define pmd_mkinvalid(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd))) argument
30 #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) argument
[all …]
/OK3568_Linux_fs/kernel/arch/arm/include/asm/
pgtable-3level.h
114 #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ argument
116 #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ argument
118 #define pmd_large(pmd) pmd_sect(pmd) argument
119 #define pmd_leaf(pmd) pmd_sect(pmd) argument
138 #define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) argument
174 #define pmd_isset(pmd, val) ((u32)(val) == (val) ? pmd_val(pmd) & (val) \ argument
175 : !!(pmd_val(pmd) & (val)))
176 #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val))) argument
178 #define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID)) argument
179 #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF)) argument
[all …]
/OK3568_Linux_fs/kernel/arch/mips/include/asm/
pgtable.h
92 #define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd)) argument
94 #define __pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) argument
96 #define pmd_page(pmd) __pmd_page(pmd) argument
99 #define pmd_page_vaddr(pmd) pmd_val(pmd) argument
585 static inline int pmd_trans_huge(pmd_t pmd) argument
587 return !!(pmd_val(pmd) & _PAGE_HUGE);
590 static inline pmd_t pmd_mkhuge(pmd_t pmd) argument
592 pmd_val(pmd) |= _PAGE_HUGE;
594 return pmd;
598 pmd_t *pmdp, pmd_t pmd);
[all …]
pgalloc.h
21 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, in pmd_populate_kernel() argument
24 set_pmd(pmd, __pmd((unsigned long)pte)); in pmd_populate_kernel()
27 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, in pmd_populate() argument
30 set_pmd(pmd, __pmd((unsigned long)page_address(pte))); in pmd_populate()
32 #define pmd_pgtable(pmd) pmd_page(pmd) argument
35 * Initialize a new pmd table with invalid pointers.
41 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) in pud_populate() argument
43 set_pud(pud, __pud((unsigned long)pmd)); in pud_populate()
48 * Initialize a new pgd / pmd table with invalid pointers.
68 pmd_t *pmd; in pmd_alloc_one() local
[all …]
/OK3568_Linux_fs/kernel/mm/
debug_vm_pgtable.c
152 pmd_t pmd; in pmd_basic_tests() local
157 pr_debug("Validating PMD basic (%pGv)\n", ptr); in pmd_basic_tests()
158 pmd = pfn_pmd(pfn, prot); in pmd_basic_tests()
167 WARN_ON(pmd_dirty(pmd_wrprotect(pmd))); in pmd_basic_tests()
170 WARN_ON(!pmd_same(pmd, pmd)); in pmd_basic_tests()
171 WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd)))); in pmd_basic_tests()
172 WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd)))); in pmd_basic_tests()
173 WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd)))); in pmd_basic_tests()
174 WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd)))); in pmd_basic_tests()
175 WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd)))); in pmd_basic_tests()
[all …]
huge_memory.c
389 * DAX PMD support. in hugepage_init()
479 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
482 pmd = pmd_mkwrite(pmd); in maybe_pmd_mkwrite()
483 return pmd; in maybe_pmd_mkwrite()
613 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
614 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
639 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
640 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
696 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
700 if (!pmd_none(*pmd)) in set_huge_zero_page()
[all …]
/OK3568_Linux_fs/kernel/arch/arm64/include/asm/
pgtable.h
145 #define pmd_access_permitted(pmd, write) \ argument
146 (pte_access_permitted(pmd_pte(pmd), (write)))
162 static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot) in clear_pmd_bit() argument
164 pmd_val(pmd) &= ~pgprot_val(prot); in clear_pmd_bit()
165 return pmd; in clear_pmd_bit()
168 static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) in set_pmd_bit() argument
170 pmd_val(pmd) |= pgprot_val(prot); in set_pmd_bit()
171 return pmd; in set_pmd_bit()
244 static inline pmd_t pmd_mkcont(pmd_t pmd) in pmd_mkcont() argument
246 return __pmd(pmd_val(pmd) | PMD_SECT_CONT); in pmd_mkcont()
[all …]
/OK3568_Linux_fs/kernel/arch/x86/include/asm/
pgtable.h
31 bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
70 #define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd) argument
94 #define pmd_clear(pmd) native_pmd_clear(pmd) argument
163 static inline int pmd_dirty(pmd_t pmd) in pmd_dirty() argument
165 return pmd_flags(pmd) & _PAGE_DIRTY; in pmd_dirty()
168 static inline int pmd_young(pmd_t pmd) in pmd_young() argument
170 return pmd_flags(pmd) & _PAGE_ACCESSED; in pmd_young()
219 static inline unsigned long pmd_pfn(pmd_t pmd) in pmd_pfn() argument
221 phys_addr_t pfn = pmd_val(pmd); in pmd_pfn()
223 return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; in pmd_pfn()
[all …]
pgtable-3level.h
18 pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
46 * because GCC will not read the 64-bit value of the pmd atomically.
50 * function to know if the pmd is null or not, and in turn to know if
51 * they can run pte_offset_map_lock() or pmd_trans_huge() or other pmd
54 * Without THP if the mmap_lock is held for reading, the pmd can only
56 * we can always return atomic pmd values with this function.
58 * With THP if the mmap_lock is held for reading, the pmd can become
65 * 'none' (zero) pmdval if the low part of the pmd is zero.
71 * needs the low part of the pmd to be read atomically to decide if the
72 * pmd is unstable or not, with the only exception when the low part
[all …]
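
The comment excerpted above from arch/x86/include/asm/pgtable-3level.h concerns torn reads of a 64-bit pmd on 32-bit PAE kernels. Below is a minimal, hypothetical C sketch of that hazard and of why reading the low 32-bit half first is enough to distinguish a 'none' (zero) entry from a populated one; the type and function names here are invented for illustration and are not the kernel's helpers.

#include <stdint.h>

/* Hypothetical layout: a 64-bit entry stored as two 32-bit words, as on
 * 32-bit PAE where the compiler will not load all 64 bits in one access. */
typedef struct { volatile uint32_t lo, hi; } example_pmd_t;

/* Read the low half first: if it is zero the entry is 'none' and the
 * (possibly stale) high half does not matter; if it is non-zero the
 * caller at least knows the pmd is populated, which is what code such
 * as pte_offset_map_lock() needs before taking the page-table lock. */
static inline uint64_t example_pmd_read(const example_pmd_t *p)
{
	uint32_t lo = p->lo;	/* decides none vs. not-none */
	uint32_t hi = p->hi;	/* may race with a concurrent update */

	return ((uint64_t)hi << 32) | lo;
}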
pgalloc.h
65 pmd_t *pmd, pte_t *pte) in pmd_populate_kernel() argument
68 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); in pmd_populate_kernel()
72 pmd_t *pmd, pte_t *pte) in pmd_populate_kernel_safe() argument
75 set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); in pmd_populate_kernel_safe()
78 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, in pmd_populate() argument
84 set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE)); in pmd_populate()
87 #define pmd_pgtable(pmd) pmd_page(pmd) argument
90 extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
92 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, in __pmd_free_tlb() argument
95 ___pmd_free_tlb(tlb, pmd); in __pmd_free_tlb()
[all …]
/OK3568_Linux_fs/kernel/arch/x86/mm/
pgtable.c
61 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) in ___pmd_free_tlb() argument
63 struct page *page = virt_to_page(pmd); in ___pmd_free_tlb()
64 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); in ___pmd_free_tlb()
126 ptes in non-PAE, or shared PMD in PAE), then just copy the in pgd_ctor()
156 * kernel pmd is shared. If PAE were not to share the pmd a similar
172 * Also, if we're in a paravirt environment where the kernel pmd is
188 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) in pud_populate() argument
190 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); in pud_populate()
193 reserved at the pmd (PDPT) level. */ in pud_populate()
194 set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT)); in pud_populate()
[all …]
init_64.c
74 DEFINE_POPULATE(pud_populate, pud, pmd, init)
75 DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
89 DEFINE_ENTRY(pmd, pmd, init)
270 pmd_t *pmd = (pmd_t *) spp_getpage(); in fill_pmd() local
271 pud_populate(&init_mm, pud, pmd); in fill_pmd()
272 if (pmd != pmd_offset(pud, 0)) in fill_pmd()
274 pmd, pmd_offset(pud, 0)); in fill_pmd()
279 static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) in fill_pte() argument
281 if (pmd_none(*pmd)) { in fill_pte()
283 pmd_populate_kernel(&init_mm, pmd, pte); in fill_pte()
[all …]
/OK3568_Linux_fs/kernel/mm/kasan/
init.c
70 static inline bool kasan_pte_table(pmd_t pmd) in kasan_pte_table() argument
72 return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte)); in kasan_pte_table()
92 static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr, in zero_pte_populate() argument
95 pte_t *pte = pte_offset_kernel(pmd, addr); in zero_pte_populate()
105 pte = pte_offset_kernel(pmd, addr); in zero_pte_populate()
112 pmd_t *pmd = pmd_offset(pud, addr); in zero_pmd_populate() local
119 pmd_populate_kernel(&init_mm, pmd, in zero_pmd_populate()
124 if (pmd_none(*pmd)) { in zero_pmd_populate()
134 pmd_populate_kernel(&init_mm, pmd, p); in zero_pmd_populate()
136 zero_pte_populate(pmd, addr, next); in zero_pmd_populate()
[all …]
/OK3568_Linux_fs/kernel/arch/powerpc/include/asm/book3s/64/
pgtable.h
47 * We need to mark a pmd pte invalid while splitting. We can do that by clearing
194 /* pmd table use page table fragments */
249 /* Bits to mask out from a PMD to get to the PTE page */
251 /* Bits to mask out from a PUD to get to the PMD page */
707 * This is potentially called with a pmd as the argument, in which case it's not
752 #define __pmd_to_swp_entry(pmd) (__pte_to_swp_entry(pmd_pte(pmd))) argument
891 static inline int pmd_none(pmd_t pmd) in pmd_none() argument
893 return !pmd_raw(pmd); in pmd_none()
896 static inline int pmd_present(pmd_t pmd) in pmd_present() argument
899 * A pmd is considered present if _PAGE_PRESENT is set. in pmd_present()
[all …]
/OK3568_Linux_fs/kernel/arch/parisc/include/asm/
pgalloc.h
39 /* Three Level Page Table Support for pmd's */
41 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) in pud_populate() argument
44 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT))); in pud_populate()
49 pmd_t *pmd; in pmd_alloc_one() local
51 pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER); in pmd_alloc_one()
52 if (likely(pmd)) in pmd_alloc_one()
53 memset ((void *)pmd, 0, PAGE_SIZE << PMD_ORDER); in pmd_alloc_one()
54 return pmd; in pmd_alloc_one()
57 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) in pmd_free() argument
59 free_pages((unsigned long)pmd, PMD_ORDER); in pmd_free()
[all …]
/OK3568_Linux_fs/kernel/arch/s390/include/asm/
pgtable.h
72 printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
605 * pgd/p4d/pud/pmd/pte query functions
696 static inline int pmd_large(pmd_t pmd) in pmd_large() argument
698 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; in pmd_large()
701 static inline int pmd_bad(pmd_t pmd) in pmd_bad() argument
703 if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd)) in pmd_bad()
705 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0; in pmd_bad()
730 static inline int pmd_present(pmd_t pmd) in pmd_present() argument
732 return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY; in pmd_present()
735 static inline int pmd_none(pmd_t pmd) in pmd_none() argument
[all …]
/OK3568_Linux_fs/kernel/include/linux/
pgtable.h
18 #error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
71 static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) in pte_offset_kernel() argument
73 return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); in pte_offset_kernel()
126 * In many cases it is known that a virtual address is mapped at PMD or PTE
128 * pointer to the PMD entry in user or kernel page table or translate a virtual
144 pmd_t *pmd = pmd_off_k(vaddr); in virt_to_kpte() local
146 return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr); in virt_to_kpte()
202 pmd_t pmd = *pmdp; in pmdp_test_and_clear_young() local
204 if (!pmd_young(pmd)) in pmdp_test_and_clear_young()
207 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); in pmdp_test_and_clear_young()
[all …]
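
The include/linux/pgtable.h lines above show helpers such as virt_to_kpte() that shortcut the page-table walk when the mapping level of a kernel virtual address is already known. For comparison, here is a hedged sketch of the full generic walk those helpers avoid, written against the standard pgd/p4d/pud/pmd offset API; it is illustrative only, assumes kernel context, and omits the locking and huge-page checks real callers need.

#include <linux/pgtable.h>	/* generic pgd/p4d/pud/pmd/pte helpers */

/* Sketch: walk init_mm down to the PTE for a kernel virtual address,
 * returning NULL as soon as any level is empty, mirroring the
 * pmd_none() check in virt_to_kpte() above. */
static pte_t *example_walk_kernel_pte(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, vaddr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, vaddr);
}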
/OK3568_Linux_fs/kernel/arch/arm/mm/
idmap.c
27 pmd_t *pmd; in idmap_add_pmd() local
31 pmd = pmd_alloc_one(&init_mm, addr); in idmap_add_pmd()
32 if (!pmd) { in idmap_add_pmd()
33 pr_warn("Failed to allocate identity pmd.\n"); in idmap_add_pmd()
37 * Copy the original PMD to ensure that the PMD entries for in idmap_add_pmd()
41 memcpy(pmd, pmd_offset(pud, 0), in idmap_add_pmd()
43 pud_populate(&init_mm, pud, pmd); in idmap_add_pmd()
44 pmd += pmd_index(addr); in idmap_add_pmd()
46 pmd = pmd_offset(pud, addr); in idmap_add_pmd()
50 *pmd = __pmd((addr & PMD_MASK) | prot); in idmap_add_pmd()
[all …]
/OK3568_Linux_fs/kernel/drivers/soc/bcm/bcm63xx/
bcm63xx-power.c
39 static int bcm63xx_power_get_state(struct bcm63xx_power_dev *pmd, bool *is_on) in bcm63xx_power_get_state() argument
41 struct bcm63xx_power *power = pmd->power; in bcm63xx_power_get_state()
43 if (!pmd->mask) { in bcm63xx_power_get_state()
48 *is_on = !(__raw_readl(power->base) & pmd->mask); in bcm63xx_power_get_state()
53 static int bcm63xx_power_set_state(struct bcm63xx_power_dev *pmd, bool on) in bcm63xx_power_set_state() argument
55 struct bcm63xx_power *power = pmd->power; in bcm63xx_power_set_state()
59 if (!pmd->mask) in bcm63xx_power_set_state()
65 val &= ~pmd->mask; in bcm63xx_power_set_state()
67 val |= pmd->mask; in bcm63xx_power_set_state()
76 struct bcm63xx_power_dev *pmd = container_of(genpd, in bcm63xx_power_on() local
[all …]
/OK3568_Linux_fs/kernel/arch/sparc/include/asm/
pgalloc_64.h
36 static inline void __pud_populate(pud_t *pud, pmd_t *pmd) in __pud_populate() argument
38 pud_set(pud, pmd); in __pud_populate()
41 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD) argument
58 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) in pmd_free() argument
60 kmem_cache_free(pgtable_cache, pmd); in pmd_free()
68 #define pmd_populate_kernel(MM, PMD, PTE) pmd_set(MM, PMD, PTE) argument
69 #define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE) argument
70 #define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD)) argument
109 #define __pmd_free_tlb(tlb, pmd, addr) \ argument
110 pgtable_free_tlb(tlb, pmd, false)
pgalloc_32.h
36 #define pud_populate(MM, PGD, PMD) pud_set(PGD, PMD) argument
45 static inline void free_pmd_fast(pmd_t * pmd) in free_pmd_fast() argument
47 srmmu_free_nocache(pmd, SRMMU_PMD_TABLE_SIZE); in free_pmd_fast()
50 #define pmd_free(mm, pmd) free_pmd_fast(pmd) argument
51 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) argument
53 #define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte) argument
54 #define pmd_pgtable(pmd) (pgtable_t)__pmd_page(pmd) argument
/OK3568_Linux_fs/kernel/arch/x86/power/
hibernate_32.c
59 static pte_t *resume_one_page_table_init(pmd_t *pmd) in resume_one_page_table_init() argument
61 if (pmd_none(*pmd)) { in resume_one_page_table_init()
66 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); in resume_one_page_table_init()
68 BUG_ON(page_table != pte_offset_kernel(pmd, 0)); in resume_one_page_table_init()
73 return pte_offset_kernel(pmd, 0); in resume_one_page_table_init()
85 pmd_t *pmd; in resume_physical_mapping_init() local
94 pmd = resume_one_md_table_init(pgd); in resume_physical_mapping_init()
95 if (!pmd) in resume_physical_mapping_init()
101 for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) { in resume_physical_mapping_init()
110 set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC)); in resume_physical_mapping_init()
[all …]
/OK3568_Linux_fs/kernel/arch/s390/mm/
vmem.c
133 * consecutive sections. Remember for the last added PMD the last in vmemmap_use_new_sub_pmd()
134 * unused range in the populated PMD. in vmemmap_use_new_sub_pmd()
140 /* Returns true if the PMD is completely unused and can be freed. */
151 static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr, in modify_pte_table() argument
162 pte = pte_offset_kernel(pmd, addr); in modify_pte_table()
192 static void try_free_pte_table(pmd_t *pmd, unsigned long start) in try_free_pte_table() argument
198 pte = pte_offset_kernel(pmd, start); in try_free_pte_table()
203 vmem_pte_free(__va(pmd_deref(*pmd))); in try_free_pte_table()
204 pmd_clear(pmd); in try_free_pte_table()
213 pmd_t *pmd; in modify_pmd_table() local
[all …]
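
The s390 vmem.c excerpt above notes that a PMD can only be freed once it is completely unused. Below is a hedged sketch of the emptiness check that try_free_pte_table() performs before clearing the pmd; the helper name is invented for the sketch, it assumes kernel context, and the real function additionally frees the table and handles the vmemmap sub-PMD bookkeeping.

#include <linux/pgtable.h>

/* Sketch: a pte table hanging off a pmd may only be released when every
 * one of its PTRS_PER_PTE entries is pte_none(). */
static bool example_pte_table_unused(pmd_t *pmd, unsigned long start)
{
	pte_t *pte = pte_offset_kernel(pmd, start);
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++, pte++)
		if (!pte_none(*pte))
			return false;	/* still maps something */
	return true;			/* safe to free the table and pmd_clear() */
}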
