Lines Matching full:pmd
74 DEFINE_POPULATE(pud_populate, pud, pmd, init)
75 DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
89 DEFINE_ENTRY(pmd, pmd, init)
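For context, these wrappers are generated by macros of roughly the following shape (a sketch reconstructed from the call sites matched below, e.g. pmd_populate_kernel_init() at line 573 and set_pmd_init() at line 522; exact macro bodies may differ by kernel version). Each generated *_init() helper dispatches to the _safe variant, which checks that it is not silently overwriting a live entry, only when init is true:

#define DEFINE_POPULATE(fname, type1, type2, init)              \
static inline void fname##_init(struct mm_struct *mm,           \
                type1##_t *arg1, type2##_t *arg2, bool init)    \
{                                                               \
        if (init)                                               \
                fname##_safe(mm, arg1, arg2);                   \
        else                                                    \
                fname(mm, arg1, arg2);                          \
}

#define DEFINE_ENTRY(type1, type2, init)                        \
static inline void set_##type1##_init(type1##_t *arg1,          \
                        type2##_t arg2, bool init)              \
{                                                               \
        if (init)                                               \
                set_##type1##_safe(arg1, arg2);                 \
        else                                                    \
                set_##type1(arg1, arg2);                        \
}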
270 pmd_t *pmd = (pmd_t *) spp_getpage(); in fill_pmd() local
271 pud_populate(&init_mm, pud, pmd); in fill_pmd()
272 if (pmd != pmd_offset(pud, 0)) in fill_pmd()
274 pmd, pmd_offset(pud, 0)); in fill_pmd()
279 static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) in fill_pte() argument
281 if (pmd_none(*pmd)) { in fill_pte()
283 pmd_populate_kernel(&init_mm, pmd, pte); in fill_pte()
284 if (pte != pte_offset_kernel(pmd, 0)) in fill_pte()
287 return pte_offset_kernel(pmd, vaddr); in fill_pte()
292 pmd_t *pmd = fill_pmd(pud, vaddr); in __set_pte_vaddr() local
293 pte_t *pte = fill_pte(pmd, vaddr); in __set_pte_vaddr()
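Filled out around the matched fragments, the allocation path reads roughly as follows (a sketch; the printk wording and the "BUG #" numbers elided between lines 272/274 and 284/287 are assumptions):

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
        if (pud_none(*pud)) {
                /* allocate a fresh PMD page and hook it under the PUD */
                pmd_t *pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
        }
        return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
        if (pmd_none(*pmd)) {
                /* same pattern one level down: PTE page under the PMD */
                pte_t *pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
                if (pte != pte_offset_kernel(pmd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
        }
        return pte_offset_kernel(pmd, vaddr);
}

__set_pte_vaddr() (lines 292-293) then chains the two to land on the PTE slot for vaddr and writes the new entry.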
351 pmd_t *pmd; in populate_extra_pte() local
353 pmd = populate_extra_pmd(vaddr); in populate_extra_pte()
354 return fill_pte(pmd, vaddr); in populate_extra_pte()
366 pmd_t *pmd; in __init_extra_mapping() local
387 pmd = (pmd_t *) spp_getpage(); in __init_extra_mapping()
388 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | in __init_extra_mapping()
391 pmd = pmd_offset(pud, phys); in __init_extra_mapping()
392 BUG_ON(!pmd_none(*pmd)); in __init_extra_mapping()
393 set_pmd(pmd, __pmd(phys | pgprot_val(prot))); in __init_extra_mapping()
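Lines 387-393 sit inside a per-2 MiB loop; reconstructed (with the PGD/P4D descent summarized in a comment, and with the flag bits on the continuation of line 388 assumed, since they fall outside the match output), the pattern is:

        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                /* ... walk/allocate the PGD, P4D and PUD levels for
                 * __va(phys), yielding 'pud' ... */
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                /* map one 2 MiB chunk directly at the PMD level */
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
        }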
425 pmd_t *pmd = level2_kernel_pgt; in cleanup_highmap() local
435 for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { in cleanup_highmap()
436 if (pmd_none(*pmd)) in cleanup_highmap()
439 set_pmd(pmd, __pmd(0)); in cleanup_highmap()
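The loop walks level2_kernel_pgt one 2 MiB entry at a time and zaps everything the kernel image does not actually use; the range test is an assumption based on mainline, since the match output shows only the loop header and the clear:

        for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                /* keep only PMDs covering [_text, roundup(_brk_end)] */
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }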
497 * Create PMD level page table mapping for physical addresses. The virtual
511 pmd_t *pmd = pmd_page + pmd_index(paddr); in phys_pmd_init() local
522 set_pmd_init(pmd, __pmd(0), init); in phys_pmd_init()
526 if (!pmd_none(*pmd)) { in phys_pmd_init()
527 if (!pmd_large(*pmd)) { in phys_pmd_init()
529 pte = (pte_t *)pmd_page_vaddr(*pmd); in phys_pmd_init()
554 new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); in phys_pmd_init()
560 set_pte_init((pte_t *)pmd, in phys_pmd_init()
573 pmd_populate_kernel_init(&init_mm, pmd, pte, init); in phys_pmd_init()
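Condensed, the per-entry logic of phys_pmd_init() that these matches come from is the following (a sketch; page accounting, the page_table_lock around updates, and the pte_clrhuge() dance at line 554 that re-derives protections from an existing huge entry are all trimmed):

        pmd_t *pmd = pmd_page + pmd_index(paddr);

        if (!pmd_none(*pmd) && !pmd_large(*pmd)) {
                /* a PTE table already hangs here: descend into it */
                pte = (pte_t *)pmd_page_vaddr(*pmd);
                paddr_last = phys_pte_init(pte, paddr, paddr_end,
                                           prot, init);
        } else if (page_size_mask & (1 << PG_LEVEL_2M)) {
                /* write a 2 MiB leaf entry directly into the PMD slot */
                set_pte_init((pte_t *)pmd,
                             pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
                                     __pgprot(pgprot_val(prot) | _PAGE_PSE)),
                             init);
        } else {
                /* otherwise build a 4 KiB PTE table and link it in */
                pte = alloc_low_page();
                paddr_last = phys_pte_init(pte, paddr, paddr_end,
                                           prot, init);
                pmd_populate_kernel_init(&init_mm, pmd, pte, init);
        }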
597 pmd_t *pmd; in phys_pud_init() local
616 pmd = pmd_offset(pud, 0); in phys_pud_init()
617 paddr_last = phys_pmd_init(pmd, paddr, in phys_pud_init()
659 pmd = alloc_low_page(); in phys_pud_init()
660 paddr_last = phys_pmd_init(pmd, paddr, paddr_end, in phys_pud_init()
664 pud_populate_init(&init_mm, pud, pmd, init); in phys_pud_init()
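One level up, phys_pud_init() applies the same reuse-or-allocate pattern per PUD entry (sketch; error paths and the 1 GiB large-PUD handling are trimmed):

        if (!pud_none(*pud)) {
                /* a PMD page already exists: just fill it further */
                pmd = pmd_offset(pud, 0);
                paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
                                           page_size_mask, prot, init);
        } else {
                /* fresh PMD page: fill it first, then publish it under
                 * init_mm.page_table_lock */
                pmd = alloc_low_page();
                paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
                                           page_size_mask, prot, init);

                spin_lock(&init_mm.page_table_lock);
                pud_populate_init(&init_mm, pud, pmd, init);
                spin_unlock(&init_mm.page_table_lock);
        }

Filling the detached PMD page before linking it means no partially built table is ever visible to concurrent walkers.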
778 * The virtual and physical addresses have to be aligned on PMD level
792 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
905 static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) in free_pte_table() argument
917 free_pagetable(pmd_page(*pmd), 0); in free_pte_table()
919 pmd_clear(pmd); in free_pte_table()
925 pmd_t *pmd; in free_pmd_table() local
929 pmd = pmd_start + i; in free_pmd_table()
930 if (!pmd_none(*pmd)) in free_pmd_table()
934 /* free a pmd table */ in free_pmd_table()
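Both helpers follow the same shape: scan the table, bail out on the first live entry, otherwise free the page and clear the upper-level entry. Reconstructed for the PTE case (the PMD case at lines 925-934 is identical one level up, with pud_clear() on the PUD entry):

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
        pte_t *pte;
        int i;

        /* any live PTE keeps the whole table alive */
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte = pte_start + i;
                if (!pte_none(*pte))
                        return;
        }

        /* free a pte table */
        free_pagetable(pmd_page(*pmd), 0);
        spin_lock(&init_mm.page_table_lock);
        pmd_clear(pmd);
        spin_unlock(&init_mm.page_table_lock);
}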
1036 pmd_t *pmd; in remove_pmd_table() local
1039 pmd = pmd_start + pmd_index(addr); in remove_pmd_table()
1040 for (; addr < end; addr = next, pmd++) { in remove_pmd_table()
1043 if (!pmd_present(*pmd)) in remove_pmd_table()
1046 if (pmd_large(*pmd)) { in remove_pmd_table()
1050 free_hugepage_table(pmd_page(*pmd), in remove_pmd_table()
1054 pmd_clear(pmd); in remove_pmd_table()
1061 page_addr = page_address(pmd_page(*pmd)); in remove_pmd_table()
1064 free_hugepage_table(pmd_page(*pmd), in remove_pmd_table()
1068 pmd_clear(pmd); in remove_pmd_table()
1076 pte_base = (pte_t *)pmd_page_vaddr(*pmd); in remove_pmd_table()
1078 free_pte_table(pte_base, pmd); in remove_pmd_table()
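Putting the matches in order, the teardown loop distinguishes 2 MiB leaf entries from PMDs carrying a PTE table (sketch; the partially-covered case at lines 1061-1068, which only frees the huge page once the whole PMD is unused, is summarized in a comment, and the direct/altmap plumbing is trimmed):

        for (; addr < end; addr = next, pmd++) {
                next = pmd_addr_end(addr, end);

                if (!pmd_present(*pmd))
                        continue;

                if (pmd_large(*pmd)) {
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
                            IS_ALIGNED(next, PMD_SIZE)) {
                                /* range covers the whole 2 MiB leaf */
                                free_hugepage_table(pmd_page(*pmd), altmap);
                                spin_lock(&init_mm.page_table_lock);
                                pmd_clear(pmd);
                                spin_unlock(&init_mm.page_table_lock);
                        }
                        /* else: mark the sub-range unused, free the page
                         * only when nothing in the PMD is live anymore */
                        continue;
                }

                /* a PTE table hangs below: empty it, then try to free it */
                pte_base = (pte_t *)pmd_page_vaddr(*pmd);
                remove_pte_table(pte_base, addr, next);
                free_pte_table(pte_base, pmd);
        }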
1347 * is a full PMD. If we would align _brk_end to PAGE_SIZE we in mark_rodata_ro()
1348 * split the PMD and the remainder between _brk_end and the end in mark_rodata_ro()
1349 * of the PMD will remain mapped executable. in mark_rodata_ro()
1351 * Any PMD which was setup after the one which covers _brk_end in mark_rodata_ro()
1381 pmd_t *pmd; in kern_addr_valid() local
1402 pmd = pmd_offset(pud, addr); in kern_addr_valid()
1403 if (!pmd_present(*pmd)) in kern_addr_valid()
1406 if (pmd_large(*pmd)) in kern_addr_valid()
1407 return pfn_valid(pmd_pfn(*pmd)); in kern_addr_valid()
1409 pte = pte_offset_kernel(pmd, addr); in kern_addr_valid()
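At the PMD step of the walk, a large entry is itself a leaf, so validity can be decided without descending (sketch of the tail of kern_addr_valid()):

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return 0;

        /* a 2 MiB leaf: the PMD's own pfn answers the question */
        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        /* otherwise descend to the PTE level */
        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));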
1486 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
1500 pmd_t *pmd; in vmemmap_populate_hugepages() local
1517 pmd = pmd_offset(pud, addr); in vmemmap_populate_hugepages()
1518 if (pmd_none(*pmd)) { in vmemmap_populate_hugepages()
1527 set_pmd(pmd, __pmd(pte_val(entry))); in vmemmap_populate_hugepages()
1532 pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", in vmemmap_populate_hugepages()
1544 } else if (pmd_large(*pmd)) { in vmemmap_populate_hugepages()
1545 vmemmap_verify((pte_t *)pmd, node, addr, next); in vmemmap_populate_hugepages()
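The populate step at lines 1517-1545 tries a 2 MiB allocation first and only falls back to base pages when that fails (sketch; the allocator's exact name and arguments vary across kernel versions):

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
                void *p;

                /* try to back this 2 MiB of vmemmap with one huge page */
                p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
                if (p) {
                        pte_t entry;

                        entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                        PAGE_KERNEL_LARGE);
                        set_pmd(pmd, __pmd(pte_val(entry)));
                        continue;
                }
                /* allocation failed: fall through to 4 KiB population */
        } else if (pmd_large(*pmd)) {
                /* already mapped huge: just sanity-check it */
                vmemmap_verify((pte_t *)pmd, node, addr, next);
                continue;
        }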
1584 pmd_t *pmd; in register_page_bootmem_memmap() local
1614 pmd = pmd_offset(pud, addr); in register_page_bootmem_memmap()
1615 if (pmd_none(*pmd)) in register_page_bootmem_memmap()
1617 get_page_bootmem(section_nr, pmd_page(*pmd), in register_page_bootmem_memmap()
1620 pte = pte_offset_kernel(pmd, addr); in register_page_bootmem_memmap()
1628 pmd = pmd_offset(pud, addr); in register_page_bootmem_memmap()
1629 if (pmd_none(*pmd)) in register_page_bootmem_memmap()
1633 page = pmd_page(*pmd); in register_page_bootmem_memmap()
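register_page_bootmem_memmap() reaches the PMD level on two branches, which is why pmd_offset() shows up at both 1614 and 1628: without PSE it registers the PTE-table page itself (plus each mapped page), with PSE it registers every page of the 2 MiB leaf (sketch):

        if (!boot_cpu_has(X86_FEATURE_PSE)) {
                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd))
                        continue;
                /* account the PTE table page backing this range */
                get_page_bootmem(section_nr, pmd_page(*pmd),
                                 MIX_SECTION_INFO);
                /* ... then each PTE-mapped page ... */
        } else {
                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd))
                        continue;
                /* account every page of the 2 MiB leaf */
                nr_pmd_pages = 1 << get_order(PMD_SIZE);
                page = pmd_page(*pmd);
                while (nr_pmd_pages--)
                        get_page_bootmem(section_nr, page++,
                                         SECTION_INFO);
        }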
1645 pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", in vmemmap_populate_print_last()