Lines Matching +full:start +full:- +full:up

27 #include <asm/text-patching.h>
43 * WC and WT fall back to UC-. pat_init() updates these values to support
45 * for the details. Note, __early_ioremap() used during early boot-time
82 * Check that the write-protect PAT entry is set for write-protect.
83 * To do this without making assumptions about how PAT has been set up (Xen has
116 * Provide a run-time means of disabling ZONE_DMA32 if it is enabled via
176 * By default we need 3 4k pages for the initial PMD_SIZE mapping, 3 4k pages for 0-ISA_END_ADDRESS.
205 unsigned long start; member
215 * up after us can get the correct flags. Invoked on the boot CPU.
250 /* Except when with PTI where the kernel is mostly non-Global: */ in probe_page_size_mask()
273 * This can't be cr4_set_bits_and_update_boot() -- the in setup_pcid()
280 * Instead, we brute-force it and set CR4.PCIDE manually in in setup_pcid()
286 * INVPCID's single-context modes (2/3) only work if we set in setup_pcid()
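The setup_pcid() hits above explain why PCID enablement bypasses cr4_set_bits_and_update_boot() and why INVPCID's single-context modes depend on CR4.PCIDE. A minimal sketch of that shape, assuming the usual cr4_set_bits()/boot_cpu_has()/setup_clear_cpu_cap() helpers and omitting the INVPCID_SINGLE forcing:

        /* sketch only, not the verbatim function */
        static void setup_pcid(void)
        {
                if (!IS_ENABLED(CONFIG_X86_64))
                        return;
                if (!boot_cpu_has(X86_FEATURE_PCID))
                        return;

                if (boot_cpu_has(X86_FEATURE_PGE)) {
                        /* brute-force: set CR4.PCIDE here (and again on each secondary CPU) */
                        cr4_set_bits(X86_CR4_PCIDE);
                } else {
                        /* without PGE the TLB-flush paths cannot cope with PCID; hide it */
                        setup_clear_cpu_cap(X86_FEATURE_PCID);
                }
        }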
318 mr[nr_range].start = start_pfn<<PAGE_SHIFT; in save_mr()
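The "member" hit at source line 205 and the save_mr() line above both belong to struct map_range, the small descriptor that split_mem_range() fills in: one physical range plus the largest page size it may be mapped with. Its layout, as implied by these hits (start/end in bytes, mask of PG_LEVEL_* bits):

        struct map_range {
                unsigned long start;
                unsigned long end;
                unsigned page_size_mask;
        };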
339 unsigned long start = round_down(mr[i].start, PMD_SIZE); in adjust_range_page_size_mask() local
347 if (memblock_is_region_memory(start, end - start)) in adjust_range_page_size_mask()
352 unsigned long start = round_down(mr[i].start, PUD_SIZE); in adjust_range_page_size_mask() local
355 if (memblock_is_region_memory(start, end - start)) in adjust_range_page_size_mask()
368 if (mr->page_size_mask & (1<<PG_LEVEL_1G)) in page_size_string()
371 * 32-bit without PAE has a 4M large page size. in page_size_string()
377 mr->page_size_mask & (1<<PG_LEVEL_2M)) in page_size_string()
380 if (mr->page_size_mask & (1<<PG_LEVEL_2M)) in page_size_string()
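Those branches amount to a small label helper for the debug print further down; a sketch consistent with the matched lines (the "4M" case covers 32-bit builds without PAE, where the large page is 4M rather than 2M):

        static const char *page_size_string(struct map_range *mr)
        {
                if (mr->page_size_mask & (1 << PG_LEVEL_1G))
                        return "1G";
                /* 32-bit without PAE has a 4M large page size */
                if (IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_X86_PAE) &&
                    mr->page_size_mask & (1 << PG_LEVEL_2M))
                        return "4M";
                if (mr->page_size_mask & (1 << PG_LEVEL_2M))
                        return "2M";
                return "4k";
        }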
387 unsigned long start, in split_mem_range() argument
397 pfn = start_pfn = PFN_DOWN(start); in split_mem_range()
465 for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { in split_mem_range()
467 if (mr[i].end != mr[i+1].start || in split_mem_range()
471 old_start = mr[i].start; in split_mem_range()
473 (nr_range - 1 - i) * sizeof(struct map_range)); in split_mem_range()
474 mr[i--].start = old_start; in split_mem_range()
475 nr_range--; in split_mem_range()
479 pr_debug(" [mem %#010lx-%#010lx] page %s\n", in split_mem_range()
480 mr[i].start, mr[i].end - 1, in split_mem_range()
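split_mem_range() carves [start, end) so that unaligned head and tail pieces fall back to 4k pages while the aligned middle uses 2M and 1G pages, and the loop above then merges neighbours that ended up with the same page_size_mask. A hypothetical 64-bit example (gbpages and 2M pages allowed, start = 1M, end = 3G) would come out of the pr_debug above roughly as:

        [mem 0x00100000-0x001fffff] page 4k
        [mem 0x00200000-0x3fffffff] page 2M
        [mem 0x40000000-0xbfffffff] page 1G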
497 if (start_pfn < (1UL<<(32-PAGE_SHIFT))) in add_pfn_range_mapped()
499 min(end_pfn, 1UL<<(32-PAGE_SHIFT))); in add_pfn_range_mapped()
507 if ((start_pfn >= pfn_mapped[i].start) && in pfn_range_is_mapped()
519 unsigned long __ref init_memory_mapping(unsigned long start, in init_memory_mapping() argument
526 pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n", in init_memory_mapping()
527 start, end - 1); in init_memory_mapping()
530 nr_range = split_mem_range(mr, 0, start, end); in init_memory_mapping()
533 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, in init_memory_mapping()
537 add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT); in init_memory_mapping()
564 u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end); in init_range_memory_mapping() local
566 if (start >= end) in init_range_memory_mapping()
573 can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >= in init_range_memory_mapping()
575 init_memory_mapping(start, end, PAGE_KERNEL); in init_range_memory_mapping()
576 mapped_ram_size += end - start; in init_range_memory_mapping()
593 * Don't need to worry about overflow in the top-down case, on 32bit, in get_new_step_size()
594 * when step_size is 0, round_down() returns 0 for start, and that in get_new_step_size()
596 * In the bottom-up case, round_up(x, 0) returns 0 though too, which in get_new_step_size()
599 return step_size << (PMD_SHIFT - PAGE_SHIFT - 1); in get_new_step_size()
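The returned value grows the mapping window by a constant factor each round. With 4k pages (PAGE_SHIFT = 12) and 2M PMDs (PMD_SHIFT = 21):

        step_size << (PMD_SHIFT - PAGE_SHIFT - 1) = step_size << 8 = step_size * 256

The extra "- 1" keeps each new window small enough that the page tables needed to map it still fit in the memory mapped so far.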
603 * memory_map_top_down - Map [map_start, map_end) top down
604 * @map_start: start address of the target memory range
608 * [map_start, map_end) in top-down. That said, the page tables
610 * memory in top-down.
615 unsigned long real_end, start, last_start; in memory_map_top_down() local
628 last_start = start = real_end; in memory_map_top_down()
631 * We start from the top (end of memory) and go to the bottom. in memory_map_top_down()
638 start = round_down(last_start - 1, step_size); in memory_map_top_down()
639 if (start < map_start) in memory_map_top_down()
640 start = map_start; in memory_map_top_down()
642 start = map_start; in memory_map_top_down()
643 mapped_ram_size += init_range_memory_mapping(start, in memory_map_top_down()
645 last_start = start; in memory_map_top_down()
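Pieced together from the hits above, the top-down walk has roughly this shape (a sketch, not the verbatim function; real_end is a PMD_SIZE-aligned point near the top of the range and step_size starts at PMD_SIZE):

        last_start = start = real_end;
        while (last_start > map_start) {
                if (last_start > step_size) {
                        start = round_down(last_start - 1, step_size);
                        if (start < map_start)
                                start = map_start;
                } else {
                        start = map_start;
                }
                mapped_ram_size += init_range_memory_mapping(start, last_start);
                last_start = start;
                /* once enough RAM is mapped to hold new page tables, widen the step */
                if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
        }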
656 * memory_map_bottom_up - Map [map_start, map_end) bottom up
657 * @map_start: start address of the target memory range
661 * [map_start, map_end) in bottom-up. Since we have limited the
662 * bottom-up allocation above the kernel, the page tables will
664 * in [map_start, map_end) in bottom-up.
669 unsigned long next, start; in memory_map_bottom_up() local
674 start = map_start; in memory_map_bottom_up()
675 min_pfn_mapped = start >> PAGE_SHIFT; in memory_map_bottom_up()
678 * We start from the bottom (@map_start) and go to the top (@map_end). in memory_map_bottom_up()
683 while (start < map_end) { in memory_map_bottom_up()
684 if (step_size && map_end - start > step_size) { in memory_map_bottom_up()
685 next = round_up(start + 1, step_size); in memory_map_bottom_up()
692 mapped_ram_size += init_range_memory_mapping(start, next); in memory_map_bottom_up()
693 start = next; in memory_map_bottom_up()
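The bottom-up variant mirrors this, walking upward from map_start; again a sketch assembled from the matched lines:

        while (start < map_end) {
                if (step_size && map_end - start > step_size) {
                        next = round_up(start + 1, step_size);
                        if (next > map_end)
                                next = map_end;
                } else {
                        next = map_end;
                }
                mapped_ram_size += init_range_memory_mapping(start, next);
                start = next;
                if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
        }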
709 * area. This limits the randomization granularity to 1GB for both 4-level
710 * and 5-level paging.
743 * If the allocation is in bottom-up direction, we set up direct mapping in init_mem_mapping()
744 * in bottom-up, otherwise we set up direct mapping in top-down. in init_mem_mapping()
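In other words, the direct-map construction follows the memblock allocation direction; condensed, and assuming kernel_end = __pa_symbol(_end):

        if (memblock_bottom_up()) {
                /* map above the kernel first so freshly allocated page tables land there */
                memory_map_bottom_up(kernel_end, end);
                memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
        } else {
                memory_map_top_down(ISA_END_ADDRESS, end);
        }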
799 (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE); in poking_init()
805 * We need to trigger the allocation of the page-tables that will be in poking_init()
823 * Access has to be given to non-kernel-ram areas as well; these contain the
874 * mark them not present - any buggy init-section access will in free_init_pages()
878 pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n", in free_init_pages()
879 begin, end - 1); in free_init_pages()
884 kmemleak_free_part((void *)begin, end - begin); in free_init_pages()
885 set_memory_np(begin, (end - begin) >> PAGE_SHIFT); in free_init_pages()
890 * writeable and non-executable first. in free_init_pages()
892 set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); in free_init_pages()
893 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); in free_init_pages()
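The two cleanup strategies visible above, in schematic form (a sketch of the shape, not the exact function body):

        if (debug_pagealloc_enabled()) {
                /* keep the pages but unmap them: stray init-section accesses fault immediately */
                kmemleak_free_part((void *)begin, end - begin);
                set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
        } else {
                /* turn them back into ordinary RAM: writable, non-executable, then free */
                set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
                set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
                free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what);
        }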
909 unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT; in free_kernel_image_pages()
943 void __init free_initrd_mem(unsigned long start, unsigned long end) in free_initrd_mem() argument
949 * - i386_start_kernel() in free_initrd_mem()
950 * - x86_64_start_kernel() in free_initrd_mem()
951 * - relocate_initrd() in free_initrd_mem()
954 free_init_pages("initrd", start, PAGE_ALIGN(end)); in free_initrd_mem()
960 * and pass it to the MM layer - to help it set zone watermarks more
963 * Done on 64-bit systems only for the time being, although 32-bit systems
984 nr_pages += end_pfn - start_pfn; in memblock_find_dma_reserve()
998 nr_free_pages += end_pfn - start_pfn; in memblock_find_dma_reserve()
1001 set_dma_reserve(nr_pages - nr_free_pages); in memblock_find_dma_reserve()
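The two loops above count, respectively, every page below the ZONE_DMA pfn limit and the free pages among them; what is handed to the MM layer is simply the difference:

        dma_reserve = nr_pages - nr_free_pages

i.e. the DMA-zone pages already used or reserved, so the zone watermarks are computed against the amount that is actually available.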
1028 return -EINVAL; in early_disable_dma32()
1040 .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
1045 /* entry 0 MUST be WB (hardwired to speed up translations) */ in update_cache_mode_entry()
1067 l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; in max_swapfile_size()