/*
 * MIPS Huge TLB Page Support for Kernel.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 * Copyright 2005, Embedded Alley Solutions, Inc.
 * Matt Porter <mporter@embeddedalley.com>
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 */
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun #include <linux/fs.h>
15*4882a593Smuzhiyun #include <linux/mm.h>
16*4882a593Smuzhiyun #include <linux/hugetlb.h>
17*4882a593Smuzhiyun #include <linux/pagemap.h>
18*4882a593Smuzhiyun #include <linux/err.h>
19*4882a593Smuzhiyun #include <linux/sysctl.h>
20*4882a593Smuzhiyun #include <asm/mman.h>
21*4882a593Smuzhiyun #include <asm/tlb.h>
22*4882a593Smuzhiyun #include <asm/tlbflush.h>
23*4882a593Smuzhiyun
huge_pte_alloc(struct mm_struct * mm,struct vm_area_struct * vma,unsigned long addr,unsigned long sz)24*4882a593Smuzhiyun pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
25*4882a593Smuzhiyun unsigned long addr, unsigned long sz)
26*4882a593Smuzhiyun {
27*4882a593Smuzhiyun pgd_t *pgd;
28*4882a593Smuzhiyun p4d_t *p4d;
29*4882a593Smuzhiyun pud_t *pud;
30*4882a593Smuzhiyun pte_t *pte = NULL;
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun pgd = pgd_offset(mm, addr);
33*4882a593Smuzhiyun p4d = p4d_alloc(mm, pgd, addr);
34*4882a593Smuzhiyun pud = pud_alloc(mm, p4d, addr);
35*4882a593Smuzhiyun if (pud)
36*4882a593Smuzhiyun pte = (pte_t *)pmd_alloc(mm, pud, addr);
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun return pte;
39*4882a593Smuzhiyun }
40*4882a593Smuzhiyun
huge_pte_offset(struct mm_struct * mm,unsigned long addr,unsigned long sz)41*4882a593Smuzhiyun pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
42*4882a593Smuzhiyun unsigned long sz)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun pgd_t *pgd;
45*4882a593Smuzhiyun p4d_t *p4d;
46*4882a593Smuzhiyun pud_t *pud;
47*4882a593Smuzhiyun pmd_t *pmd = NULL;
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun pgd = pgd_offset(mm, addr);
50*4882a593Smuzhiyun if (pgd_present(*pgd)) {
51*4882a593Smuzhiyun p4d = p4d_offset(pgd, addr);
52*4882a593Smuzhiyun if (p4d_present(*p4d)) {
53*4882a593Smuzhiyun pud = pud_offset(p4d, addr);
54*4882a593Smuzhiyun if (pud_present(*pud))
55*4882a593Smuzhiyun pmd = pmd_offset(pud, addr);
56*4882a593Smuzhiyun }
57*4882a593Smuzhiyun }
58*4882a593Smuzhiyun return (pte_t *) pmd;
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun /*
62*4882a593Smuzhiyun * This function checks for proper alignment of input addr and len parameters.
63*4882a593Smuzhiyun */
is_aligned_hugepage_range(unsigned long addr,unsigned long len)64*4882a593Smuzhiyun int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun if (len & ~HPAGE_MASK)
67*4882a593Smuzhiyun return -EINVAL;
68*4882a593Smuzhiyun if (addr & ~HPAGE_MASK)
69*4882a593Smuzhiyun return -EINVAL;
70*4882a593Smuzhiyun return 0;
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun
/* Report whether @pmd maps a huge page (its _PAGE_HUGE bit is set). */
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}
77*4882a593Smuzhiyun
/* Report whether @pud maps a huge page (its _PAGE_HUGE bit is set). */
int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_HUGE);
}
82