// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/elf.h>
#include <linux/init.h>

static unsigned int __read_mostly vdso_enabled = 1;
unsigned long um_vdso_addr;

extern unsigned long task_size;
extern char vdso_start[], vdso_end[];

static struct page **vdsop;

static int __init init_vdso(void)
{
	struct page *um_vdso;

	/* The vDSO image must fit into the single page reserved for it. */
	BUG_ON(vdso_end - vdso_start > PAGE_SIZE);

	/* Place the vDSO in the last page of the task address space. */
	um_vdso_addr = task_size - PAGE_SIZE;

	vdsop = kmalloc(sizeof(struct page *), GFP_KERNEL);
	if (!vdsop)
		goto oom;

	um_vdso = alloc_page(GFP_KERNEL);
	if (!um_vdso) {
		kfree(vdsop);

		goto oom;
	}

	/* Copy the linked-in vDSO image into the freshly allocated page. */
	copy_page(page_address(um_vdso), vdso_start);
	*vdsop = um_vdso;

	return 0;

oom:
	printk(KERN_ERR "Cannot allocate vdso\n");
	vdso_enabled = 0;

	return -ENOMEM;
}
subsys_initcall(init_vdso);

/* Map the vDSO page into a new process' address space at exec time. */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int err;
	struct mm_struct *mm = current->mm;

	if (!vdso_enabled)
		return 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
		VM_READ|VM_EXEC|
		VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
		vdsop);

	mmap_write_unlock(mm);

	return err;
}
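
/*
 * For reference, a minimal userspace sketch (not part of this file) showing
 * how a process can locate the page that arch_setup_additional_pages()
 * mapped. It assumes the address is advertised through the auxiliary vector
 * as AT_SYSINFO_EHDR, which is how the UML x86_64 port normally exports
 * um_vdso_addr; getauxval() is the glibc accessor for auxv entries.
 *
 *	#include <elf.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/auxv.h>
 *
 *	int main(void)
 *	{
 *		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
 *
 *		if (!vdso) {
 *			puts("no vDSO mapped (vdso_enabled may be 0)");
 *			return 1;
 *		}
 *
 *		// The mapped page starts with an ELF header.
 *		if (!memcmp((void *)vdso, ELFMAG, SELFMAG))
 *			printf("vDSO ELF image mapped at %#lx\n", vdso);
 *		return 0;
 *	}
 */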