xref: /OK3568_Linux_fs/kernel/arch/powerpc/oprofile/cell/vma_map.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: Maynard Johnson <maynardj@us.ibm.com>
 */

/* The code in this source file is responsible for generating
 * vma-to-fileOffset maps for both overlay and non-overlay SPU
 * applications.
 */

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/elf.h>
#include <linux/slab.h>
#include "pr_util.h"

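/* Free every entry in a vma-to-fileoffset map list. */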
void vma_map_free(struct vma_to_fileoffset_map *map)
{
	while (map) {
		struct vma_to_fileoffset_map *next = map->next;
		kfree(map);
		map = next;
	}
}

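/* Translate an SPU vma to a file offset using the given map.  If the vma
 * falls inside an overlay section, the overlay must be resident (its guard
 * value must match) for the entry to be used, and *grd_val is set to the
 * guard value that was read.  Unmapped addresses return 0x10000000 + vma.
 */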
unsigned int
vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma,
	       const struct spu *aSpu, int *grd_val)
{
	/*
	 * Default the offset to the physical address + a flag value.
	 * Addresses of dynamically generated code can't be found in the vma
	 * map.  For those addresses the flagged value will be sent on to
	 * the user space tools so they can be reported rather than just
	 * thrown away.
	 */
	u32 offset = 0x10000000 + vma;
	u32 ovly_grd;

	for (; map; map = map->next) {
		if (vma < map->vma || vma >= map->vma + map->size)
			continue;

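		/* For overlay sections, only accept the match if this overlay
		 * is currently loaded: the guard word in SPU local store must
		 * equal the guard value recorded for this map entry.
		 */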
		if (map->guard_ptr) {
			ovly_grd = *(u32 *)(aSpu->local_store + map->guard_ptr);
			if (ovly_grd != map->guard_val)
				continue;
			*grd_val = ovly_grd;
		}
		offset = vma - map->vma + map->offset;
		break;
	}

	return offset;
}

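/* Prepend a new entry to the map list.  On allocation failure the whole
 * existing list is freed and NULL is returned, so the caller must not
 * keep using the old map pointer.
 */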
static struct vma_to_fileoffset_map *
vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma,
	    unsigned int size, unsigned int offset, unsigned int guard_ptr,
	    unsigned int guard_val)
{
	struct vma_to_fileoffset_map *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (!new) {
		printk(KERN_ERR "SPU_PROF: %s, line %d: malloc failed\n",
		       __func__, __LINE__);
		vma_map_free(map);
		return NULL;
	}

	new->next = map;
	new->vma = vma;
	new->size = size;
	new->offset = offset;
	new->guard_ptr = guard_ptr;
	new->guard_val = guard_val;

	return new;
}


/* Parse SPE ELF header and generate a list of vma_maps.
 * A pointer to the first vma_map in the generated list
 * of vma_maps is returned.  */
struct vma_to_fileoffset_map *create_vma_map(const struct spu *aSpu,
					     unsigned long __spu_elf_start)
{
	static const unsigned char expected[EI_PAD] = {
		[EI_MAG0] = ELFMAG0,
		[EI_MAG1] = ELFMAG1,
		[EI_MAG2] = ELFMAG2,
		[EI_MAG3] = ELFMAG3,
		[EI_CLASS] = ELFCLASS32,
		[EI_DATA] = ELFDATA2MSB,
		[EI_VERSION] = EV_CURRENT,
		[EI_OSABI] = ELFOSABI_NONE
	};

	int grd_val;
	struct vma_to_fileoffset_map *map = NULL;
	void __user *spu_elf_start = (void __user *)__spu_elf_start;
	struct spu_overlay_info ovly;
	unsigned int overlay_tbl_offset = -1;
	Elf32_Phdr __user *phdr_start;
	Elf32_Shdr __user *shdr_start;
	Elf32_Ehdr ehdr;
	Elf32_Phdr phdr;
	Elf32_Shdr shdr, shdr_str;
	Elf32_Sym sym;
	int i, j;
	char name[32];

	unsigned int ovly_table_sym = 0;
	unsigned int ovly_buf_table_sym = 0;
	unsigned int ovly_table_end_sym = 0;
	unsigned int ovly_buf_table_end_sym = 0;
	struct spu_overlay_info __user *ovly_table;
	unsigned int n_ovlys;

	/* Get and validate ELF header.  */

	if (copy_from_user(&ehdr, spu_elf_start, sizeof (ehdr)))
		goto fail;

	if (memcmp(ehdr.e_ident, expected, EI_PAD) != 0) {
		printk(KERN_ERR "SPU_PROF: "
		       "%s, line %d: Unexpected e_ident parsing SPU ELF\n",
		       __func__, __LINE__);
		goto fail;
	}
	if (ehdr.e_machine != EM_SPU) {
		printk(KERN_ERR "SPU_PROF: "
		       "%s, line %d: Unexpected e_machine parsing SPU ELF\n",
		       __func__,  __LINE__);
		goto fail;
	}
	if (ehdr.e_type != ET_EXEC) {
		printk(KERN_ERR "SPU_PROF: "
		       "%s, line %d: Unexpected e_type parsing SPU ELF\n",
		       __func__, __LINE__);
		goto fail;
	}
	phdr_start = spu_elf_start + ehdr.e_phoff;
	shdr_start = spu_elf_start + ehdr.e_shoff;

	/* Traverse program headers.  */
	for (i = 0; i < ehdr.e_phnum; i++) {
		if (copy_from_user(&phdr, phdr_start + i, sizeof(phdr)))
			goto fail;

		if (phdr.p_type != PT_LOAD)
			continue;
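		/* Bit 27 of p_flags marks an overlay segment (the SPU
		 * toolchain's PF_OVERLAY flag); overlay sections get their
		 * maps from the overlay table below, not from here.
		 */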
		if (phdr.p_flags & (1 << 27))
			continue;

		map = vma_map_add(map, phdr.p_vaddr, phdr.p_memsz,
				  phdr.p_offset, 0, 0);
		if (!map)
			goto fail;
	}

	pr_debug("SPU_PROF: Created non-overlay maps\n");
	/* Traverse section table and search for overlay-related symbols.  */
	for (i = 0; i < ehdr.e_shnum; i++) {
		if (copy_from_user(&shdr, shdr_start + i, sizeof(shdr)))
			goto fail;

		if (shdr.sh_type != SHT_SYMTAB)
			continue;
		if (shdr.sh_entsize != sizeof (sym))
			continue;

		if (copy_from_user(&shdr_str,
				   shdr_start + shdr.sh_link,
				   sizeof(shdr)))
			goto fail;

		if (shdr_str.sh_type != SHT_STRTAB)
			goto fail;

		for (j = 0; j < shdr.sh_size / sizeof (sym); j++) {
			if (copy_from_user(&sym, spu_elf_start +
						 shdr.sh_offset +
						 j * sizeof (sym),
					   sizeof (sym)))
				goto fail;

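			/* 20 bytes is enough to cover the longest symbol name
			 * checked below, "_ovly_buf_table_end", including its
			 * terminating NUL.
			 */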
			if (copy_from_user(name,
					   spu_elf_start + shdr_str.sh_offset +
					   sym.st_name,
					   20))
				goto fail;

			if (memcmp(name, "_ovly_table", 12) == 0)
				ovly_table_sym = sym.st_value;
			if (memcmp(name, "_ovly_buf_table", 16) == 0)
				ovly_buf_table_sym = sym.st_value;
			if (memcmp(name, "_ovly_table_end", 16) == 0)
				ovly_table_end_sym = sym.st_value;
			if (memcmp(name, "_ovly_buf_table_end", 20) == 0)
				ovly_buf_table_end_sym = sym.st_value;
		}
	}

	/* If we don't have overlays, we're done.  */
	if (ovly_table_sym == 0 || ovly_buf_table_sym == 0
	    || ovly_table_end_sym == 0 || ovly_buf_table_end_sym == 0) {
		pr_debug("SPU_PROF: No overlay table found\n");
		goto out;
	} else {
		pr_debug("SPU_PROF: Overlay table found\n");
	}

	/* The _ovly_table symbol represents a table with one entry
	 * per overlay section.  The _ovly_buf_table symbol represents
	 * a table with one entry per overlay region.
	 * The struct spu_overlay_info gives the structure of the _ovly_table
	 * entries.  The structure of _ovly_buf_table is simply one
	 * u32 word per entry.
	 */
	overlay_tbl_offset = vma_map_lookup(map, ovly_table_sym,
					    aSpu, &grd_val);
	if (overlay_tbl_offset > 0x10000000) {
		printk(KERN_ERR "SPU_PROF: "
		       "%s, line %d: Error finding SPU overlay table\n",
		       __func__, __LINE__);
		goto fail;
	}
	ovly_table = spu_elf_start + overlay_tbl_offset;

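	/* _ovly_table and _ovly_table_end are SPU local-store addresses, so
	 * their difference is the size of the overlay table in bytes.
	 */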
	n_ovlys = (ovly_table_end_sym -
		   ovly_table_sym) / sizeof (ovly);

	/* Traverse overlay table.  */
	for (i = 0; i < n_ovlys; i++) {
		if (copy_from_user(&ovly, ovly_table + i, sizeof (ovly)))
			goto fail;

		/* The ovly.vma/size/offset arguments are analogous to the same
		 * arguments used above for non-overlay maps.  The final two
		 * args are referred to as the guard pointer and the guard
		 * value.
		 * The guard pointer is an entry in the _ovly_buf_table,
		 * computed using ovly.buf as the index into the table.  Since
		 * ovly.buf values begin at '1' to reference the first (or 0th)
		 * entry in the _ovly_buf_table, the computation subtracts 1
		 * from ovly.buf.
		 * The guard value is stored in the _ovly_buf_table entry and
		 * is an index (starting at 1) back to the _ovly_table entry
		 * that is pointing at this _ovly_buf_table entry.  So, for
		 * example, for an overlay scenario with one overlay segment
		 * and two overlay sections:
		 *	- Section 1 points to the first entry of the
		 *	  _ovly_buf_table, which contains a guard value
		 *	  of '1', referencing the first (index=0) entry of
		 *	  _ovly_table.
		 *	- Section 2 points to the second entry of the
		 *	  _ovly_buf_table, which contains a guard value
		 *	  of '2', referencing the second (index=1) entry of
		 *	  _ovly_table.
		 */
		map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset,
				  ovly_buf_table_sym + (ovly.buf-1) * 4, i+1);
		if (!map)
			goto fail;
	}
	goto out;

 fail:
	map = NULL;
 out:
	return map;
}