// SPDX-License-Identifier: GPL-2.0-only
/*
 * Coredump functionality for Remoteproc framework.
 *
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/devcoredump.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/remoteproc.h>
#include "remoteproc_internal.h"
#include "remoteproc_elf_helpers.h"

struct rproc_coredump_state {
        struct rproc *rproc;
        void *header;
        struct completion dump_done;
};

/**
 * rproc_coredump_cleanup() - clean up dump_segments list
 * @rproc: the remote processor handle
 */
void rproc_coredump_cleanup(struct rproc *rproc)
{
        struct rproc_dump_segment *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
                list_del(&entry->node);
                kfree(entry);
        }
}
EXPORT_SYMBOL(rproc_coredump_cleanup);

/**
 * rproc_coredump_add_segment() - add segment of device memory to coredump
 * @rproc: handle of a remote processor
 * @da: device address
 * @size: size of segment
 *
 * Add device memory to the list of segments to be included in a coredump for
 * the remoteproc.
 *
 * Return: 0 on success, negative errno on error.
 */
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
{
        struct rproc_dump_segment *segment;

        segment = kzalloc(sizeof(*segment), GFP_KERNEL);
        if (!segment)
                return -ENOMEM;

        segment->da = da;
        segment->size = size;

        list_add_tail(&segment->node, &rproc->dump_segments);

        return 0;
}
EXPORT_SYMBOL(rproc_coredump_add_segment);
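
/*
 * Illustrative usage sketch (not part of this file): a platform driver
 * typically registers its dump segments while parsing firmware, so that
 * every recovery produces a coredump covering the same carveouts. The
 * function name and the carveout address/size below are hypothetical.
 *
 *      static int my_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
 *      {
 *              int ret;
 *
 *              ret = rproc_elf_load_rsc_table(rproc, fw);
 *              if (ret)
 *                      return ret;
 *
 *              return rproc_coredump_add_segment(rproc, 0x80000000, SZ_1M);
 *      }
 */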

/**
 * rproc_coredump_add_custom_segment() - add custom coredump segment
 * @rproc: handle of a remote processor
 * @da: device address
 * @size: size of segment
 * @dumpfn: custom dump function called for each segment during coredump
 * @priv: private data
 *
 * Add device memory to the list of segments to be included in the coredump
 * and associate the segment with the given custom dump function and private
 * data.
 *
 * Return: 0 on success, negative errno on error.
 */
int rproc_coredump_add_custom_segment(struct rproc *rproc,
                                      dma_addr_t da, size_t size,
                                      void (*dumpfn)(struct rproc *rproc,
                                                     struct rproc_dump_segment *segment,
                                                     void *dest, size_t offset,
                                                     size_t size),
                                      void *priv)
{
        struct rproc_dump_segment *segment;

        segment = kzalloc(sizeof(*segment), GFP_KERNEL);
        if (!segment)
                return -ENOMEM;

        segment->da = da;
        segment->size = size;
        segment->priv = priv;
        segment->dump = dumpfn;

        list_add_tail(&segment->node, &rproc->dump_segments);

        return 0;
}
EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
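
/*
 * Illustrative sketch of a custom dump callback (hypothetical names): the
 * callback receives the destination buffer plus the offset/size window being
 * read, so it can fetch the data from wherever the segment really lives,
 * for example through a secure call or a dedicated dump interface, with
 * segment->priv carrying driver context.
 *
 *      static void my_segment_dump(struct rproc *rproc,
 *                                  struct rproc_dump_segment *segment,
 *                                  void *dest, size_t offset, size_t size)
 *      {
 *              struct my_dump_ctx *ctx = segment->priv;
 *
 *              my_read_remote_mem(ctx, segment->da + offset, dest, size);
 *      }
 *
 *      rproc_coredump_add_custom_segment(rproc, da, size, my_segment_dump, ctx);
 */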

/**
 * rproc_coredump_set_elf_info() - set coredump elf information
 * @rproc: handle of a remote processor
 * @class: elf class for coredump elf file
 * @machine: elf machine for coredump elf file
 *
 * Set elf information which will be used for coredump elf file.
 *
 * Return: 0 on success, negative errno on error.
 */
int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
{
        if (class != ELFCLASS64 && class != ELFCLASS32)
                return -EINVAL;

        rproc->elf_class = class;
        rproc->elf_machine = machine;

        return 0;
}
EXPORT_SYMBOL(rproc_coredump_set_elf_info);
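
/*
 * Illustrative sketch (hypothetical probe function): this is expected to be
 * called once, typically at probe time, because both coredump routines below
 * bail out while rproc->elf_class is still ELFCLASSNONE. ELFCLASS32 and
 * EM_ARM are only example values.
 *
 *      static int my_rproc_probe(struct platform_device *pdev)
 *      {
 *              ...
 *              ret = rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_ARM);
 *              if (ret)
 *                      return ret;
 *              ...
 *      }
 */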

static void rproc_coredump_free(void *data)
{
        struct rproc_coredump_state *dump_state = data;

        vfree(dump_state->header);
        complete(&dump_state->dump_done);
}

static void *rproc_coredump_find_segment(loff_t user_offset,
                                         struct list_head *segments,
                                         size_t *data_left)
{
        struct rproc_dump_segment *segment;

        list_for_each_entry(segment, segments, node) {
                if (user_offset < segment->size) {
                        *data_left = segment->size - user_offset;
                        return segment;
                }
                user_offset -= segment->size;
        }

        *data_left = 0;
        return NULL;
}

static void rproc_copy_segment(struct rproc *rproc, void *dest,
                               struct rproc_dump_segment *segment,
                               size_t offset, size_t size)
{
        bool is_iomem = false;
        void *ptr;

        if (segment->dump) {
                segment->dump(rproc, segment, dest, offset, size);
        } else {
                ptr = rproc_da_to_va(rproc, segment->da + offset, size, &is_iomem);
                if (!ptr) {
                        dev_err(&rproc->dev,
                                "invalid copy request for segment %pad with offset %zu and size %zu\n",
                                &segment->da, offset, size);
                        memset(dest, 0xff, size);
                } else {
                        if (is_iomem)
                                memcpy_fromio(dest, ptr, size);
                        else
                                memcpy(dest, ptr, size);
                }
        }
}

static ssize_t rproc_coredump_read(char *buffer, loff_t offset, size_t count,
                                   void *data, size_t header_sz)
{
        size_t seg_data, bytes_left = count;
        ssize_t copy_sz;
        struct rproc_dump_segment *seg;
        struct rproc_coredump_state *dump_state = data;
        struct rproc *rproc = dump_state->rproc;
        void *elfcore = dump_state->header;

        /* Copy the vmalloc'ed header first. */
        if (offset < header_sz) {
                copy_sz = memory_read_from_buffer(buffer, count, &offset,
                                                  elfcore, header_sz);

                return copy_sz;
        }

        /*
         * Find out the segment memory chunk to be copied based on offset.
         * Keep copying data until count bytes are read.
         */
        while (bytes_left) {
                seg = rproc_coredump_find_segment(offset - header_sz,
                                                  &rproc->dump_segments,
                                                  &seg_data);
                /* EOF check */
                if (!seg) {
                        dev_info(&rproc->dev, "Ramdump done, %lld bytes read",
                                 offset);
                        break;
                }

                copy_sz = min_t(size_t, bytes_left, seg_data);

                rproc_copy_segment(rproc, buffer, seg, seg->size - seg_data,
                                   copy_sz);

                offset += copy_sz;
                buffer += copy_sz;
                bytes_left -= copy_sz;
        }

        return count - bytes_left;
}

/**
 * rproc_coredump() - perform coredump
 * @rproc: rproc handle
 *
 * This function will generate an ELF header for the registered segments
 * and create a devcoredump device associated with rproc. Based on the
 * coredump configuration this function will directly copy the segments
 * from device memory to userspace or copy segments from device memory to
 * a separate buffer, which can then be read by userspace.
 * The first approach avoids using extra vmalloc memory. But it will stall
 * recovery flow until dump is read by userspace.
 */
void rproc_coredump(struct rproc *rproc)
{
        struct rproc_dump_segment *segment;
        void *phdr;
        void *ehdr;
        size_t data_size;
        size_t offset;
        void *data;
        u8 class = rproc->elf_class;
        int phnum = 0;
        struct rproc_coredump_state dump_state;
        enum rproc_dump_mechanism dump_conf = rproc->dump_conf;

        if (list_empty(&rproc->dump_segments) ||
            dump_conf == RPROC_COREDUMP_DISABLED)
                return;

        if (class == ELFCLASSNONE) {
                dev_err(&rproc->dev, "Elf class is not set\n");
                return;
        }

        data_size = elf_size_of_hdr(class);
        list_for_each_entry(segment, &rproc->dump_segments, node) {
                /*
                 * For default configuration buffer includes headers & segments.
                 * For inline dump buffer just includes headers as segments are
                 * directly read from device memory.
                 */
                data_size += elf_size_of_phdr(class);
                if (dump_conf == RPROC_COREDUMP_ENABLED)
                        data_size += segment->size;

                phnum++;
        }

        data = vmalloc(data_size);
        if (!data)
                return;

        ehdr = data;

        memset(ehdr, 0, elf_size_of_hdr(class));
        /* e_ident field is common for both elf32 and elf64 */
        elf_hdr_init_ident(ehdr, class);

        elf_hdr_set_e_type(class, ehdr, ET_CORE);
        elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
        elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
        elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
        elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
        elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
        elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
        elf_hdr_set_e_phnum(class, ehdr, phnum);

        phdr = data + elf_hdr_get_e_phoff(class, ehdr);
        offset = elf_hdr_get_e_phoff(class, ehdr);
        offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);

        list_for_each_entry(segment, &rproc->dump_segments, node) {
                memset(phdr, 0, elf_size_of_phdr(class));
                elf_phdr_set_p_type(class, phdr, PT_LOAD);
                elf_phdr_set_p_offset(class, phdr, offset);
                elf_phdr_set_p_vaddr(class, phdr, segment->da);
                elf_phdr_set_p_paddr(class, phdr, segment->da);
                elf_phdr_set_p_filesz(class, phdr, segment->size);
                elf_phdr_set_p_memsz(class, phdr, segment->size);
                elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
                elf_phdr_set_p_align(class, phdr, 0);

                if (dump_conf == RPROC_COREDUMP_ENABLED)
                        rproc_copy_segment(rproc, data + offset, segment, 0,
                                           segment->size);

                offset += elf_phdr_get_p_filesz(class, phdr);
                phdr += elf_size_of_phdr(class);
        }
        if (dump_conf == RPROC_COREDUMP_ENABLED) {
                dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
                return;
        }

        /* Initialize the dump state struct to be used by rproc_coredump_read */
        dump_state.rproc = rproc;
        dump_state.header = data;
        init_completion(&dump_state.dump_done);

        dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
                      rproc_coredump_read, rproc_coredump_free);

        /*
         * Wait until the dump is read and free is called. Data is freed
         * by devcoredump framework automatically after 5 minutes.
         */
        wait_for_completion(&dump_state.dump_done);
}
EXPORT_SYMBOL(rproc_coredump);

/**
 * rproc_coredump_using_sections() - perform coredump using section headers
 * @rproc: rproc handle
 *
 * This function will generate an ELF header and one section header per
 * registered segment, and create a devcoredump device associated with rproc.
 * Based on the coredump configuration this function will directly copy the
 * segments from device memory to userspace or copy segments from device
 * memory to a separate buffer, which can then be read by userspace.
 * The first approach avoids using extra vmalloc memory. But it will stall
 * recovery flow until dump is read by userspace.
 */
void rproc_coredump_using_sections(struct rproc *rproc)
{
        struct rproc_dump_segment *segment;
        void *shdr;
        void *ehdr;
        size_t data_size;
        size_t strtbl_size = 0;
        size_t strtbl_index = 1;
        size_t offset;
        void *data;
        u8 class = rproc->elf_class;
        int shnum;
        struct rproc_coredump_state dump_state;
        unsigned int dump_conf = rproc->dump_conf;
        char *str_tbl = "STR_TBL";

        if (list_empty(&rproc->dump_segments) ||
            dump_conf == RPROC_COREDUMP_DISABLED)
                return;

        if (class == ELFCLASSNONE) {
                dev_err(&rproc->dev, "Elf class is not set\n");
                return;
        }

        /*
         * We allocate two extra section headers. The first one is null.
         * Second section header is for the string table. Also space is
         * allocated for string table.
         */
        data_size = elf_size_of_hdr(class) + 2 * elf_size_of_shdr(class);
        shnum = 2;

        /* +2: the null byte at string table index 0 and the terminator of str_tbl */
        strtbl_size += strlen(str_tbl) + 2;

        list_for_each_entry(segment, &rproc->dump_segments, node) {
                data_size += elf_size_of_shdr(class);
                strtbl_size += strlen(segment->priv) + 1;
                if (dump_conf == RPROC_COREDUMP_ENABLED)
                        data_size += segment->size;
                shnum++;
        }

        data_size += strtbl_size;

        data = vmalloc(data_size);
        if (!data)
                return;

        ehdr = data;
        memset(ehdr, 0, elf_size_of_hdr(class));
        /* e_ident field is common for both elf32 and elf64 */
        elf_hdr_init_ident(ehdr, class);

        elf_hdr_set_e_type(class, ehdr, ET_CORE);
        elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
        elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
        elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
        elf_hdr_set_e_shoff(class, ehdr, elf_size_of_hdr(class));
        elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
        elf_hdr_set_e_shentsize(class, ehdr, elf_size_of_shdr(class));
        elf_hdr_set_e_shnum(class, ehdr, shnum);
        elf_hdr_set_e_shstrndx(class, ehdr, 1);

        /*
         * The zeroth index of the section header is reserved and is rarely used.
         * Set the section header as null (SHN_UNDEF) and move to the next one.
         */
        shdr = data + elf_hdr_get_e_shoff(class, ehdr);
        memset(shdr, 0, elf_size_of_shdr(class));
        shdr += elf_size_of_shdr(class);

        /* Initialize the string table. */
        offset = elf_hdr_get_e_shoff(class, ehdr) +
                 elf_size_of_shdr(class) * elf_hdr_get_e_shnum(class, ehdr);
        memset(data + offset, 0, strtbl_size);

        /* Fill in the string table section header. */
        memset(shdr, 0, elf_size_of_shdr(class));
        elf_shdr_set_sh_type(class, shdr, SHT_STRTAB);
        elf_shdr_set_sh_offset(class, shdr, offset);
        elf_shdr_set_sh_size(class, shdr, strtbl_size);
        elf_shdr_set_sh_entsize(class, shdr, 0);
        elf_shdr_set_sh_flags(class, shdr, 0);
        elf_shdr_set_sh_name(class, shdr, elf_strtbl_add(str_tbl, ehdr, class, &strtbl_index));
        offset += elf_shdr_get_sh_size(class, shdr);
        shdr += elf_size_of_shdr(class);

        list_for_each_entry(segment, &rproc->dump_segments, node) {
                memset(shdr, 0, elf_size_of_shdr(class));
                elf_shdr_set_sh_type(class, shdr, SHT_PROGBITS);
                elf_shdr_set_sh_offset(class, shdr, offset);
                elf_shdr_set_sh_addr(class, shdr, segment->da);
                elf_shdr_set_sh_size(class, shdr, segment->size);
                elf_shdr_set_sh_entsize(class, shdr, 0);
                elf_shdr_set_sh_flags(class, shdr, SHF_WRITE);
                elf_shdr_set_sh_name(class, shdr,
                                     elf_strtbl_add(segment->priv, ehdr, class, &strtbl_index));

                /* No need to copy segments for inline dumps */
                if (dump_conf == RPROC_COREDUMP_ENABLED)
                        rproc_copy_segment(rproc, data + offset, segment, 0,
                                           segment->size);
                offset += elf_shdr_get_sh_size(class, shdr);
                shdr += elf_size_of_shdr(class);
        }

        if (dump_conf == RPROC_COREDUMP_ENABLED) {
                dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
                return;
        }

        /* Initialize the dump state struct to be used by rproc_coredump_read */
        dump_state.rproc = rproc;
        dump_state.header = data;
        init_completion(&dump_state.dump_done);

        dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
                      rproc_coredump_read, rproc_coredump_free);

        /*
         * Wait until the dump is read and free is called. Data is freed
         * by devcoredump framework automatically after 5 minutes.
         */
        wait_for_completion(&dump_state.dump_done);
}
EXPORT_SYMBOL(rproc_coredump_using_sections);
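
/*
 * Illustrative note (hypothetical ops table): which dump routine runs during
 * recovery is selected by the platform driver through the .coredump operation
 * in struct rproc_ops; the remoteproc core falls back to rproc_coredump()
 * when no callback is provided.
 *
 *      static const struct rproc_ops my_rproc_ops = {
 *              .start          = my_rproc_start,
 *              .stop           = my_rproc_stop,
 *              .parse_fw       = my_rproc_parse_fw,
 *              .coredump       = rproc_coredump_using_sections,
 *      };
 */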