// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/devcoredump.h>
#include <linux/moduleparam.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"

static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

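/*
 * Iterator over the devcoredump file being assembled: 'start' is the
 * beginning of the vmalloc'ed buffer, 'hdr' is the next object header
 * slot to fill and 'data' is the write position for the next payload.
 */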
struct core_dump_iterator {
	void *start;
	struct etnaviv_dump_object_header *hdr;
	void *data;
};

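/* Registers sampled from the hung GPU and stored in the dump file. */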
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};

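/*
 * Fill in the next object header for the payload currently at iter->data
 * and ending at data_end, then advance the header and data cursors.
 */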
static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
	u32 type, void *data_end)
{
	struct etnaviv_dump_object_header *hdr = iter->hdr;

	hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
	hdr->type = cpu_to_le32(type);
	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
	hdr->file_size = cpu_to_le32(data_end - iter->data);

	iter->hdr++;
	iter->data += hdr->file_size;
}

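/* Read back the register set listed above into an ETDUMP_BUF_REG object. */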
static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu)
{
	struct etnaviv_dump_registers *reg = iter->data;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
		reg->reg = etnaviv_dump_registers[i];
		reg->value = gpu_read(gpu, etnaviv_dump_registers[i]);
	}

	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
}

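/* Dump the MMU page tables into an ETDUMP_BUF_MMU object. */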
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
	struct etnaviv_iommu_context *mmu, size_t mmu_size)
{
	etnaviv_iommu_dump(mmu, iter->data);

	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
}

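/*
 * Copy a memory region (ring buffer or command buffer) into the dump and
 * record the GPU virtual address it was mapped at.
 */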
static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
	void *ptr, size_t size, u64 iova)
{
	memcpy(iter->data, ptr, size);

	iter->hdr->iova = cpu_to_le64(iova);

	etnaviv_core_dump_header(iter, type, iter->data + size);
}

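/*
 * Build a devcoredump for a hung submit: size the file, allocate it, then
 * append registers, MMU state, the kernel ring buffer, the offending
 * command buffer, the BO page map and all buffer objects, and finally hand
 * the result to the devcoredump framework.
 */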
void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct core_dump_iterator iter;
	struct etnaviv_gem_object *obj;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;
	int i;

	/* Only catch the first event, or when manually re-armed */
	if (!etnaviv_dump_core)
		return;
	etnaviv_dump_core = false;

	mutex_lock(&gpu->mmu_context->lock);

	mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);

	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
	n_obj = 5;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
		    mmu_size + gpu->buffer.size + submit->cmdbuf.size;

	/* Add in the active buffer objects */
	for (i = 0; i < submit->nr_bos; i++) {
		obj = submit->bos[i].obj;
		file_size += obj->base.size;
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;

	/* Allocate the file in vmalloc memory, it's likely to be big */
	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
			__GFP_NORETRY);
	if (!iter.start) {
		mutex_unlock(&gpu->mmu_context->lock);
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer,
					&gpu->mmu_context->cmdbuf_mapping));

	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
					&gpu->mmu_context->cmdbuf_mapping));

	mutex_unlock(&gpu->mmu_context->lock);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_vram_mapping *vram;
		struct page **pages;
		void *vaddr;

		obj = submit->bos[i].obj;
		vram = submit->bos[i].mapping;

		mutex_lock(&obj->lock);
		pages = etnaviv_gem_get_pages(obj);
		mutex_unlock(&obj->lock);
		if (!IS_ERR(pages)) {
			int j;

			iter.hdr->data[0] = bomap - bomap_start;

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		vaddr = etnaviv_gem_vmap(&obj->base);
		if (vaddr)
			memcpy(iter.data, vaddr, obj->base.size);

		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}