// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OPAL IMC interface detection driver
 * Supported on POWERNV platform
 *
 * Copyright	(C) 2017 Madhavan Srinivasan, IBM Corporation.
 *		(C) 2017 Anju T Sudhakar, IBM Corporation.
 *		(C) 2017 Hemant K Shaw, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/crash_dump.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>
#include <asm/debugfs.h>

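/* Parent debugfs directory ("imc") holding the per-chip imc_mode/imc_cmd files */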
static struct dentry *imc_debugfs_parent;

/* Helpers to export imc command and mode via debugfs */
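/*
 * The mode and command words are kept big-endian in the IMC control
 * block, hence the 64-bit byte swap on both read and write.
 */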
static int imc_mem_get(void *data, u64 *val)
{
	*val = cpu_to_be64(*(u64 *)data);
	return 0;
}

static int imc_mem_set(void *data, u64 val)
{
	*(u64 *)data = cpu_to_be64(val);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_imc_x64, imc_mem_get, imc_mem_set, "0x%016llx\n");

static void imc_debugfs_create_x64(const char *name, umode_t mode,
				   struct dentry *parent, u64 *value)
{
	debugfs_create_file_unsafe(name, mode, parent, value, &fops_imc_x64);
}

/*
 * export_imc_mode_and_cmd: Create a debugfs interface for the imc_cmd and
 * imc_mode control words of each chip in the system.
 * imc_mode and imc_cmd can be changed by echoing values into these files.
 */
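/*
 * The files show up as imc_mode_<chip-id> and imc_cmd_<chip-id> under
 * <debugfs>/powerpc/imc/ (typically /sys/kernel/debug/powerpc/imc/).
 */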
static void export_imc_mode_and_cmd(struct device_node *node,
				    struct imc_pmu *pmu_ptr)
{
	static u64 loc, *imc_mode_addr, *imc_cmd_addr;
	char mode[16], cmd[16];
	u32 cb_offset;
	struct imc_mem_info *ptr = pmu_ptr->mem_info;

	imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);

	if (of_property_read_u32(node, "cb_offset", &cb_offset))
		cb_offset = IMC_CNTL_BLK_OFFSET;

	/* mem_info[] is terminated by a zeroed entry, see imc_get_mem_addr_nest() */
	while (ptr->vbase != NULL) {
		loc = (u64)(ptr->vbase) + cb_offset;
		imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
		sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
		imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
				       imc_mode_addr);

		imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
		sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
		imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
				       imc_cmd_addr);
		ptr++;
	}
}

/*
 * imc_get_mem_addr_nest: Function to get the nest counter memory region
 * for each chip
 */
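/*
 * One extra zeroed entry is allocated at the end of pmu_ptr->mem_info[];
 * its NULL vbase acts as the terminator that export_imc_mode_and_cmd()
 * walks up to.
 */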
static int imc_get_mem_addr_nest(struct device_node *node,
				 struct imc_pmu *pmu_ptr,
				 u32 offset)
{
	int nr_chips = 0, i;
	u64 *base_addr_arr, baddr;
	u32 *chipid_arr;

	nr_chips = of_property_count_u32_elems(node, "chip-id");
	if (nr_chips <= 0)
		return -ENODEV;

	base_addr_arr = kcalloc(nr_chips, sizeof(*base_addr_arr), GFP_KERNEL);
	if (!base_addr_arr)
		return -ENOMEM;

	chipid_arr = kcalloc(nr_chips, sizeof(*chipid_arr), GFP_KERNEL);
	if (!chipid_arr) {
		kfree(base_addr_arr);
		return -ENOMEM;
	}

	if (of_property_read_u32_array(node, "chip-id", chipid_arr, nr_chips))
		goto error;

	if (of_property_read_u64_array(node, "base-addr", base_addr_arr,
								nr_chips))
		goto error;

	pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
				    GFP_KERNEL);
	if (!pmu_ptr->mem_info)
		goto error;

	for (i = 0; i < nr_chips; i++) {
		pmu_ptr->mem_info[i].id = chipid_arr[i];
		baddr = base_addr_arr[i] + offset;
		pmu_ptr->mem_info[i].vbase = phys_to_virt(baddr);
	}

	pmu_ptr->imc_counter_mmaped = true;
	kfree(base_addr_arr);
	kfree(chipid_arr);
	return 0;

error:
	kfree(base_addr_arr);
	kfree(chipid_arr);
	return -1;
}

/*
 * imc_pmu_create: Takes the parent device node (the pmu unit), pmu_index
 * and domain as inputs.
 * Allocates memory for the struct imc_pmu and sets up its domain, size
 * and offsets.
 */
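/*
 * Returns NULL on any failure; the probe routine simply skips the unit
 * in that case.
 */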
static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
{
	int ret = 0;
	struct imc_pmu *pmu_ptr;
	u32 offset;

	/* Return for unknown domain */
	if (domain < 0)
		return NULL;

	/* memory for pmu */
	pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
	if (!pmu_ptr)
		return NULL;

	/* Set the domain */
	pmu_ptr->domain = domain;

	ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
	if (ret)
		goto free_pmu;

	if (!of_property_read_u32(parent, "offset", &offset)) {
		if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
			goto free_pmu;
	}

	/* Function to register IMC pmu */
	ret = init_imc_pmu(parent, pmu_ptr, pmu_index);
	if (ret) {
		pr_err("IMC PMU %s Register failed\n", pmu_ptr->pmu.name);
		kfree(pmu_ptr->pmu.name);
		if (pmu_ptr->domain == IMC_DOMAIN_NEST)
			kfree(pmu_ptr->mem_info);
		kfree(pmu_ptr);
		return NULL;
	}

	return pmu_ptr;

free_pmu:
	kfree(pmu_ptr);
	return NULL;
}

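/*
 * Stop the nest IMC counters: for each NUMA node that has CPUs, issue the
 * OPAL stop call on the first online CPU of that node.
 */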
static void disable_nest_pmu_counters(void)
{
	int nid, cpu;
	const struct cpumask *l_cpumask;

	get_online_cpus();
	for_each_node_with_cpus(nid) {
		l_cpumask = cpumask_of_node(nid);
		cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			continue;
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				       get_hard_smp_processor_id(cpu));
	}
	put_online_cpus();
}

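/* Stop the core IMC counters: one OPAL stop call per online core */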
static void disable_core_pmu_counters(void)
{
	cpumask_t cores_map;
	int cpu, rc;

	get_online_cpus();
	/* Disable the IMC Core functions */
	cores_map = cpu_online_cores_map();
	for_each_cpu(cpu, &cores_map) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
					    get_hard_smp_processor_id(cpu));
		if (rc)
			pr_err("%s: Failed to stop Core (cpu = %d)\n",
				__FUNCTION__, cpu);
	}
	put_online_cpus();
}

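/* Return the number of nest (chip) IMC units described in the device tree */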
int get_max_nest_dev(void)
{
	struct device_node *node;
	u32 pmu_units = 0, type;

	for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
		if (of_property_read_u32(node, "type", &type))
			continue;

		if (type == IMC_TYPE_CHIP)
			pmu_units++;
	}

	return pmu_units;
}

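/*
 * Probe: walk every IMC unit in the device tree, create a PMU for each
 * recognised type, export the nest mode/cmd debugfs files once, and
 * unregister thread-imc if core-imc did not register.
 */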
static int opal_imc_counters_probe(struct platform_device *pdev)
{
	struct device_node *imc_dev = pdev->dev.of_node;
	struct imc_pmu *pmu;
	int pmu_count = 0, domain;
	bool core_imc_reg = false, thread_imc_reg = false;
	u32 type;

	/*
	 * Check whether this is a kdump kernel. If yes, force the engines to
	 * stop and return.
	 */
	if (is_kdump_kernel()) {
		disable_nest_pmu_counters();
		disable_core_pmu_counters();
		return -ENODEV;
	}

	for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
		pmu = NULL;
		if (of_property_read_u32(imc_dev, "type", &type)) {
			pr_warn("IMC Device without type property\n");
			continue;
		}

		switch (type) {
		case IMC_TYPE_CHIP:
			domain = IMC_DOMAIN_NEST;
			break;
		case IMC_TYPE_CORE:
			domain = IMC_DOMAIN_CORE;
			break;
		case IMC_TYPE_THREAD:
			domain = IMC_DOMAIN_THREAD;
			break;
		case IMC_TYPE_TRACE:
			domain = IMC_DOMAIN_TRACE;
			break;
		default:
			pr_warn("IMC Unknown Device type\n");
			domain = -1;
			break;
		}

		pmu = imc_pmu_create(imc_dev, pmu_count, domain);
		if (pmu != NULL) {
			if (domain == IMC_DOMAIN_NEST) {
				if (!imc_debugfs_parent)
					export_imc_mode_and_cmd(imc_dev, pmu);
				pmu_count++;
			}
			if (domain == IMC_DOMAIN_CORE)
				core_imc_reg = true;
			if (domain == IMC_DOMAIN_THREAD)
				thread_imc_reg = true;
		}
	}

	/* If core imc is not registered, unregister thread-imc */
	if (!core_imc_reg && thread_imc_reg)
		unregister_thread_imc();

	return 0;
}

static void opal_imc_counters_shutdown(struct platform_device *pdev)
{
	/*
	 * This only stops the engines, which is the bare minimum.
	 * TODO: Handle proper memory cleanup and PMU unregistration.
	 */
	disable_nest_pmu_counters();
	disable_core_pmu_counters();
}

static const struct of_device_id opal_imc_match[] = {
	{ .compatible = IMC_DTB_COMPAT },
	{},
};

static struct platform_driver opal_imc_driver = {
	.driver = {
		.name = "opal-imc-counters",
		.of_match_table = opal_imc_match,
	},
	.probe = opal_imc_counters_probe,
	.shutdown = opal_imc_counters_shutdown,
};

builtin_platform_driver(opal_imc_driver);