// SPDX-License-Identifier: GPL-2.0-only
/*
 * cbe_regs.c
 *
 * Accessor routines for the various MMIO register blocks of the CBE
 *
 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 */

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/pgtable.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/cell-regs.h>

/*
 * The current implementation uses "cpu" nodes. We build our own mapping
 * array of cpu numbers to cpu nodes locally for now to allow interrupt
 * time code to have a fast path rather than calling of_get_cpu_node().
 * If we implement cpu hotplug, we'll have to install an appropriate
 * notifier in order to release references to the cpu going away.
 */
static struct cbe_regs_map
{
	struct device_node *cpu_node;
	struct device_node *be_node;
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_iic_regs __iomem *iic_regs;
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
	struct cbe_pmd_shadow_regs pmd_shadow_regs;
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;

static struct cbe_thread_map
{
	struct device_node *cpu_node;
	struct device_node *be_node;
	struct cbe_regs_map *regs;
	unsigned int thread_id;
	unsigned int cbe_id;
} cbe_thread_map[NR_CPUS];

static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };

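/*
 * Illustrative sketch of the fast path the maps above enable for
 * interrupt-time code (hypothetical caller, not part of this file):
 *
 *	struct cbe_regs_map *map = cbe_thread_map[smp_processor_id()].regs;
 *
 * i.e. a single array index instead of a device-tree walk through
 * of_get_cpu_node() on every interrupt.
 */
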
static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
	int i;
	struct device_node *tmp_np;

	if (!of_node_is_type(np, "spe")) {
		for (i = 0; i < cbe_regs_map_count; i++)
			if (cbe_regs_maps[i].cpu_node == np ||
			    cbe_regs_maps[i].be_node == np)
				return &cbe_regs_maps[i];
		return NULL;
	}

	if (np->data)
		return np->data;

	/* walk up the path until a cpu or be node is found */
	tmp_np = np;
	do {
		tmp_np = tmp_np->parent;
		/* on a correct devicetree we won't get up to root */
		BUG_ON(!tmp_np);
	} while (!of_node_is_type(tmp_np, "cpu") &&
		 !of_node_is_type(tmp_np, "be"));

	np->data = cbe_find_map(tmp_np);

	return np->data;
}

struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);
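
/*
 * Usage sketch for the node-based accessor (hypothetical caller; the
 * node reference handling follows the usual of_* conventions):
 *
 *	struct device_node *np = of_get_cpu_node(0, NULL);
 *	struct cbe_pmd_regs __iomem *pmd = np ? cbe_get_pmd_regs(np) : NULL;
 *
 *	if (pmd)
 *		... access the block with in_be64()/out_be64() ...
 *	of_node_put(np);
 */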

struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
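
/*
 * Minimal sketch of the per-cpu fast path (hypothetical caller; the
 * pm_status field name is assumed from the layout in asm/cell-regs.h):
 *
 *	struct cbe_pmd_regs __iomem *pmd;
 *	u64 status;
 *
 *	pmd = cbe_get_cpu_pmd_regs(smp_processor_id());
 *	if (pmd)
 *		status = in_be64(&pmd->pm_status);
 */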

struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}

struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}

struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}

struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);

u32 cbe_get_hw_thread_id(int cpu)
{
	return cbe_thread_map[cpu].thread_id;
}
EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
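
/*
 * Sketch: the hardware thread id distinguishes the two SMT threads of a
 * Cell PPE. A hypothetical caller that acts only on primary threads:
 *
 *	if (cbe_get_hw_thread_id(cpu) == 0)
 *		... cpu is thread 0 of its PPE ...
 */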

u32 cbe_cpu_to_node(int cpu)
{
	return cbe_thread_map[cpu].cbe_id;
}
EXPORT_SYMBOL_GPL(cbe_cpu_to_node);

u32 cbe_node_to_cpu(int node)
{
	return cpumask_first(&cbe_local_mask[node]);
}
EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
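
/*
 * The two helpers above are inverses at chip granularity. Illustrative
 * round trip (hypothetical caller):
 *
 *	u32 node = cbe_cpu_to_node(smp_processor_id());
 *	u32 first = cbe_node_to_cpu(node);
 *
 * where "first" is the first cpu on that BE chip, letting per-chip
 * resources be addressed from any of the chip's threads.
 */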

static struct device_node *cbe_get_be_node(int cpu_id)
{
	struct device_node *np;

	for_each_node_by_type(np, "be") {
		int len, i;
		const phandle *cpu_handle;

		cpu_handle = of_get_property(np, "cpus", &len);

		/*
		 * the CAB SLOF tree is non compliant, so we just assume
		 * there is only one node
		 */
		if (WARN_ON_ONCE(!cpu_handle))
			return np;

		/* len is in bytes, so the property holds len/4 phandles */
		for (i = 0; i < len / sizeof(phandle); i++)
			if (of_find_node_by_phandle(cpu_handle[i]) ==
			    of_get_cpu_node(cpu_id, NULL))
				return np;
	}

	return NULL;
}
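
/*
 * Shape of the devicetree this walk expects (illustrative sketch; node
 * and phandle names are hypothetical):
 *
 *	be@0 {
 *		device_type = "be";
 *		cpus = <&cpu0 &cpu1>;
 *	};
 *
 * where each entry of "cpus" is a phandle to one of the chip's cpu
 * nodes.
 */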

static void __init cbe_fill_regs_map(struct cbe_regs_map *map)
{
	if (map->be_node) {
		struct device_node *be, *np;

		be = map->be_node;

		for_each_node_by_type(np, "pervasive")
			if (of_get_parent(np) == be)
				map->pmd_regs = of_iomap(np, 0);

		for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller")
			if (of_get_parent(np) == be)
				map->iic_regs = of_iomap(np, 2);

		for_each_node_by_type(np, "mic-tm")
			if (of_get_parent(np) == be)
				map->mic_tm_regs = of_iomap(np, 0);
	} else {
		struct device_node *cpu;
		/* That hack must die die die ! */
		const struct address_prop {
			unsigned long address;
			unsigned int len;
		} __attribute__((packed)) *prop;

		cpu = map->cpu_node;

		prop = of_get_property(cpu, "pervasive", NULL);
		if (prop != NULL)
			map->pmd_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "iic", NULL);
		if (prop != NULL)
			map->iic_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "mic-tm", NULL);
		if (prop != NULL)
			map->mic_tm_regs = ioremap(prop->address, prop->len);
	}
}
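
/*
 * In the legacy (non-"be") case above, the raw property bytes are read
 * through the packed struct address_prop: an 8-byte physical address
 * immediately followed by a 4-byte length, with no padding in between.
 * The __attribute__((packed)) is what keeps the compiler from inserting
 * a hole between the two fields.
 */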

void __init cbe_regs_init(void)
{
	int i;
	unsigned int thread_id;
	struct device_node *cpu;

	/* Build local fast map of CPUs */
	for_each_possible_cpu(i) {
		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
		cbe_thread_map[i].be_node = cbe_get_be_node(i);
		cbe_thread_map[i].thread_id = thread_id;
	}

	/* Find maps for each device tree CPU */
	for_each_node_by_type(cpu, "cpu") {
		struct cbe_regs_map *map;
		unsigned int cbe_id;

		cbe_id = cbe_regs_map_count++;
		map = &cbe_regs_maps[cbe_id];

		if (cbe_regs_map_count > MAX_CBE) {
			printk(KERN_ERR "cbe_regs: More BE chips than supported!\n");
			cbe_regs_map_count--;
			of_node_put(cpu);
			return;
		}
		map->cpu_node = cpu;

		for_each_possible_cpu(i) {
			struct cbe_thread_map *thread = &cbe_thread_map[i];

			if (thread->cpu_node == cpu) {
				thread->regs = map;
				thread->cbe_id = cbe_id;
				map->be_node = thread->be_node;
				cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
				if (thread->thread_id == 0)
					cpumask_set_cpu(i, &cbe_first_online_cpu);
			}
		}

		cbe_fill_regs_map(map);
	}
}
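
/*
 * Note on ordering (sketch): cbe_regs_init() must run once at boot,
 * before any of the accessors above are used; on cell it is invoked
 * from the platform setup code. A hypothetical call sequence:
 *
 *	cbe_regs_init();
 *	...
 *	pmd_regs = cbe_get_cpu_pmd_regs(smp_processor_id());
 */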