xref: /OK3568_Linux_fs/kernel/arch/powerpc/platforms/cell/spu_manage.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * spu management operations for of based platforms
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
6*4882a593Smuzhiyun  * Copyright 2006 Sony Corp.
7*4882a593Smuzhiyun  * (C) Copyright 2007 TOSHIBA CORPORATION
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/interrupt.h>
11*4882a593Smuzhiyun #include <linux/list.h>
12*4882a593Smuzhiyun #include <linux/export.h>
13*4882a593Smuzhiyun #include <linux/ptrace.h>
14*4882a593Smuzhiyun #include <linux/wait.h>
15*4882a593Smuzhiyun #include <linux/mm.h>
16*4882a593Smuzhiyun #include <linux/io.h>
17*4882a593Smuzhiyun #include <linux/mutex.h>
18*4882a593Smuzhiyun #include <linux/device.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #include <asm/spu.h>
21*4882a593Smuzhiyun #include <asm/spu_priv1.h>
22*4882a593Smuzhiyun #include <asm/firmware.h>
23*4882a593Smuzhiyun #include <asm/prom.h>
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #include "spufs/spufs.h"
26*4882a593Smuzhiyun #include "interrupt.h"
27*4882a593Smuzhiyun 
/*
 * spu_devnode - return the device-tree node backing an SPU
 * @spu: SPU whose firmware node is wanted
 *
 * The reference on this node was taken in of_create_spu(); callers
 * must not drop it themselves.
 */
struct device_node *spu_devnode(struct spu *spu)
{
	return spu->devnode;
}

EXPORT_SYMBOL_GPL(spu_devnode);
34*4882a593Smuzhiyun 
find_spu_unit_number(struct device_node * spe)35*4882a593Smuzhiyun static u64 __init find_spu_unit_number(struct device_node *spe)
36*4882a593Smuzhiyun {
37*4882a593Smuzhiyun 	const unsigned int *prop;
38*4882a593Smuzhiyun 	int proplen;
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun 	/* new device trees should provide the physical-id attribute */
41*4882a593Smuzhiyun 	prop = of_get_property(spe, "physical-id", &proplen);
42*4882a593Smuzhiyun 	if (proplen == 4)
43*4882a593Smuzhiyun 		return (u64)*prop;
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	/* celleb device tree provides the unit-id */
46*4882a593Smuzhiyun 	prop = of_get_property(spe, "unit-id", &proplen);
47*4882a593Smuzhiyun 	if (proplen == 4)
48*4882a593Smuzhiyun 		return (u64)*prop;
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun 	/* legacy device trees provide the id in the reg attribute */
51*4882a593Smuzhiyun 	prop = of_get_property(spe, "reg", &proplen);
52*4882a593Smuzhiyun 	if (proplen == 4)
53*4882a593Smuzhiyun 		return (u64)*prop;
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 	return 0;
56*4882a593Smuzhiyun }
57*4882a593Smuzhiyun 
/*
 * spu_unmap - tear down all MMIO mappings of an SPU
 *
 * priv1 is only mapped by the kernel when running bare-metal (no LPAR
 * firmware feature), so it is only unmapped in that case.
 */
static void spu_unmap(struct spu *spu)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		iounmap(spu->priv1);
	iounmap(spu->priv2);
	iounmap(spu->problem);
	/* local_store is used as RAM, so its __iomem tag was force-cast away */
	iounmap((__force u8 __iomem *)spu->local_store);
}
66*4882a593Smuzhiyun 
spu_map_interrupts_old(struct spu * spu,struct device_node * np)67*4882a593Smuzhiyun static int __init spu_map_interrupts_old(struct spu *spu,
68*4882a593Smuzhiyun 	struct device_node *np)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun 	unsigned int isrc;
71*4882a593Smuzhiyun 	const u32 *tmp;
72*4882a593Smuzhiyun 	int nid;
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	/* Get the interrupt source unit from the device-tree */
75*4882a593Smuzhiyun 	tmp = of_get_property(np, "isrc", NULL);
76*4882a593Smuzhiyun 	if (!tmp)
77*4882a593Smuzhiyun 		return -ENODEV;
78*4882a593Smuzhiyun 	isrc = tmp[0];
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 	tmp = of_get_property(np->parent->parent, "node-id", NULL);
81*4882a593Smuzhiyun 	if (!tmp) {
82*4882a593Smuzhiyun 		printk(KERN_WARNING "%s: can't find node-id\n", __func__);
83*4882a593Smuzhiyun 		nid = spu->node;
84*4882a593Smuzhiyun 	} else
85*4882a593Smuzhiyun 		nid = tmp[0];
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	/* Add the node number */
88*4882a593Smuzhiyun 	isrc |= nid << IIC_IRQ_NODE_SHIFT;
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	/* Now map interrupts of all 3 classes */
91*4882a593Smuzhiyun 	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
92*4882a593Smuzhiyun 	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
93*4882a593Smuzhiyun 	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun 	/* Right now, we only fail if class 2 failed */
96*4882a593Smuzhiyun 	if (!spu->irqs[2])
97*4882a593Smuzhiyun 		return -EINVAL;
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	return 0;
100*4882a593Smuzhiyun }
101*4882a593Smuzhiyun 
spu_map_prop_old(struct spu * spu,struct device_node * n,const char * name)102*4882a593Smuzhiyun static void __iomem * __init spu_map_prop_old(struct spu *spu,
103*4882a593Smuzhiyun 					      struct device_node *n,
104*4882a593Smuzhiyun 					      const char *name)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun 	const struct address_prop {
107*4882a593Smuzhiyun 		unsigned long address;
108*4882a593Smuzhiyun 		unsigned int len;
109*4882a593Smuzhiyun 	} __attribute__((packed)) *prop;
110*4882a593Smuzhiyun 	int proplen;
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	prop = of_get_property(n, name, &proplen);
113*4882a593Smuzhiyun 	if (prop == NULL || proplen != sizeof (struct address_prop))
114*4882a593Smuzhiyun 		return NULL;
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun 	return ioremap(prop->address, prop->len);
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun 
spu_map_device_old(struct spu * spu)119*4882a593Smuzhiyun static int __init spu_map_device_old(struct spu *spu)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun 	struct device_node *node = spu->devnode;
122*4882a593Smuzhiyun 	const char *prop;
123*4882a593Smuzhiyun 	int ret;
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	ret = -ENODEV;
126*4882a593Smuzhiyun 	spu->name = of_get_property(node, "name", NULL);
127*4882a593Smuzhiyun 	if (!spu->name)
128*4882a593Smuzhiyun 		goto out;
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	prop = of_get_property(node, "local-store", NULL);
131*4882a593Smuzhiyun 	if (!prop)
132*4882a593Smuzhiyun 		goto out;
133*4882a593Smuzhiyun 	spu->local_store_phys = *(unsigned long *)prop;
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun 	/* we use local store as ram, not io memory */
136*4882a593Smuzhiyun 	spu->local_store = (void __force *)
137*4882a593Smuzhiyun 		spu_map_prop_old(spu, node, "local-store");
138*4882a593Smuzhiyun 	if (!spu->local_store)
139*4882a593Smuzhiyun 		goto out;
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun 	prop = of_get_property(node, "problem", NULL);
142*4882a593Smuzhiyun 	if (!prop)
143*4882a593Smuzhiyun 		goto out_unmap;
144*4882a593Smuzhiyun 	spu->problem_phys = *(unsigned long *)prop;
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun 	spu->problem = spu_map_prop_old(spu, node, "problem");
147*4882a593Smuzhiyun 	if (!spu->problem)
148*4882a593Smuzhiyun 		goto out_unmap;
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	spu->priv2 = spu_map_prop_old(spu, node, "priv2");
151*4882a593Smuzhiyun 	if (!spu->priv2)
152*4882a593Smuzhiyun 		goto out_unmap;
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
155*4882a593Smuzhiyun 		spu->priv1 = spu_map_prop_old(spu, node, "priv1");
156*4882a593Smuzhiyun 		if (!spu->priv1)
157*4882a593Smuzhiyun 			goto out_unmap;
158*4882a593Smuzhiyun 	}
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	ret = 0;
161*4882a593Smuzhiyun 	goto out;
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun out_unmap:
164*4882a593Smuzhiyun 	spu_unmap(spu);
165*4882a593Smuzhiyun out:
166*4882a593Smuzhiyun 	return ret;
167*4882a593Smuzhiyun }
168*4882a593Smuzhiyun 
spu_map_interrupts(struct spu * spu,struct device_node * np)169*4882a593Smuzhiyun static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
170*4882a593Smuzhiyun {
171*4882a593Smuzhiyun 	int i;
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun 	for (i=0; i < 3; i++) {
174*4882a593Smuzhiyun 		spu->irqs[i] = irq_of_parse_and_map(np, i);
175*4882a593Smuzhiyun 		if (!spu->irqs[i])
176*4882a593Smuzhiyun 			goto err;
177*4882a593Smuzhiyun 	}
178*4882a593Smuzhiyun 	return 0;
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun err:
181*4882a593Smuzhiyun 	pr_debug("failed to map irq %x for spu %s\n", i, spu->name);
182*4882a593Smuzhiyun 	for (; i >= 0; i--) {
183*4882a593Smuzhiyun 		if (spu->irqs[i])
184*4882a593Smuzhiyun 			irq_dispose_mapping(spu->irqs[i]);
185*4882a593Smuzhiyun 	}
186*4882a593Smuzhiyun 	return -EINVAL;
187*4882a593Smuzhiyun }
188*4882a593Smuzhiyun 
/*
 * Map resource @nr of the SPU's device node.  The virtual address is
 * returned through @virt and, when @phys is non-NULL, the physical
 * start address through @phys.  Returns 0 or a negative errno.
 */
static int spu_map_resource(struct spu *spu, int nr,
			    void __iomem** virt, unsigned long *phys)
{
	struct resource res = { };
	int rc;

	rc = of_address_to_resource(spu->devnode, nr, &res);
	if (rc)
		return rc;
	if (phys)
		*phys = res.start;
	*virt = ioremap(res.start, resource_size(&res));
	return *virt ? 0 : -EINVAL;
}
208*4882a593Smuzhiyun 
/*
 * spu_map_device - map an SPU's register areas from standard address
 * resources (new-style device trees)
 *
 * Resource indices: 0 = local store, 1 = problem state, 2 = priv2,
 * 3 = priv1.  priv1 is skipped under a hypervisor (LPAR firmware
 * feature), matching spu_unmap().  Returns 0 on success or a negative
 * errno; on failure all partial mappings are torn down.
 */
static int __init spu_map_device(struct spu *spu)
{
	struct device_node *np = spu->devnode;
	int ret = -ENODEV;

	spu->name = of_get_property(np, "name", NULL);
	if (!spu->name)
		goto out;

	ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store,
			       &spu->local_store_phys);
	if (ret) {
		pr_debug("spu_new: failed to map %pOF resource 0\n",
			 np);
		goto out;
	}
	ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem,
			       &spu->problem_phys);
	if (ret) {
		pr_debug("spu_new: failed to map %pOF resource 1\n",
			 np);
		goto out_unmap;
	}
	ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL);
	if (ret) {
		pr_debug("spu_new: failed to map %pOF resource 2\n",
			 np);
		goto out_unmap;
	}
	/* priv1 belongs to the hypervisor under LPAR; ret stays 0 then */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		ret = spu_map_resource(spu, 3,
			       (void __iomem**)&spu->priv1, NULL);
	if (ret) {
		pr_debug("spu_new: failed to map %pOF resource 3\n",
			 np);
		goto out_unmap;
	}
	pr_debug("spu_new: %pOF maps:\n", np);
	pr_debug("  local store   : 0x%016lx -> 0x%p\n",
		 spu->local_store_phys, spu->local_store);
	pr_debug("  problem state : 0x%016lx -> 0x%p\n",
		 spu->problem_phys, spu->problem);
	pr_debug("  priv2         :                       0x%p\n", spu->priv2);
	pr_debug("  priv1         :                       0x%p\n", spu->priv1);

	return 0;

out_unmap:
	spu_unmap(spu);
out:
	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
	return ret;
}
262*4882a593Smuzhiyun 
of_enumerate_spus(int (* fn)(void * data))263*4882a593Smuzhiyun static int __init of_enumerate_spus(int (*fn)(void *data))
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun 	int ret;
266*4882a593Smuzhiyun 	struct device_node *node;
267*4882a593Smuzhiyun 	unsigned int n = 0;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	ret = -ENODEV;
270*4882a593Smuzhiyun 	for_each_node_by_type(node, "spe") {
271*4882a593Smuzhiyun 		ret = fn(node);
272*4882a593Smuzhiyun 		if (ret) {
273*4882a593Smuzhiyun 			printk(KERN_WARNING "%s: Error initializing %pOFn\n",
274*4882a593Smuzhiyun 				__func__, node);
275*4882a593Smuzhiyun 			of_node_put(node);
276*4882a593Smuzhiyun 			break;
277*4882a593Smuzhiyun 		}
278*4882a593Smuzhiyun 		n++;
279*4882a593Smuzhiyun 	}
280*4882a593Smuzhiyun 	return ret ? ret : n;
281*4882a593Smuzhiyun }
282*4882a593Smuzhiyun 
/*
 * of_create_spu - management-ops hook: bind a freshly allocated SPU to
 * its device-tree node and map its registers and interrupts
 * @spu:  SPU being initialized
 * @data: the "spe" device node, passed through from of_enumerate_spus()
 *
 * Tries the new-style resource/interrupt mapping first and falls back
 * to the legacy device-tree layout.  Returns 0 or a negative errno.
 */
static int __init of_create_spu(struct spu *spu, void *data)
{
	int ret;
	struct device_node *spe = (struct device_node *)data;
	/* remember across calls that we already warned about a legacy tree */
	static int legacy_map = 0, legacy_irq = 0;

	/* hold a node reference for the SPU's lifetime (see of_destroy_spu) */
	spu->devnode = of_node_get(spe);
	spu->spe_id = find_spu_unit_number(spe);

	spu->node = of_node_to_nid(spe);
	if (spu->node >= MAX_NUMNODES) {
		printk(KERN_WARNING "SPE %pOF on node %d ignored,"
		       " node number too big\n", spe, spu->node);
		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
		ret = -ENODEV;
		goto out;
	}

	ret = spu_map_device(spu);
	if (ret) {
		if (!legacy_map) {
			legacy_map = 1;
			printk(KERN_WARNING "%s: Legacy device tree found, "
				"trying to map old style\n", __func__);
		}
		ret = spu_map_device_old(spu);
		if (ret) {
			printk(KERN_ERR "Unable to map %s\n",
				spu->name);
			goto out;
		}
	}

	ret = spu_map_interrupts(spu, spe);
	if (ret) {
		if (!legacy_irq) {
			legacy_irq = 1;
			printk(KERN_WARNING "%s: Legacy device tree found, "
				"trying old style irq\n", __func__);
		}
		ret = spu_map_interrupts_old(spu, spe);
		if (ret) {
			printk(KERN_ERR "%s: could not map interrupts\n",
				spu->name);
			goto out_unmap;
		}
	}

	pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name,
		spu->local_store, spu->problem, spu->priv1,
		spu->priv2, spu->number);
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}
341*4882a593Smuzhiyun 
/* Undo of_create_spu(): unmap the registers and drop the node reference. */
static int of_destroy_spu(struct spu *spu)
{
	spu_unmap(spu);
	of_node_put(spu->devnode);
	return 0;
}
348*4882a593Smuzhiyun 
/* Start the SPU via the context's master-run start operation. */
static void enable_spu_by_master_run(struct spu_context *ctx)
{
	ctx->ops->master_start(ctx);
}
353*4882a593Smuzhiyun 
/* Stop the SPU via the context's master-run stop operation. */
static void disable_spu_by_master_run(struct spu_context *ctx)
{
	ctx->ops->master_stop(ctx);
}
358*4882a593Smuzhiyun 
/* Hardcoded affinity idxs for qs20 */
#define QS20_SPES_PER_BE 8
/* Physical chaining order of the 8 SPE "reg" ids on a QS20 blade */
static int qs20_reg_idxs[QS20_SPES_PER_BE] =   { 0, 2, 4, 6, 7, 5, 3, 1 };
/* has_mem_affinity value per "reg" id (see init_affinity_qs20_harcoded) */
static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };
363*4882a593Smuzhiyun 
/*
 * spu_lookup_reg - find the SPU on @node whose "reg" property equals @reg
 *
 * Returns NULL when no SPU matches.  Nodes without a "reg" property are
 * skipped; the previous code dereferenced the property unconditionally
 * and would oops on such a node.
 */
static struct spu *spu_lookup_reg(int node, u32 reg)
{
	struct spu *spu;
	const u32 *spu_reg;

	list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
		spu_reg = of_get_property(spu_devnode(spu), "reg", NULL);
		if (spu_reg && *spu_reg == reg)
			return spu;
	}
	return NULL;
}
376*4882a593Smuzhiyun 
init_affinity_qs20_harcoded(void)377*4882a593Smuzhiyun static void init_affinity_qs20_harcoded(void)
378*4882a593Smuzhiyun {
379*4882a593Smuzhiyun 	int node, i;
380*4882a593Smuzhiyun 	struct spu *last_spu, *spu;
381*4882a593Smuzhiyun 	u32 reg;
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 	for (node = 0; node < MAX_NUMNODES; node++) {
384*4882a593Smuzhiyun 		last_spu = NULL;
385*4882a593Smuzhiyun 		for (i = 0; i < QS20_SPES_PER_BE; i++) {
386*4882a593Smuzhiyun 			reg = qs20_reg_idxs[i];
387*4882a593Smuzhiyun 			spu = spu_lookup_reg(node, reg);
388*4882a593Smuzhiyun 			if (!spu)
389*4882a593Smuzhiyun 				continue;
390*4882a593Smuzhiyun 			spu->has_mem_affinity = qs20_reg_memory[reg];
391*4882a593Smuzhiyun 			if (last_spu)
392*4882a593Smuzhiyun 				list_add_tail(&spu->aff_list,
393*4882a593Smuzhiyun 						&last_spu->aff_list);
394*4882a593Smuzhiyun 			last_spu = spu;
395*4882a593Smuzhiyun 		}
396*4882a593Smuzhiyun 	}
397*4882a593Smuzhiyun }
398*4882a593Smuzhiyun 
of_has_vicinity(void)399*4882a593Smuzhiyun static int of_has_vicinity(void)
400*4882a593Smuzhiyun {
401*4882a593Smuzhiyun 	struct device_node *dn;
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun 	for_each_node_by_type(dn, "spe") {
404*4882a593Smuzhiyun 		if (of_find_property(dn, "vicinity", NULL))  {
405*4882a593Smuzhiyun 			of_node_put(dn);
406*4882a593Smuzhiyun 			return 1;
407*4882a593Smuzhiyun 		}
408*4882a593Smuzhiyun 	}
409*4882a593Smuzhiyun 	return 0;
410*4882a593Smuzhiyun }
411*4882a593Smuzhiyun 
devnode_spu(int cbe,struct device_node * dn)412*4882a593Smuzhiyun static struct spu *devnode_spu(int cbe, struct device_node *dn)
413*4882a593Smuzhiyun {
414*4882a593Smuzhiyun 	struct spu *spu;
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 	list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list)
417*4882a593Smuzhiyun 		if (spu_devnode(spu) == dn)
418*4882a593Smuzhiyun 			return spu;
419*4882a593Smuzhiyun 	return NULL;
420*4882a593Smuzhiyun }
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun static struct spu *
neighbour_spu(int cbe,struct device_node * target,struct device_node * avoid)423*4882a593Smuzhiyun neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
424*4882a593Smuzhiyun {
425*4882a593Smuzhiyun 	struct spu *spu;
426*4882a593Smuzhiyun 	struct device_node *spu_dn;
427*4882a593Smuzhiyun 	const phandle *vic_handles;
428*4882a593Smuzhiyun 	int lenp, i;
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun 	list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) {
431*4882a593Smuzhiyun 		spu_dn = spu_devnode(spu);
432*4882a593Smuzhiyun 		if (spu_dn == avoid)
433*4882a593Smuzhiyun 			continue;
434*4882a593Smuzhiyun 		vic_handles = of_get_property(spu_dn, "vicinity", &lenp);
435*4882a593Smuzhiyun 		for (i=0; i < (lenp / sizeof(phandle)); i++) {
436*4882a593Smuzhiyun 			if (vic_handles[i] == target->phandle)
437*4882a593Smuzhiyun 				return spu;
438*4882a593Smuzhiyun 		}
439*4882a593Smuzhiyun 	}
440*4882a593Smuzhiyun 	return NULL;
441*4882a593Smuzhiyun }
442*4882a593Smuzhiyun 
init_affinity_node(int cbe)443*4882a593Smuzhiyun static void init_affinity_node(int cbe)
444*4882a593Smuzhiyun {
445*4882a593Smuzhiyun 	struct spu *spu, *last_spu;
446*4882a593Smuzhiyun 	struct device_node *vic_dn, *last_spu_dn;
447*4882a593Smuzhiyun 	phandle avoid_ph;
448*4882a593Smuzhiyun 	const phandle *vic_handles;
449*4882a593Smuzhiyun 	int lenp, i, added;
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 	last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu,
452*4882a593Smuzhiyun 								cbe_list);
453*4882a593Smuzhiyun 	avoid_ph = 0;
454*4882a593Smuzhiyun 	for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) {
455*4882a593Smuzhiyun 		last_spu_dn = spu_devnode(last_spu);
456*4882a593Smuzhiyun 		vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp);
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun 		/*
459*4882a593Smuzhiyun 		 * Walk through each phandle in vicinity property of the spu
460*4882a593Smuzhiyun 		 * (tipically two vicinity phandles per spe node)
461*4882a593Smuzhiyun 		 */
462*4882a593Smuzhiyun 		for (i = 0; i < (lenp / sizeof(phandle)); i++) {
463*4882a593Smuzhiyun 			if (vic_handles[i] == avoid_ph)
464*4882a593Smuzhiyun 				continue;
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun 			vic_dn = of_find_node_by_phandle(vic_handles[i]);
467*4882a593Smuzhiyun 			if (!vic_dn)
468*4882a593Smuzhiyun 				continue;
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 			if (of_node_name_eq(vic_dn, "spe") ) {
471*4882a593Smuzhiyun 				spu = devnode_spu(cbe, vic_dn);
472*4882a593Smuzhiyun 				avoid_ph = last_spu_dn->phandle;
473*4882a593Smuzhiyun 			} else {
474*4882a593Smuzhiyun 				/*
475*4882a593Smuzhiyun 				 * "mic-tm" and "bif0" nodes do not have
476*4882a593Smuzhiyun 				 * vicinity property. So we need to find the
477*4882a593Smuzhiyun 				 * spe which has vic_dn as neighbour, but
478*4882a593Smuzhiyun 				 * skipping the one we came from (last_spu_dn)
479*4882a593Smuzhiyun 				 */
480*4882a593Smuzhiyun 				spu = neighbour_spu(cbe, vic_dn, last_spu_dn);
481*4882a593Smuzhiyun 				if (!spu)
482*4882a593Smuzhiyun 					continue;
483*4882a593Smuzhiyun 				if (of_node_name_eq(vic_dn, "mic-tm")) {
484*4882a593Smuzhiyun 					last_spu->has_mem_affinity = 1;
485*4882a593Smuzhiyun 					spu->has_mem_affinity = 1;
486*4882a593Smuzhiyun 				}
487*4882a593Smuzhiyun 				avoid_ph = vic_dn->phandle;
488*4882a593Smuzhiyun 			}
489*4882a593Smuzhiyun 
490*4882a593Smuzhiyun 			list_add_tail(&spu->aff_list, &last_spu->aff_list);
491*4882a593Smuzhiyun 			last_spu = spu;
492*4882a593Smuzhiyun 			break;
493*4882a593Smuzhiyun 		}
494*4882a593Smuzhiyun 	}
495*4882a593Smuzhiyun }
496*4882a593Smuzhiyun 
init_affinity_fw(void)497*4882a593Smuzhiyun static void init_affinity_fw(void)
498*4882a593Smuzhiyun {
499*4882a593Smuzhiyun 	int cbe;
500*4882a593Smuzhiyun 
501*4882a593Smuzhiyun 	for (cbe = 0; cbe < MAX_NUMNODES; cbe++)
502*4882a593Smuzhiyun 		init_affinity_node(cbe);
503*4882a593Smuzhiyun }
504*4882a593Smuzhiyun 
init_affinity(void)505*4882a593Smuzhiyun static int __init init_affinity(void)
506*4882a593Smuzhiyun {
507*4882a593Smuzhiyun 	if (of_has_vicinity()) {
508*4882a593Smuzhiyun 		init_affinity_fw();
509*4882a593Smuzhiyun 	} else {
510*4882a593Smuzhiyun 		if (of_machine_is_compatible("IBM,CPBW-1.0"))
511*4882a593Smuzhiyun 			init_affinity_qs20_harcoded();
512*4882a593Smuzhiyun 		else
513*4882a593Smuzhiyun 			printk("No affinity configuration found\n");
514*4882a593Smuzhiyun 	}
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun 	return 0;
517*4882a593Smuzhiyun }
518*4882a593Smuzhiyun 
/* Device-tree (OF) implementation of the SPU management backend. */
const struct spu_management_ops spu_management_of_ops = {
	.enumerate_spus = of_enumerate_spus,
	.create_spu = of_create_spu,
	.destroy_spu = of_destroy_spu,
	.enable_spu = enable_spu_by_master_run,
	.disable_spu = disable_spu_by_master_run,
	.init_affinity = init_affinity,
};
527