// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RapidIO enumeration and discovery support
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * Copyright 2009 Integrated Device Technology, Inc.
 * Alex Bounine <alexandre.bounine@idt.com>
 * - Added Port-Write/Error Management initialization and handling
 *
 * Copyright 2009 Sysgo AG
 * Thomas Moll <thomas.moll@sysgo.com>
 * - Added Input- Output- enable functionality, to allow full communication
 */

#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
#include <linux/rio_regs.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "rio.h"

static void rio_init_em(struct rio_dev *rdev);

struct rio_id_table {
	u16 start;	/* logical minimal id */
	u32 max;	/* max number of IDs in table */
	spinlock_t lock;
	unsigned long table[];
};

static int next_destid = 0;
static int next_comptag = 1;

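/*
 * Destination IDs handed out during enumeration come from a per-network
 * bitmap kept in struct rio_id_table (net->enum_data): bit n stands for
 * destID (start + n).  The helpers below return RIO_INVALID_DESTID once
 * the bitmap is exhausted.  A minimal usage sketch (illustrative only,
 * assuming "net" is a rio_net whose enum_data was set up by
 * rio_scan_alloc_net()):
 *
 *	u16 id = rio_destid_alloc(net);
 *
 *	if (id != RIO_INVALID_DESTID) {
 *		...
 *		rio_destid_free(net, id);
 *	}
 */
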
/**
 * rio_destid_alloc - Allocate next available destID for given network
 * @net: RIO network
 *
 * Returns next available device destination ID for the specified RIO network.
 * Marks allocated ID as one in use.
 * Returns RIO_INVALID_DESTID if new destID is not available.
 */
static u16 rio_destid_alloc(struct rio_net *net)
{
	int destid;
	struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data;

	spin_lock(&idtab->lock);
	destid = find_first_zero_bit(idtab->table, idtab->max);

	if (destid < idtab->max) {
		set_bit(destid, idtab->table);
		destid += idtab->start;
	} else
		destid = RIO_INVALID_DESTID;

	spin_unlock(&idtab->lock);
	return (u16)destid;
}

/**
 * rio_destid_reserve - Reserve the specified destID
 * @net: RIO network
 * @destid: destID to reserve
 *
 * Tries to reserve the specified destID.
 * Returns 0 if successful.
 */
static int rio_destid_reserve(struct rio_net *net, u16 destid)
{
	int oldbit;
	struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data;

	destid -= idtab->start;
	spin_lock(&idtab->lock);
	oldbit = test_and_set_bit(destid, idtab->table);
	spin_unlock(&idtab->lock);
	return oldbit;
}

/**
 * rio_destid_free - free a previously allocated destID
 * @net: RIO network
 * @destid: destID to free
 *
 * Makes the specified destID available for use.
 */
static void rio_destid_free(struct rio_net *net, u16 destid)
{
	struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data;

	destid -= idtab->start;
	spin_lock(&idtab->lock);
	clear_bit(destid, idtab->table);
	spin_unlock(&idtab->lock);
}

/**
 * rio_destid_first - return first destID in use
 * @net: RIO network
 */
static u16 rio_destid_first(struct rio_net *net)
{
	int destid;
	struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data;

	spin_lock(&idtab->lock);
	destid = find_first_bit(idtab->table, idtab->max);
	if (destid >= idtab->max)
		destid = RIO_INVALID_DESTID;
	else
		destid += idtab->start;
	spin_unlock(&idtab->lock);
	return (u16)destid;
}

/**
 * rio_destid_next - return next destID in use
 * @net: RIO network
 * @from: destination ID from which search shall continue
 */
static u16 rio_destid_next(struct rio_net *net, u16 from)
{
	int destid;
	struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data;

	spin_lock(&idtab->lock);
	destid = find_next_bit(idtab->table, idtab->max, from);
	if (destid >= idtab->max)
		destid = RIO_INVALID_DESTID;
	else
		destid += idtab->start;
	spin_unlock(&idtab->lock);
	return (u16)destid;
}

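/*
 * The Base Device ID CSR (RIO_DID_CSR) carries both the 8-bit device ID
 * used in small (sys_size = 0) systems and the 16-bit ID used in large
 * systems.  RIO_GET_DID()/RIO_SET_DID() select the proper field based on
 * the mport's sys_size, so callers never touch the raw register layout.
 * Reading the local mport's own ID, for instance, looks roughly like this
 * (illustrative sketch, "mport" being any struct rio_mport):
 *
 *	u32 regval;
 *	u16 did;
 *
 *	rio_local_read_config_32(mport, RIO_DID_CSR, &regval);
 *	did = RIO_GET_DID(mport->sys_size, regval);
 */
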
/**
 * rio_get_device_id - Get the base/extended device id for a device
 * @port: RIO master port
 * @destid: Destination ID of device
 * @hopcount: Hopcount to device
 *
 * Reads the base/extended device id from a device. Returns the
 * 8/16-bit device ID.
 */
static u16 rio_get_device_id(struct rio_mport *port, u16 destid, u8 hopcount)
{
	u32 result;

	rio_mport_read_config_32(port, destid, hopcount, RIO_DID_CSR, &result);

	return RIO_GET_DID(port->sys_size, result);
}

/**
 * rio_set_device_id - Set the base/extended device id for a device
 * @port: RIO master port
 * @destid: Destination ID of device
 * @hopcount: Hopcount to device
 * @did: Device ID value to be written
 *
 * Writes the base/extended device id to a device.
 */
static void rio_set_device_id(struct rio_mport *port, u16 destid, u8 hopcount, u16 did)
{
	rio_mport_write_config_32(port, destid, hopcount, RIO_DID_CSR,
				  RIO_SET_DID(port->sys_size, did));
}

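/*
 * Host Device ID Lock CSR protocol, as used below: a host acquires a
 * device by writing its own host device ID into RIO_HOST_DID_LOCK_CSR and
 * wins only if the same value reads back.  Writing the holder's ID a
 * second time releases the lock, after which the register reads back as
 * 0xffff.  rio_clear_locks() relies on that read-back to verify the
 * release, and rio_enum_host() uses the same CSR to lock the local master
 * port before enumeration starts.
 */
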
/**
 * rio_clear_locks- Release all host locks and signal enumeration complete
 * @net: RIO network to run on
 *
 * Marks the component tag CSR on each device with the enumeration
 * complete flag. When complete, it then releases the host locks on
 * each device. Returns 0 on success or %-EINVAL on failure.
 */
static int rio_clear_locks(struct rio_net *net)
{
	struct rio_mport *port = net->hport;
	struct rio_dev *rdev;
	u32 result;
	int ret = 0;

	/* Release host device id locks */
	rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
				  port->host_deviceid);
	rio_local_read_config_32(port, RIO_HOST_DID_LOCK_CSR, &result);
	if ((result & 0xffff) != 0xffff) {
		printk(KERN_INFO
		       "RIO: badness when releasing host lock on master port, result %8.8x\n",
		       result);
		ret = -EINVAL;
	}
	list_for_each_entry(rdev, &net->devices, net_list) {
		rio_write_config_32(rdev, RIO_HOST_DID_LOCK_CSR,
				    port->host_deviceid);
		rio_read_config_32(rdev, RIO_HOST_DID_LOCK_CSR, &result);
		if ((result & 0xffff) != 0xffff) {
			printk(KERN_INFO
			       "RIO: badness when releasing host lock on vid %4.4x did %4.4x\n",
			       rdev->vid, rdev->did);
			ret = -EINVAL;
		}

		/* Mark device as discovered and enable master */
		rio_read_config_32(rdev,
				   rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
				   &result);
		result |= RIO_PORT_GEN_DISCOVERED | RIO_PORT_GEN_MASTER;
		rio_write_config_32(rdev,
				    rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
				    result);
	}

	return ret;
}

/**
 * rio_enum_host- Set host lock and initialize host destination ID
 * @port: Master port to issue transaction
 *
 * Sets the local host master port lock and destination ID register
 * with the host device ID value. The host device ID value is provided
 * by the platform. Returns %0 on success or %-1 on failure.
 */
static int rio_enum_host(struct rio_mport *port)
{
	u32 result;

	/* Set master port host device id lock */
	rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
				  port->host_deviceid);

	rio_local_read_config_32(port, RIO_HOST_DID_LOCK_CSR, &result);
	if ((result & 0xffff) != port->host_deviceid)
		return -1;

	/* Set master port destid and init destid ctr */
	rio_local_set_device_id(port, port->host_deviceid);
	return 0;
}

/**
 * rio_device_has_destid- Test if a device contains a destination ID register
 * @port: Master port to issue transaction
 * @src_ops: RIO device source operations
 * @dst_ops: RIO device destination operations
 *
 * Checks the provided @src_ops and @dst_ops for the necessary transaction
 * capabilities that indicate whether or not a device will implement a
 * destination ID register. Returns 1 if true or 0 if false.
 */
static int rio_device_has_destid(struct rio_mport *port, int src_ops,
				 int dst_ops)
{
	u32 mask = RIO_OPS_READ | RIO_OPS_WRITE | RIO_OPS_ATOMIC_TST_SWP | RIO_OPS_ATOMIC_INC | RIO_OPS_ATOMIC_DEC | RIO_OPS_ATOMIC_SET | RIO_OPS_ATOMIC_CLR;

	return !!((src_ops | dst_ops) & mask);
}

/**
 * rio_release_dev- Frees a RIO device struct
 * @dev: LDM device associated with a RIO device struct
 *
 * Gets the RIO device struct associated with the LDM device and frees it.
 */
static void rio_release_dev(struct device *dev)
{
	struct rio_dev *rdev;

	rdev = to_rio_dev(dev);
	kfree(rdev);
}

/**
 * rio_is_switch- Tests if a RIO device has switch capabilities
 * @rdev: RIO device
 *
 * Gets the RIO device Processing Element Features register
 * contents and tests for switch capabilities. Returns 1 if
 * the device is a switch or 0 if it is not a switch.
 */
static int rio_is_switch(struct rio_dev *rdev)
{
	if (rdev->pef & RIO_PEF_SWITCH)
		return 1;
	return 0;
}

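/*
 * rio_setup_device() below allocates one struct rio_dev per processing
 * element.  For switches the allocation is extended with struct rio_switch
 * plus one nextdev[] pointer per port (hence the struct_size() math), and
 * a route table covering the whole destID space is attached.  The device
 * name encodes the type: "%02x:s:%04x" for switches and "%02x:e:%04x" for
 * endpoints, using the network id and the RIO_CTAG_UDEVID portion of the
 * component tag.
 */
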
/**
 * rio_setup_device- Allocates and sets up a RIO device
 * @net: RIO network
 * @port: Master port to send transactions
 * @destid: Current destination ID
 * @hopcount: Current hopcount
 * @do_enum: Enumeration/Discovery mode flag
 *
 * Allocates a RIO device and configures fields based on configuration
 * space contents. If device has a destination ID register, a destination
 * ID is either assigned in enumeration mode or read from configuration
 * space in discovery mode.  If the device has switch capabilities, then
 * a switch is allocated and configured appropriately. Returns a pointer
 * to a RIO device on success or NULL on failure.
 *
 */
static struct rio_dev *rio_setup_device(struct rio_net *net,
					struct rio_mport *port, u16 destid,
					u8 hopcount, int do_enum)
{
	int ret = 0;
	struct rio_dev *rdev;
	struct rio_switch *rswitch = NULL;
	int result, rdid;
	size_t size;
	u32 swpinfo = 0;

	size = sizeof(*rdev);
	if (rio_mport_read_config_32(port, destid, hopcount,
				     RIO_PEF_CAR, &result))
		return NULL;

	if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
		rio_mport_read_config_32(port, destid, hopcount,
					 RIO_SWP_INFO_CAR, &swpinfo);
		if (result & RIO_PEF_SWITCH)
			size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo));
	}

	rdev = kzalloc(size, GFP_KERNEL);
	if (!rdev)
		return NULL;

	rdev->net = net;
	rdev->pef = result;
	rdev->swpinfo = swpinfo;
	rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR,
				 &result);
	rdev->did = result >> 16;
	rdev->vid = result & 0xffff;
	rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_INFO_CAR,
				 &rdev->device_rev);
	rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_ID_CAR,
				 &result);
	rdev->asm_did = result >> 16;
	rdev->asm_vid = result & 0xffff;
	rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_INFO_CAR,
				 &result);
	rdev->asm_rev = result >> 16;
	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
		rdev->efptr = result & 0xffff;
		rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
						hopcount, &rdev->phys_rmap);
		pr_debug("RIO: %s Register Map %d device\n",
			 __func__, rdev->phys_rmap);

		rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
						hopcount, RIO_EFB_ERR_MGMNT);
		if (!rdev->em_efptr)
			rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
						hopcount, RIO_EFB_ERR_MGMNT_HS);
	}

	rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
				 &rdev->src_ops);
	rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR,
				 &rdev->dst_ops);

	if (do_enum) {
		/* Assign component tag to device */
		if (next_comptag >= 0x10000) {
			pr_err("RIO: Component Tag Counter Overflow\n");
			goto cleanup;
		}
		rio_mport_write_config_32(port, destid, hopcount,
					  RIO_COMPONENT_TAG_CSR, next_comptag);
		rdev->comp_tag = next_comptag++;
		rdev->do_enum = true;
	} else {
		rio_mport_read_config_32(port, destid, hopcount,
					 RIO_COMPONENT_TAG_CSR,
					 &rdev->comp_tag);
	}

	if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) {
		if (do_enum) {
			rio_set_device_id(port, destid, hopcount, next_destid);
			rdev->destid = next_destid;
			next_destid = rio_destid_alloc(net);
		} else
			rdev->destid = rio_get_device_id(port, destid, hopcount);

		rdev->hopcount = 0xff;
	} else {
		/* Switch device has an associated destID which
		 * will be adjusted later
		 */
		rdev->destid = destid;
		rdev->hopcount = hopcount;
	}

	/* If a PE has both switch and other functions, show it as a switch */
	if (rio_is_switch(rdev)) {
		rswitch = rdev->rswitch;
		rswitch->port_ok = 0;
		spin_lock_init(&rswitch->lock);
		rswitch->route_table =
			kzalloc(RIO_MAX_ROUTE_ENTRIES(port->sys_size),
				GFP_KERNEL);
		if (!rswitch->route_table)
			goto cleanup;
		/* Initialize switch route table */
		for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size);
				rdid++)
			rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
		dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);

		if (do_enum)
			rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0);
	} else {
		if (do_enum)
			/* Enable Input Output Port (transmitter receiver) */
			rio_enable_rx_tx_port(port, 0, destid, hopcount, 0);

		dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);
	}

	rdev->dev.parent = &net->dev;
	rio_attach_device(rdev);
	rdev->dev.release = rio_release_dev;
	rdev->dma_mask = DMA_BIT_MASK(32);
	rdev->dev.dma_mask = &rdev->dma_mask;
	rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
				   0, 0xffff);

	ret = rio_add_device(rdev);
	if (ret)
		goto cleanup;

	rio_dev_get(rdev);

	return rdev;

cleanup:
	if (rswitch)
		kfree(rswitch->route_table);

	kfree(rdev);
	return NULL;
}

/**
 * rio_sport_is_active- Tests if a switch port has an active connection.
 * @rdev: RapidIO device object
 * @sp: Switch port number
 *
 * Reads the port error status CSR for a particular switch port to
 * determine if the port has an active link.  Returns
 * %RIO_PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is
 * inactive.
 */
static int
rio_sport_is_active(struct rio_dev *rdev, int sp)
{
	u32 result = 0;

	rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, sp),
			   &result);

	return result & RIO_PORT_N_ERR_STS_PORT_OK;
}

/**
 * rio_get_host_deviceid_lock- Reads the Host Device ID Lock CSR on a device
 * @port: Master port to send transaction
 * @hopcount: Number of hops to the device
 *
 * Used during enumeration to read the Host Device ID Lock CSR on a
 * RIO device. Returns the value of the lock register.
 */
static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount)
{
	u32 result;

	rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size), hopcount,
				 RIO_HOST_DID_LOCK_CSR, &result);

	return (u16) (result & 0xffff);
}

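/*
 * Enumeration algorithm implemented by rio_enum_peer(): every candidate
 * device is addressed with RIO_ANY_DESTID and a hop count.  The enumerator
 * first tries to take the device's host lock; if the lock already holds a
 * numerically greater host device ID the other host wins and this branch
 * retreats, while a smaller value is simply retried until the competing
 * host backs off.  Once locked, the device is set up, routes toward all
 * previously assigned destIDs are installed through the switch inport,
 * and each active switch port is recursed into with hopcount + 1.
 */
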
/**
 * rio_enum_peer- Recursively enumerate a RIO network through a master port
 * @net: RIO network being enumerated
 * @port: Master port to send transactions
 * @hopcount: Number of hops into the network
 * @prev: Previous RIO device connected to the enumerated one
 * @prev_port: Port on previous RIO device
 *
 * Recursively enumerates a RIO network.  Transactions are sent via the
 * master port passed in @port.
 */
static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
			 u8 hopcount, struct rio_dev *prev, int prev_port)
{
	struct rio_dev *rdev;
	u32 regval;
	int tmp;

	if (rio_mport_chk_dev_access(port,
			RIO_ANY_DESTID(port->sys_size), hopcount)) {
		pr_debug("RIO: device access check failed\n");
		return -1;
	}

	if (rio_get_host_deviceid_lock(port, hopcount) == port->host_deviceid) {
		pr_debug("RIO: PE already discovered by this host\n");
		/*
		 * Already discovered by this host. Add it as another
		 * link to the existing device.
		 */
		rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size),
				hopcount, RIO_COMPONENT_TAG_CSR, &regval);

		if (regval) {
			rdev = rio_get_comptag((regval & 0xffff), NULL);

			if (rdev && prev && rio_is_switch(prev)) {
				pr_debug("RIO: redundant path to %s\n",
					 rio_name(rdev));
				prev->rswitch->nextdev[prev_port] = rdev;
			}
		}

		return 0;
	}

	/* Attempt to acquire device lock */
	rio_mport_write_config_32(port, RIO_ANY_DESTID(port->sys_size),
				  hopcount,
				  RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
	while ((tmp = rio_get_host_deviceid_lock(port, hopcount))
	       < port->host_deviceid) {
		/* Delay a bit */
		mdelay(1);
		/* Attempt to acquire device lock again */
		rio_mport_write_config_32(port, RIO_ANY_DESTID(port->sys_size),
					  hopcount,
					  RIO_HOST_DID_LOCK_CSR,
					  port->host_deviceid);
	}

	if (rio_get_host_deviceid_lock(port, hopcount) > port->host_deviceid) {
		pr_debug(
		    "RIO: PE locked by a higher priority host...retreating\n");
		return -1;
	}

	/* Setup new RIO device */
	rdev = rio_setup_device(net, port, RIO_ANY_DESTID(port->sys_size),
					hopcount, 1);
	if (rdev) {
		rdev->prev = prev;
		if (prev && rio_is_switch(prev))
			prev->rswitch->nextdev[prev_port] = rdev;
	} else
		return -1;

	if (rio_is_switch(rdev)) {
		int sw_destid;
		int cur_destid;
		int sw_inport;
		u16 destid;
		int port_num;

		sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo);
		rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
				    port->host_deviceid, sw_inport, 0);
		rdev->rswitch->route_table[port->host_deviceid] = sw_inport;

		destid = rio_destid_first(net);
		while (destid != RIO_INVALID_DESTID && destid < next_destid) {
			if (destid != port->host_deviceid) {
				rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
						    destid, sw_inport, 0);
				rdev->rswitch->route_table[destid] = sw_inport;
			}
			destid = rio_destid_next(net, destid + 1);
		}
		pr_debug(
		    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
		    rio_name(rdev), rdev->vid, rdev->did,
		    RIO_GET_TOTAL_PORTS(rdev->swpinfo));
		sw_destid = next_destid;
		for (port_num = 0;
		     port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo);
		     port_num++) {
			if (sw_inport == port_num) {
				rio_enable_rx_tx_port(port, 0,
					      RIO_ANY_DESTID(port->sys_size),
					      hopcount, port_num);
				rdev->rswitch->port_ok |= (1 << port_num);
				continue;
			}

			cur_destid = next_destid;

			if (rio_sport_is_active(rdev, port_num)) {
				pr_debug(
				    "RIO: scanning device on port %d\n",
				    port_num);
				rio_enable_rx_tx_port(port, 0,
					      RIO_ANY_DESTID(port->sys_size),
					      hopcount, port_num);
				rdev->rswitch->port_ok |= (1 << port_num);
				rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
						RIO_ANY_DESTID(port->sys_size),
						port_num, 0);

				if (rio_enum_peer(net, port, hopcount + 1,
						  rdev, port_num) < 0)
					return -1;

				/* Update routing tables */
				destid = rio_destid_next(net, cur_destid + 1);
				if (destid != RIO_INVALID_DESTID) {
					for (destid = cur_destid;
					     destid < next_destid;) {
						if (destid != port->host_deviceid) {
							rio_route_add_entry(rdev,
								    RIO_GLOBAL_TABLE,
								    destid,
								    port_num,
								    0);
							rdev->rswitch->
								route_table[destid] =
								port_num;
						}
						destid = rio_destid_next(net,
								destid + 1);
					}
				}
			} else {
				/* If switch supports Error Management,
				 * set PORT_LOCKOUT bit for unused port
				 */
				if (rdev->em_efptr)
					rio_set_port_lockout(rdev, port_num, 1);

				rdev->rswitch->port_ok &= ~(1 << port_num);
			}
		}

		/* Direct Port-write messages to the enumerating host */
		if ((rdev->src_ops & RIO_SRC_OPS_PORT_WRITE) &&
		    (rdev->em_efptr)) {
			rio_write_config_32(rdev,
					rdev->em_efptr + RIO_EM_PW_TGT_DEVID,
					(port->host_deviceid << 16) |
					(port->sys_size << 15));
		}

		rio_init_em(rdev);

		/* Check for empty switch */
		if (next_destid == sw_destid)
			next_destid = rio_destid_alloc(net);

		rdev->destid = sw_destid;
	} else
		pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n",
		    rio_name(rdev), rdev->vid, rdev->did);

	return 0;
}

/**
 * rio_enum_complete- Tests if enumeration of a network is complete
 * @port: Master port to send transaction
 *
 * Tests the PGCCSR discovered bit for non-zero value (enumeration
 * complete flag). Return %1 if enumeration is complete or %0 if
 * enumeration is incomplete.
 */
static int rio_enum_complete(struct rio_mport *port)
{
	u32 regval;

	rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_GEN_CTL_CSR,
				 &regval);
	return (regval & RIO_PORT_GEN_DISCOVERED) ? 1 : 0;
}

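/*
 * Discovery differs from enumeration in that destIDs and routes already
 * exist: rio_disc_peer() walks the global route table of each switch
 * (rio_route_get_entry()) to find a destID that is routed through the
 * active port being inspected, then follows that destID one hop further.
 * The switch is locked with rio_lock_device() while its table is read.
 */
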
/**
 * rio_disc_peer- Recursively discovers a RIO network through a master port
 * @net: RIO network being discovered
 * @port: Master port to send transactions
 * @destid: Current destination ID in network
 * @hopcount: Number of hops into the network
 * @prev: previous rio_dev
 * @prev_port: previous port number
 *
 * Recursively discovers a RIO network.  Transactions are sent via the
 * master port passed in @port.
 */
static int
rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
	      u8 hopcount, struct rio_dev *prev, int prev_port)
{
	u8 port_num, route_port;
	struct rio_dev *rdev;
	u16 ndestid;

	/* Setup new RIO device */
	if ((rdev = rio_setup_device(net, port, destid, hopcount, 0))) {
		rdev->prev = prev;
		if (prev && rio_is_switch(prev))
			prev->rswitch->nextdev[prev_port] = rdev;
	} else
		return -1;

	if (rio_is_switch(rdev)) {
		/* Associated destid is how we accessed this switch */
		rdev->destid = destid;

		pr_debug(
		    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
		    rio_name(rdev), rdev->vid, rdev->did,
		    RIO_GET_TOTAL_PORTS(rdev->swpinfo));
		for (port_num = 0;
		     port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo);
		     port_num++) {
			if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num)
				continue;

			if (rio_sport_is_active(rdev, port_num)) {
				pr_debug(
				    "RIO: scanning device on port %d\n",
				    port_num);

				rio_lock_device(port, destid, hopcount, 1000);

				for (ndestid = 0;
				     ndestid < RIO_ANY_DESTID(port->sys_size);
				     ndestid++) {
					rio_route_get_entry(rdev,
							    RIO_GLOBAL_TABLE,
							    ndestid,
							    &route_port, 0);
					if (route_port == port_num)
						break;
				}

				if (ndestid == RIO_ANY_DESTID(port->sys_size))
					continue;
				rio_unlock_device(port, destid, hopcount);
				if (rio_disc_peer(net, port, ndestid,
					hopcount + 1, rdev, port_num) < 0)
					return -1;
			}
		}
	} else
		pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n",
		    rio_name(rdev), rdev->vid, rdev->did);

	return 0;
}

/**
 * rio_mport_is_active- Tests if master port link is active
 * @port: Master port to test
 *
 * Reads the port error status CSR for the master port to
 * determine if the port has an active link.  Returns
 * %RIO_PORT_N_ERR_STS_PORT_OK if the master port is active
 * or %0 if it is inactive.
 */
static int rio_mport_is_active(struct rio_mport *port)
{
	u32 result = 0;

	rio_local_read_config_32(port,
		port->phys_efptr +
			RIO_PORT_N_ERR_STS_CSR(port->index, port->phys_rmap),
		&result);
	return result & RIO_PORT_N_ERR_STS_PORT_OK;
}

static void rio_scan_release_net(struct rio_net *net)
{
	pr_debug("RIO-SCAN: %s: net_%d\n", __func__, net->id);
	kfree(net->enum_data);
}

static void rio_scan_release_dev(struct device *dev)
{
	struct rio_net *net;

	net = to_rio_net(dev);
	pr_debug("RIO-SCAN: %s: net_%d\n", __func__, net->id);
	kfree(net);
}

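/*
 * Each network created in enumeration mode carries a destID bitmap sized
 * for the full destID space: BITS_TO_LONGS(RIO_MAX_ROUTE_ENTRIES(sys_size))
 * longs, i.e. roughly a 256-bit bitmap for a small (8-bit) system and a
 * 64K-bit bitmap for a large (16-bit) one.  The table is freed again from
 * the net's release callback, rio_scan_release_net().
 */
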
/*
 * rio_scan_alloc_net - Allocate and configure a new RIO network
 * @mport: Master port associated with the RIO network
 * @do_enum: Enumeration/Discovery mode flag
 * @start: logical minimal start id for new net
 *
 * Allocates a new RIO network structure and initializes enumerator-specific
 * part of it (if required).
 * Returns a RIO network pointer on success or %NULL on failure.
 */
static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport,
					  int do_enum, u16 start)
{
	struct rio_net *net;

	net = rio_alloc_net(mport);

	if (net && do_enum) {
		struct rio_id_table *idtab;
		size_t size;

		size = sizeof(struct rio_id_table) +
				BITS_TO_LONGS(
					RIO_MAX_ROUTE_ENTRIES(mport->sys_size)
					) * sizeof(long);

		idtab = kzalloc(size, GFP_KERNEL);

		if (idtab == NULL) {
			pr_err("RIO: failed to allocate destID table\n");
			rio_free_net(net);
			net = NULL;
		} else {
			net->enum_data = idtab;
			net->release = rio_scan_release_net;
			idtab->start = start;
			idtab->max = RIO_MAX_ROUTE_ENTRIES(mport->sys_size);
			spin_lock_init(&idtab->lock);
		}
	}

	if (net) {
		net->id = mport->id;
		net->hport = mport;
		dev_set_name(&net->dev, "rnet_%d", net->id);
		net->dev.parent = &mport->dev;
		net->dev.release = rio_scan_release_dev;
		rio_add_net(net);
	}

	return net;
}

/**
 * rio_update_route_tables- Updates route tables in switches
 * @net: RIO network to run update on
 *
 * For each enumerated device, ensure that each switch in a system
 * has correct routing entries. Add routes for devices that were
 * unknown during the first enumeration pass through the switch.
 */
static void rio_update_route_tables(struct rio_net *net)
{
	struct rio_dev *rdev, *swrdev;
	struct rio_switch *rswitch;
	u8 sport;
	u16 destid;

	list_for_each_entry(rdev, &net->devices, net_list) {

		destid = rdev->destid;

		list_for_each_entry(rswitch, &net->switches, node) {

			if (rio_is_switch(rdev) && (rdev->rswitch == rswitch))
				continue;

			if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) {
				swrdev = sw_to_rio_dev(rswitch);

				/* Skip if destid ends in empty switch */
				if (swrdev->destid == destid)
					continue;

				sport = RIO_GET_PORT_NUM(swrdev->swpinfo);

				rio_route_add_entry(swrdev, RIO_GLOBAL_TABLE,
						    destid, sport, 0);
				rswitch->route_table[destid] = sport;
			}
		}
	}
}

/**
 * rio_init_em - Initializes RIO Error Management (for switches)
 * @rdev: RIO device
 *
 * For each enumerated switch, call device-specific error management
 * initialization routine (if supplied by the switch driver).
 */
static void rio_init_em(struct rio_dev *rdev)
{
	if (rio_is_switch(rdev) && (rdev->em_efptr) &&
	    rdev->rswitch->ops && rdev->rswitch->ops->em_init) {
		rdev->rswitch->ops->em_init(rdev);
	}
}

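/*
 * Top-level enumeration flow, tying the helpers above together: lock the
 * local master port (rio_enum_host), allocate the net and its destID
 * table, reserve the host's own destID, enable the local port, then walk
 * the fabric with rio_enum_peer().  On success the last (unused) destID
 * is returned to the pool, missing routes are filled in, all host locks
 * are dropped and port-write reporting is enabled; losing the lock race
 * to another host aborts with -EBUSY.
 */
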
/**
 * rio_enum_mport- Start enumeration through a master port
 * @mport: Master port to send transactions
 * @flags: Enumeration control flags
 *
 * Starts the enumeration process. If somebody has enumerated our
 * master port device, then give up. If not and we have an active
 * link, then start recursive peer enumeration. Returns %0 if
 * enumeration succeeds or %-EBUSY if enumeration fails.
 */
static int rio_enum_mport(struct rio_mport *mport, u32 flags)
{
	struct rio_net *net = NULL;
	int rc = 0;

	printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id,
	       mport->name);

	/*
	 * To avoid multiple start requests (repeat enumeration is not supported
	 * by this method) check if enumeration/discovery was performed for this
	 * mport: if the mport was already added into the list of mports for a
	 * net, exit with an error.
	 */
	if (mport->nnode.next || mport->nnode.prev)
		return -EBUSY;

	/* If somebody else enumerated our master port device, bail. */
	if (rio_enum_host(mport) < 0) {
		printk(KERN_INFO
		       "RIO: master port %d device has been enumerated by a remote host\n",
		       mport->id);
		rc = -EBUSY;
		goto out;
	}

	/* If master port has an active link, allocate net and enum peers */
	if (rio_mport_is_active(mport)) {
		net = rio_scan_alloc_net(mport, 1, 0);
		if (!net) {
			printk(KERN_ERR "RIO: failed to allocate new net\n");
			rc = -ENOMEM;
			goto out;
		}

		/* reserve mport destID in new net */
		rio_destid_reserve(net, mport->host_deviceid);

		/* Enable Input Output Port (transmitter receiver) */
		rio_enable_rx_tx_port(mport, 1, 0, 0, 0);

		/* Set component tag for host */
		rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR,
					  next_comptag++);

		next_destid = rio_destid_alloc(net);

		if (rio_enum_peer(net, mport, 0, NULL, 0) < 0) {
			/* A higher priority host won enumeration, bail. */
			printk(KERN_INFO
			       "RIO: master port %d device has lost enumeration to a remote host\n",
			       mport->id);
			rio_clear_locks(net);
			rc = -EBUSY;
			goto out;
		}
		/* free the last allocated destID (unused) */
		rio_destid_free(net, next_destid);
		rio_update_route_tables(net);
		rio_clear_locks(net);
		rio_pw_enable(mport, 1);
	} else {
		printk(KERN_INFO "RIO: master port %d link inactive\n",
		       mport->id);
		rc = -EINVAL;
	}

      out:
	return rc;
}

/**
 * rio_build_route_tables- Generate route tables from switch route entries
 * @net: RIO network to run route tables scan on
 *
 * For each switch device, generate a route table by copying existing
 * route entries from the switch.
 */
static void rio_build_route_tables(struct rio_net *net)
{
	struct rio_switch *rswitch;
	struct rio_dev *rdev;
	int i;
	u8 sport;

	list_for_each_entry(rswitch, &net->switches, node) {
		rdev = sw_to_rio_dev(rswitch);

		rio_lock_device(net->hport, rdev->destid,
				rdev->hopcount, 1000);
		for (i = 0;
		     i < RIO_MAX_ROUTE_ENTRIES(net->hport->sys_size);
		     i++) {
			if (rio_route_get_entry(rdev, RIO_GLOBAL_TABLE,
						i, &sport, 0) < 0)
				continue;
			rswitch->route_table[i] = sport;
		}

		rio_unlock_device(net->hport, rdev->destid, rdev->hopcount);
	}
}

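/*
 * On a discovering (non-enumerating) host, rio_disc_mport() below polls
 * the local PGCCSR discovered bit for up to CONFIG_RAPIDIO_DISC_TIMEOUT
 * seconds (unless RIO_SCAN_ENUM_NO_WAIT asks for an immediate -EAGAIN),
 * then adopts the destID that the enumerator wrote into its Base Device
 * ID CSR and walks the already-routed fabric with rio_disc_peer().
 */
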
/**
 * rio_disc_mport- Start discovery through a master port
 * @mport: Master port to send transactions
 * @flags: discovery control flags
 *
 * Starts the discovery process. If we have an active link,
 * then wait for the signal that enumeration is complete (if wait
 * is allowed).
 * When enumeration completion is signaled, start recursive
 * peer discovery. Returns %0 if discovery succeeds or %-EBUSY
 * on failure.
 */
static int rio_disc_mport(struct rio_mport *mport, u32 flags)
{
	struct rio_net *net = NULL;
	unsigned long to_end;

	printk(KERN_INFO "RIO: discover master port %d, %s\n", mport->id,
	       mport->name);

	/* If master port has an active link, allocate net and discover peers */
	if (rio_mport_is_active(mport)) {
		if (rio_enum_complete(mport))
			goto enum_done;
		else if (flags & RIO_SCAN_ENUM_NO_WAIT)
			return -EAGAIN;

		pr_debug("RIO: wait for enumeration to complete...\n");

		to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ;
		while (time_before(jiffies, to_end)) {
			if (rio_enum_complete(mport))
				goto enum_done;
			msleep(10);
		}

		pr_debug("RIO: discovery timeout on mport %d %s\n",
			 mport->id, mport->name);
		goto bail;
enum_done:
		pr_debug("RIO: ... enumeration done\n");

		net = rio_scan_alloc_net(mport, 0, 0);
		if (!net) {
			printk(KERN_ERR "RIO: Failed to allocate new net\n");
			goto bail;
		}

		/* Read DestID assigned by enumerator */
		rio_local_read_config_32(mport, RIO_DID_CSR,
					 &mport->host_deviceid);
		mport->host_deviceid = RIO_GET_DID(mport->sys_size,
						   mport->host_deviceid);

		if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
					0, NULL, 0) < 0) {
			printk(KERN_INFO
			       "RIO: master port %d device has failed discovery\n",
			       mport->id);
			goto bail;
		}

		rio_build_route_tables(net);
	}

	return 0;
bail:
	return -EBUSY;
}

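/*
 * The enumerate/discover callbacks below are handed to the RapidIO core
 * through rio_register_scan() in rio_basic_attach(); the core then invokes
 * them per mport, either automatically from rio_init_mports() when the
 * "scan" parameter is set or later when a scan is requested through the
 * core.
 */
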
static struct rio_scan rio_scan_ops = {
	.owner = THIS_MODULE,
	.enumerate = rio_enum_mport,
	.discover = rio_disc_mport,
};

static bool scan;
module_param(scan, bool, 0);
MODULE_PARM_DESC(scan, "Start RapidIO network enumeration/discovery "
			"(default = 0)");

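/*
 * Typical usage when this scanner is built as a module (illustrative,
 * assuming the object is built as rio-scan.ko as in the stock Makefile):
 *
 *	modprobe rio-scan scan=1
 *
 * loads the enumerator and immediately kicks off rio_init_mports(), while
 * loading with scan=0 (the default) only registers the methods and leaves
 * the actual scan to be triggered later.
 */
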
/**
 * rio_basic_attach:
 *
 * When this enumeration/discovery method is loaded as a module this function
 * registers its specific enumeration and discovery routines for all available
 * RapidIO mport devices. The "scan" command line parameter controls the
 * ability of the module to start RapidIO enumeration/discovery automatically.
 *
 * Returns 0 for success or -EIO if unable to register itself.
 *
 * This enumeration/discovery method cannot be unloaded and therefore does not
 * provide a matching cleanup_module routine.
 */

static int __init rio_basic_attach(void)
{
	if (rio_register_scan(RIO_MPORT_ANY, &rio_scan_ops))
		return -EIO;
	if (scan)
		rio_init_mports();
	return 0;
}

late_initcall(rio_basic_attach);

MODULE_DESCRIPTION("Basic RapidIO enumeration/discovery");
MODULE_LICENSE("GPL");