xref: /OK3568_Linux_fs/kernel/drivers/firewire/core-topology.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Incremental bus scan, based on bus topology
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/bug.h>
9*4882a593Smuzhiyun #include <linux/errno.h>
10*4882a593Smuzhiyun #include <linux/firewire.h>
11*4882a593Smuzhiyun #include <linux/firewire-constants.h>
12*4882a593Smuzhiyun #include <linux/jiffies.h>
13*4882a593Smuzhiyun #include <linux/kernel.h>
14*4882a593Smuzhiyun #include <linux/list.h>
15*4882a593Smuzhiyun #include <linux/module.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <linux/spinlock.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include <linux/atomic.h>
20*4882a593Smuzhiyun #include <asm/byteorder.h>
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun #include "core.h"
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #define SELF_ID_PHY_ID(q)		(((q) >> 24) & 0x3f)
25*4882a593Smuzhiyun #define SELF_ID_EXTENDED(q)		(((q) >> 23) & 0x01)
26*4882a593Smuzhiyun #define SELF_ID_LINK_ON(q)		(((q) >> 22) & 0x01)
27*4882a593Smuzhiyun #define SELF_ID_GAP_COUNT(q)		(((q) >> 16) & 0x3f)
28*4882a593Smuzhiyun #define SELF_ID_PHY_SPEED(q)		(((q) >> 14) & 0x03)
29*4882a593Smuzhiyun #define SELF_ID_CONTENDER(q)		(((q) >> 11) & 0x01)
30*4882a593Smuzhiyun #define SELF_ID_PHY_INITIATOR(q)	(((q) >>  1) & 0x01)
31*4882a593Smuzhiyun #define SELF_ID_MORE_PACKETS(q)		(((q) >>  0) & 0x01)
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #define SELF_ID_EXT_SEQUENCE(q)		(((q) >> 20) & 0x07)
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun #define SELFID_PORT_CHILD	0x3
36*4882a593Smuzhiyun #define SELFID_PORT_PARENT	0x2
37*4882a593Smuzhiyun #define SELFID_PORT_NCONN	0x1
38*4882a593Smuzhiyun #define SELFID_PORT_NONE	0x0
39*4882a593Smuzhiyun 
/*
 * Walk one node's self-ID packet (plus any extended self-ID packets that
 * follow it) and count its ports.
 *
 * @sid:               pointer to the node's first self-ID quadlet
 * @total_port_count:  out: number of ports present (child+parent+unconnected)
 * @child_port_count:  out: number of ports connected to child nodes
 *
 * Returns a pointer to the first quadlet after this node's (possibly
 * extended) self-ID sequence, or NULL if the extended packets are missing
 * or carry out-of-order sequence numbers.
 */
static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
	u32 q;
	int port_type, shift, seq;

	*total_port_count = 0;
	*child_port_count = 0;

	/* The first packet carries three 2-bit port fields, ending at bit 6. */
	shift = 6;
	q = *sid;
	seq = 0;

	while (1) {
		port_type = (q >> shift) & 0x03;
		switch (port_type) {
		case SELFID_PORT_CHILD:
			(*child_port_count)++;
			fallthrough;
		case SELFID_PORT_PARENT:
		case SELFID_PORT_NCONN:
			(*total_port_count)++;
			break;
		case SELFID_PORT_NONE:
			break;
		}

		shift -= 2;
		if (shift == 0) {
			if (!SELF_ID_MORE_PACKETS(q))
				return sid + 1;

			/* Extended packets carry eight port fields, from bit 16. */
			shift = 16;
			sid++;
			q = *sid;

			/*
			 * Check that the extra packets actually are
			 * extended self ID packets and that the
			 * sequence numbers in the extended self ID
			 * packets increase as expected.
			 */

			if (!SELF_ID_EXTENDED(q) ||
			    seq != SELF_ID_EXT_SEQUENCE(q))
				return NULL;

			seq++;
		}
	}
}
89*4882a593Smuzhiyun 
/*
 * Fetch the 2-bit port type field for port @port_index out of a node's
 * self-ID packet sequence.  The first quadlet holds ports 0-2 (starting
 * at bit 6), each extended quadlet holds eight more (starting at bit 16);
 * biasing the index by 5 makes both layouts addressable with one formula.
 */
static int get_port_type(u32 *sid, int port_index)
{
	int quadlet = (port_index + 5) / 8;
	int bit = 16 - ((port_index + 5) & 7) * 2;

	return (sid[quadlet] >> bit) & 0x03;
}
98*4882a593Smuzhiyun 
/*
 * Allocate and initialize an fw_node from the node's first self-ID quadlet.
 * The ports[] array is sized for @port_count entries and zero-initialized;
 * the caller fills it in while building the tree.  Returns NULL on
 * allocation failure.  GFP_ATOMIC because this runs under card->lock.
 */
static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
	struct fw_node *node;

	node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
	if (!node)
		return NULL;

	node->color		= color;
	node->node_id		= LOCAL_BUS | SELF_ID_PHY_ID(sid);
	node->link_on		= SELF_ID_LINK_ON(sid);
	node->phy_speed		= SELF_ID_PHY_SPEED(sid);
	node->initiated_reset	= SELF_ID_PHY_INITIATOR(sid);
	node->port_count	= port_count;

	/* Start with a single reference, owned by the topology tree. */
	refcount_set(&node->ref_count, 1);
	INIT_LIST_HEAD(&node->link);

	return node;
}
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun /*
121*4882a593Smuzhiyun  * Compute the maximum hop count for this node and it's children.  The
122*4882a593Smuzhiyun  * maximum hop count is the maximum number of connections between any
123*4882a593Smuzhiyun  * two nodes in the subtree rooted at this node.  We need this for
124*4882a593Smuzhiyun  * setting the gap count.  As we build the tree bottom up in
125*4882a593Smuzhiyun  * build_tree() below, this is fairly easy to do: for each node we
126*4882a593Smuzhiyun  * maintain the max hop count and the max depth, ie the number of hops
127*4882a593Smuzhiyun  * to the furthest leaf.  Computing the max hop count breaks down into
128*4882a593Smuzhiyun  * two cases: either the path goes through this node, in which case
129*4882a593Smuzhiyun  * the hop count is the sum of the two biggest child depths plus 2.
130*4882a593Smuzhiyun  * Or it could be the case that the max hop path is entirely
131*4882a593Smuzhiyun  * contained in a child tree, in which case the max hop count is just
132*4882a593Smuzhiyun  * the max hop count of this child.
133*4882a593Smuzhiyun  */
update_hop_count(struct fw_node * node)134*4882a593Smuzhiyun static void update_hop_count(struct fw_node *node)
135*4882a593Smuzhiyun {
136*4882a593Smuzhiyun 	int depths[2] = { -1, -1 };
137*4882a593Smuzhiyun 	int max_child_hops = 0;
138*4882a593Smuzhiyun 	int i;
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	for (i = 0; i < node->port_count; i++) {
141*4882a593Smuzhiyun 		if (node->ports[i] == NULL)
142*4882a593Smuzhiyun 			continue;
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 		if (node->ports[i]->max_hops > max_child_hops)
145*4882a593Smuzhiyun 			max_child_hops = node->ports[i]->max_hops;
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 		if (node->ports[i]->max_depth > depths[0]) {
148*4882a593Smuzhiyun 			depths[1] = depths[0];
149*4882a593Smuzhiyun 			depths[0] = node->ports[i]->max_depth;
150*4882a593Smuzhiyun 		} else if (node->ports[i]->max_depth > depths[1])
151*4882a593Smuzhiyun 			depths[1] = node->ports[i]->max_depth;
152*4882a593Smuzhiyun 	}
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	node->max_depth = depths[0] + 1;
155*4882a593Smuzhiyun 	node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun 
/* Convert an embedded list link back to its containing fw_node. */
static inline struct fw_node *fw_node(struct list_head *l)
{
	return list_entry(l, struct fw_node, link);
}
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun /*
164*4882a593Smuzhiyun  * This function builds the tree representation of the topology given
165*4882a593Smuzhiyun  * by the self IDs from the latest bus reset.  During the construction
166*4882a593Smuzhiyun  * of the tree, the function checks that the self IDs are valid and
167*4882a593Smuzhiyun  * internally consistent.  On success this function returns the
168*4882a593Smuzhiyun  * fw_node corresponding to the local card otherwise NULL.
169*4882a593Smuzhiyun  */
static struct fw_node *build_tree(struct fw_card *card,
				  u32 *sid, int self_id_count)
{
	struct fw_node *node, *child, *local_node, *irm_node;
	struct list_head stack, *h;
	u32 *next_sid, *end, q;
	int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
	int gap_count;
	bool beta_repeaters_present;

	local_node = NULL;
	node = NULL;
	INIT_LIST_HEAD(&stack);
	stack_depth = 0;
	end = sid + self_id_count;
	/* Self IDs arrive in ascending PHY ID order, starting at 0. */
	phy_id = 0;
	irm_node = NULL;
	gap_count = SELF_ID_GAP_COUNT(*sid);
	beta_repeaters_present = false;

	while (sid < end) {
		next_sid = count_ports(sid, &port_count, &child_port_count);

		if (next_sid == NULL) {
			fw_err(card, "inconsistent extended self IDs\n");
			return NULL;
		}

		q = *sid;
		if (phy_id != SELF_ID_PHY_ID(q)) {
			fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
			       phy_id, SELF_ID_PHY_ID(q));
			return NULL;
		}

		/* A node cannot claim more children than we have seen so far. */
		if (child_port_count > stack_depth) {
			fw_err(card, "topology stack underflow\n");
			return NULL;
		}

		/*
		 * Seek back from the top of our stack to find the
		 * start of the child nodes for this node.
		 */
		for (i = 0, h = &stack; i < child_port_count; i++)
			h = h->prev;
		/*
		 * When the stack is empty, this yields an invalid value,
		 * but that pointer will never be dereferenced.
		 */
		child = fw_node(h);

		node = fw_node_create(q, port_count, card->color);
		if (node == NULL) {
			fw_err(card, "out of memory while building topology\n");
			return NULL;
		}

		if (phy_id == (card->node_id & 0x3f))
			local_node = node;

		/* The contending node with the highest PHY ID becomes IRM. */
		if (SELF_ID_CONTENDER(q))
			irm_node = node;

		parent_count = 0;

		for (i = 0; i < port_count; i++) {
			switch (get_port_type(sid, i)) {
			case SELFID_PORT_PARENT:
				/*
				 * Who's your daddy?  We dont know the
				 * parent node at this time, so we
				 * temporarily abuse node->color for
				 * remembering the entry in the
				 * node->ports array where the parent
				 * node should be.  Later, when we
				 * handle the parent node, we fix up
				 * the reference.
				 */
				parent_count++;
				node->color = i;
				break;

			case SELFID_PORT_CHILD:
				node->ports[i] = child;
				/*
				 * Fix up parent reference for this
				 * child node.
				 */
				child->ports[child->color] = node;
				child->color = card->color;
				child = fw_node(child->link.next);
				break;
			}
		}

		/*
		 * Check that the node reports exactly one parent
		 * port, except for the root, which of course should
		 * have no parents.
		 */
		if ((next_sid == end && parent_count != 0) ||
		    (next_sid < end && parent_count != 1)) {
			fw_err(card, "parent port inconsistency for node %d: "
			       "parent_count=%d\n", phy_id, parent_count);
			return NULL;
		}

		/* Pop the child nodes off the stack and push the new node. */
		__list_del(h->prev, &stack);
		list_add_tail(&node->link, &stack);
		stack_depth += 1 - child_port_count;

		/* A beta-speed PHY with more than one connection repeats. */
		if (node->phy_speed == SCODE_BETA &&
		    parent_count + child_port_count > 1)
			beta_repeaters_present = true;

		/*
		 * If PHYs report different gap counts, set an invalid count
		 * which will force a gap count reconfiguration and a reset.
		 */
		if (SELF_ID_GAP_COUNT(q) != gap_count)
			gap_count = 0;

		update_hop_count(node);

		sid = next_sid;
		phy_id++;
	}

	/* The last node processed is the root of the bus. */
	card->root_node = node;
	card->irm_node = irm_node;
	card->gap_count = gap_count;
	card->beta_repeaters_present = beta_repeaters_present;

	return local_node;
}
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun typedef void (*fw_node_callback_t)(struct fw_card * card,
309*4882a593Smuzhiyun 				   struct fw_node * node,
310*4882a593Smuzhiyun 				   struct fw_node * parent);
311*4882a593Smuzhiyun 
/*
 * Breadth-first walk over the tree rooted at @root, calling @callback on
 * every node with its parent (NULL for @root).  Visited nodes are tagged
 * with card->color; an unvisited neighbour is queued, a visited one must
 * be this node's parent.  Each queued node holds a temporary reference
 * which is dropped after the walk completes.
 *
 * NOTE(review): correctness relies on list_for_each_entry() observing
 * entries appended to &list during the iteration.
 */
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
			     fw_node_callback_t callback)
{
	struct list_head list;
	struct fw_node *node, *next, *child, *parent;
	int i;

	INIT_LIST_HEAD(&list);

	fw_node_get(root);
	list_add_tail(&root->link, &list);
	parent = NULL;
	list_for_each_entry(node, &list, link) {
		node->color = card->color;

		for (i = 0; i < node->port_count; i++) {
			child = node->ports[i];
			if (!child)
				continue;
			if (child->color == card->color)
				parent = child;
			else {
				fw_node_get(child);
				list_add_tail(&child->link, &list);
			}
		}

		callback(card, node, parent);
	}

	/* Drop the references taken when the nodes were queued. */
	list_for_each_entry_safe(node, next, &list, link)
		fw_node_put(node);
}
345*4882a593Smuzhiyun 
/*
 * for_each_fw_node() callback for subtrees that disappeared: emit a
 * FW_NODE_DESTROYED event and drop the tree's reference on the node.
 */
static void report_lost_node(struct fw_card *card,
			     struct fw_node *node, struct fw_node *parent)
{
	fw_node_event(card, node, FW_NODE_DESTROYED);
	fw_node_put(node);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}
355*4882a593Smuzhiyun 
report_found_node(struct fw_card * card,struct fw_node * node,struct fw_node * parent)356*4882a593Smuzhiyun static void report_found_node(struct fw_card *card,
357*4882a593Smuzhiyun 			      struct fw_node *node, struct fw_node *parent)
358*4882a593Smuzhiyun {
359*4882a593Smuzhiyun 	int b_path = (node->phy_speed == SCODE_BETA);
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	if (parent != NULL) {
362*4882a593Smuzhiyun 		/* min() macro doesn't work here with gcc 3.4 */
363*4882a593Smuzhiyun 		node->max_speed = parent->max_speed < node->phy_speed ?
364*4882a593Smuzhiyun 					parent->max_speed : node->phy_speed;
365*4882a593Smuzhiyun 		node->b_path = parent->b_path && b_path;
366*4882a593Smuzhiyun 	} else {
367*4882a593Smuzhiyun 		node->max_speed = node->phy_speed;
368*4882a593Smuzhiyun 		node->b_path = b_path;
369*4882a593Smuzhiyun 	}
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 	fw_node_event(card, node, FW_NODE_CREATED);
372*4882a593Smuzhiyun 
373*4882a593Smuzhiyun 	/* Topology has changed - reset bus manager retry counter */
374*4882a593Smuzhiyun 	card->bm_retries = 0;
375*4882a593Smuzhiyun }
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun /* Must be called with card->lock held */
fw_destroy_nodes(struct fw_card * card)378*4882a593Smuzhiyun void fw_destroy_nodes(struct fw_card *card)
379*4882a593Smuzhiyun {
380*4882a593Smuzhiyun 	card->color++;
381*4882a593Smuzhiyun 	if (card->local_node != NULL)
382*4882a593Smuzhiyun 		for_each_fw_node(card, card->local_node, report_lost_node);
383*4882a593Smuzhiyun 	card->local_node = NULL;
384*4882a593Smuzhiyun }
385*4882a593Smuzhiyun 
move_tree(struct fw_node * node0,struct fw_node * node1,int port)386*4882a593Smuzhiyun static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
387*4882a593Smuzhiyun {
388*4882a593Smuzhiyun 	struct fw_node *tree;
389*4882a593Smuzhiyun 	int i;
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	tree = node1->ports[port];
392*4882a593Smuzhiyun 	node0->ports[port] = tree;
393*4882a593Smuzhiyun 	for (i = 0; i < tree->port_count; i++) {
394*4882a593Smuzhiyun 		if (tree->ports[i] == node1) {
395*4882a593Smuzhiyun 			tree->ports[i] = node0;
396*4882a593Smuzhiyun 			break;
397*4882a593Smuzhiyun 		}
398*4882a593Smuzhiyun 	}
399*4882a593Smuzhiyun }
400*4882a593Smuzhiyun 
401*4882a593Smuzhiyun /*
402*4882a593Smuzhiyun  * Compare the old topology tree for card with the new one specified by root.
403*4882a593Smuzhiyun  * Queue the nodes and mark them as either found, lost or updated.
404*4882a593Smuzhiyun  * Update the nodes in the card topology tree as we go.
405*4882a593Smuzhiyun  */
/*
 * Walk the old tree (rooted at card->local_node) and the new tree (rooted
 * at @root) in lock-step over two work lists.  Matching nodes have their
 * state copied from new to old and an event queued; ports present only in
 * the old tree are reported lost, ports present only in the new tree are
 * grafted over with move_tree() and reported found.  The new tree's nodes
 * are released as they are consumed.
 */
static void update_tree(struct fw_card *card, struct fw_node *root)
{
	struct list_head list0, list1;
	struct fw_node *node0, *node1, *next1;
	int i, event;

	INIT_LIST_HEAD(&list0);
	list_add_tail(&card->local_node->link, &list0);
	INIT_LIST_HEAD(&list1);
	list_add_tail(&root->link, &list1);

	node0 = fw_node(list0.next);
	node1 = fw_node(list1.next);

	while (&node0->link != &list0) {
		WARN_ON(node0->port_count != node1->port_count);

		/* Classify what happened to this node since the last reset. */
		if (node0->link_on && !node1->link_on)
			event = FW_NODE_LINK_OFF;
		else if (!node0->link_on && node1->link_on)
			event = FW_NODE_LINK_ON;
		else if (node1->initiated_reset && node1->link_on)
			event = FW_NODE_INITIATED_RESET;
		else
			event = FW_NODE_UPDATED;

		/* Refresh the surviving old node from its new counterpart. */
		node0->node_id = node1->node_id;
		node0->color = card->color;
		node0->link_on = node1->link_on;
		node0->initiated_reset = node1->initiated_reset;
		node0->max_hops = node1->max_hops;
		node1->color = card->color;
		fw_node_event(card, node0, event);

		/* Redirect card pointers from new-tree nodes to kept nodes. */
		if (card->root_node == node1)
			card->root_node = node0;
		if (card->irm_node == node1)
			card->irm_node = node0;

		for (i = 0; i < node0->port_count; i++) {
			if (node0->ports[i] && node1->ports[i]) {
				/*
				 * This port didn't change, queue the
				 * connected node for further
				 * investigation.
				 */
				if (node0->ports[i]->color == card->color)
					continue;
				list_add_tail(&node0->ports[i]->link, &list0);
				list_add_tail(&node1->ports[i]->link, &list1);
			} else if (node0->ports[i]) {
				/*
				 * The nodes connected here were
				 * unplugged; unref the lost nodes and
				 * queue FW_NODE_LOST callbacks for
				 * them.
				 */

				for_each_fw_node(card, node0->ports[i],
						 report_lost_node);
				node0->ports[i] = NULL;
			} else if (node1->ports[i]) {
				/*
				 * One or more node were connected to
				 * this port. Move the new nodes into
				 * the tree and queue FW_NODE_CREATED
				 * callbacks for them.
				 */
				move_tree(node0, node1, i);
				for_each_fw_node(card, node0->ports[i],
						 report_found_node);
			}
		}

		/* Advance both walks; the consumed new node is released. */
		node0 = fw_node(node0->link.next);
		next1 = fw_node(node1->link.next);
		fw_node_put(node1);
		node1 = next1;
	}
}
486*4882a593Smuzhiyun 
/*
 * Rebuild the TOPOLOGY_MAP CSR block: header with length, an incremented
 * generation counter, node/self-ID counts, the raw self-ID quadlets
 * (converted to big endian), and finally the block CRC.
 */
static void update_topology_map(struct fw_card *card,
				u32 *self_ids, int self_id_count)
{
	int node_count = (card->root_node->node_id & 0x3f) + 1;
	__be32 *map = card->topology_map;
	int i;

	map[0] = cpu_to_be32((self_id_count + 2) << 16);
	map[1] = cpu_to_be32(be32_to_cpu(map[1]) + 1);
	map[2] = cpu_to_be32((node_count << 16) | self_id_count);

	for (i = 0; i < self_id_count; i++)
		map[3 + i] = cpu_to_be32(self_ids[i]);

	fw_compute_block_crc(card->topology_map);
}
502*4882a593Smuzhiyun 
/*
 * Process the self IDs collected after a bus reset: update the card's
 * generation state, rebuild the topology tree and the TOPOLOGY_MAP CSR,
 * and queue creation/update/destruction events for all affected nodes.
 * Runs under card->lock.
 */
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
			      int self_id_count, u32 *self_ids, bool bm_abdicate)
{
	struct fw_node *local_node;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	/*
	 * If the selfID buffer is not the immediate successor of the
	 * previously processed one, we cannot reliably compare the
	 * old and new topologies.
	 */
	if (!is_next_generation(generation, card->generation) &&
	    card->local_node != NULL) {
		fw_destroy_nodes(card);
		card->bm_retries = 0;
	}

	card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
	card->node_id = node_id;
	/*
	 * Update node_id before generation to prevent anybody from using
	 * a stale node_id together with a current generation.
	 */
	smp_wmb();
	card->generation = generation;
	card->reset_jiffies = get_jiffies_64();
	card->bm_node_id  = 0xffff;
	card->bm_abdicate = bm_abdicate;
	/* Kick the bus manager work immediately for the new generation. */
	fw_schedule_bm_work(card, 0);

	local_node = build_tree(card, self_ids, self_id_count);

	update_topology_map(card, self_ids, self_id_count);

	/* New color generation: marks all previously visited nodes stale. */
	card->color++;

	if (local_node == NULL) {
		fw_err(card, "topology build failed\n");
		/* FIXME: We need to issue a bus reset in this case. */
	} else if (card->local_node == NULL) {
		/* First topology ever seen: everything is newly found. */
		card->local_node = local_node;
		for_each_fw_node(card, local_node, report_found_node);
	} else {
		/* Diff the new tree against the previous one. */
		update_tree(card, local_node);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
554