// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

#define TB_DMA_PATH_OUT			0
#define TB_DMA_PATH_IN			1

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_names[__tunnel->type],                  \
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

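/*
 * Example of the log format produced by the macros above (route and
 * port values are hypothetical): for a PCIe tunnel whose source
 * adapter is port 3 on the host router (route 0x0) and whose
 * destination adapter is port 1 on the router at route 0x301,
 * tb_tunnel_dbg(tunnel, "discovered\n") would log:
 *
 *	0:3 <-> 301:1 (PCI): discovered
 */
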
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_initial_credits(const struct tb_switch *sw)
{
	/* If the path is complete, sw is not NULL */
	if (sw) {
		/* More credits for a faster link */
		switch (sw->link_speed * sw->link_width) {
		case 40:
			return 32;
		case 20:
			return 24;
		}
	}

	return 16;
}

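/*
 * For example, assuming link_speed is in Gb/s and link_width counts
 * lanes (as elsewhere in this driver): a 20 Gb/s x2 link gives
 * link_speed * link_width == 40 and thus 32 initial credits, a
 * 10 Gb/s x2 or 20 Gb/s x1 link gives 20 and thus 24 credits, and
 * everything else (including an incomplete path where sw is NULL)
 * falls back to 16 credits.
 */
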
static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCIe tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_UP] = path;

	return tunnel;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
	int timeout = 10;
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(10, 100);
	} while (timeout--);

	return -ETIMEDOUT;
}

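/*
 * Note on the handshake budget above: with an initial timeout of 10 the
 * polling loop runs at most 11 times, sleeping 10-100 us per iteration,
 * so a peer that never clears DP_STATUS_CTRL_CMHS makes the handshake
 * fail with -ETIMEDOUT after roughly a millisecond plus the config
 * space read latency.
 */
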
static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}

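/*
 * Worked example of the calculation above: an HBR3 (8100 Mb/s per
 * lane) stream over four lanes carries 8100 * 4 * 8 / 10 = 25920 Mb/s
 * of payload once the 8b/10b encoding overhead is removed, which is
 * exactly the first entry of the dp_bw[] table below.
 */
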
static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}

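/*
 * Example walk of the table above: with max_bw = 9000 Mb/s, a DP IN
 * adapter capable of HBR3 x4 and a DP OUT adapter capable of HBR2 x4,
 * every { 8100, ... } entry is rejected because it exceeds the OUT
 * adapter's HBR2 rate, { 5400, 4 } fits both adapters but needs
 * 17280 Mb/s, and the search settles on { 2700, 4 } = 8640 Mb/s.
 */
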
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set), check
	 * whether the bandwidth needs to be reduced to fit.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (in->sw->config.depth < out->sw->config.depth)
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 val, rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		int timeout = 20;

		/*
		 * Wait for DPRX done. Normally it should already be set
		 * for an active tunnel.
		 */
		do {
			ret = tb_port_read(in, &val, TB_CFG_PORT,
					   in->cap_adap + DP_COMMON_CAP, 1);
			if (ret)
				return ret;

			if (val & DP_COMMON_CAP_DPRX_DONE) {
				rate = tb_dp_cap_get_rate(val);
				lanes = tb_dp_cap_get_lanes(val);
				break;
			}
			msleep(250);
		} while (timeout--);

		if (timeout < 0)
			return -ETIMEDOUT;
	} else if (sw->generation >= 2) {
		/*
		 * Read from the copied remote cap so that we take into
		 * account if capabilities were reduced during exchange.
		 */
		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_REMOTE_CAP, 1);
		if (ret)
			return ret;

		rate = tb_dp_cap_get_rate(val);
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

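/*
 * The depth comparison above decides which direction the video payload
 * flows relative to the host: for instance, a tunnel from a DP IN
 * adapter on the host router (depth 0) to a DP OUT adapter on a deeper
 * device router accounts all of the bandwidth against the downstream
 * direction, and the reverse topology against upstream.
 */
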
static void tb_dp_init_aux_path(struct tb_path *path)
{
	int i;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = 1;
}

static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	if (discover) {
		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	} else {
		u32 max_credits;

		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
		/* Leave some credits for AUX path */
		path->nfc_credits = min(max_credits - 2, 12U);
	}
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back.
 *
 * Return: The discovered tunnel or %NULL if there was no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video");
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_up,
				     int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     1, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, false);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static u32 tb_dma_credits(struct tb_port *nhi)
{
	u32 max_credits;

	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
	return min(max_credits, 13U);
}

static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
{
	struct tb_port *nhi = tunnel->src_port;
	u32 credits;

	credits = active ? tb_dma_credits(nhi) : 0;
	return tb_port_set_initial_credits(nhi, credits);
}

static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
			     unsigned int efc, u32 credits)
{
	int i;

	path->egress_fc_enable = efc;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = isb;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = credits;
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain
 * @transmit_path: HopID used for transmitting packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain
 * @receive_path: HopID used for receiving packets
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_ring,
				      int transmit_path, int receive_ring,
				      int receive_path)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	u32 credits;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_dma_activate;
	tunnel->src_port = nhi;
	tunnel->dst_port = dst;

	credits = tb_dma_credits(nhi);

	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
			 credits);
	tunnel->paths[TB_DMA_PATH_IN] = path;

	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
	tunnel->paths[TB_DMA_PATH_OUT] = path;

	return tunnel;
}

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
		int *consumed_up, int *consumed_down)
{
	/*
	 * PCIe tunneling affects the USB3 bandwidth so take that into
	 * account here.
	 */
	*consumed_up = tunnel->allocated_up * (3 + 1) / 3;
	*consumed_down = tunnel->allocated_down * (3 + 1) / 3;
	return 0;
}

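/*
 * In other words the allocation is scaled by 4/3 to leave headroom for
 * PCIe traffic sharing the link: e.g. 900 Mb/s allocated for USB3 is
 * reported as 1200 Mb/s consumed.
 */
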
static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
	if (ret < 0) {
		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
		return;
	} else if (!ret) {
		/* Use the maximum link rate if the link is not up (Link Valid not set) */
		ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
		if (ret < 0) {
			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
			return;
		}
	}

	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If nothing changed there is no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}

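/*
 * Sketch of the reclaim math above, assuming the link rate helpers
 * report Mb/s (as the debug messages suggest): on a 10 Gb/s USB3 link
 * at most 90%, i.e. 9000 Mb/s, may go to isochronous transfers, so the
 * tunnel asks for min(9000, available) in each direction but never
 * less than it already holds, and only issues the allocation request
 * when that differs from the current allocation.
 */
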
static void tb_usb3_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 3;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down");
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
tb_tunnel_alloc_usb3(struct tb * tb,struct tb_port * up,struct tb_port * down,int max_up,int max_down)1137*4882a593Smuzhiyun struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
1138*4882a593Smuzhiyun 				       struct tb_port *down, int max_up,
1139*4882a593Smuzhiyun 				       int max_down)
1140*4882a593Smuzhiyun {
1141*4882a593Smuzhiyun 	struct tb_tunnel *tunnel;
1142*4882a593Smuzhiyun 	struct tb_path *path;
1143*4882a593Smuzhiyun 	int max_rate = 0;
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	/*
1146*4882a593Smuzhiyun 	 * Check that we have enough bandwidth available for the new
1147*4882a593Smuzhiyun 	 * USB3 tunnel.
1148*4882a593Smuzhiyun 	 */
1149*4882a593Smuzhiyun 	if (max_up > 0 || max_down > 0) {
1150*4882a593Smuzhiyun 		max_rate = tb_usb3_max_link_rate(down, up);
1151*4882a593Smuzhiyun 		if (max_rate < 0)
1152*4882a593Smuzhiyun 			return NULL;
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 		/* Only 90% can be allocated for USB3 isochronous transfers */
1155*4882a593Smuzhiyun 		max_rate = max_rate * 90 / 100;
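		/*
		 * For example, a 10 Gb/s (10000 Mb/s) USB3 link leaves
		 * 10000 * 90 / 100 = 9000 Mb/s for the tunnel.
		 */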
1156*4882a593Smuzhiyun 		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
1157*4882a593Smuzhiyun 			    max_rate);
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 		if (max_rate > max_up || max_rate > max_down) {
1160*4882a593Smuzhiyun 			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
1161*4882a593Smuzhiyun 			return NULL;
1162*4882a593Smuzhiyun 		}
1163*4882a593Smuzhiyun 	}
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1166*4882a593Smuzhiyun 	if (!tunnel)
1167*4882a593Smuzhiyun 		return NULL;
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	tunnel->activate = tb_usb3_activate;
1170*4882a593Smuzhiyun 	tunnel->src_port = down;
1171*4882a593Smuzhiyun 	tunnel->dst_port = up;
1172*4882a593Smuzhiyun 	tunnel->max_up = max_up;
1173*4882a593Smuzhiyun 	tunnel->max_down = max_down;
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
1176*4882a593Smuzhiyun 			     "USB3 Down");
1177*4882a593Smuzhiyun 	if (!path) {
1178*4882a593Smuzhiyun 		tb_tunnel_free(tunnel);
1179*4882a593Smuzhiyun 		return NULL;
1180*4882a593Smuzhiyun 	}
1181*4882a593Smuzhiyun 	tb_usb3_init_path(path);
1182*4882a593Smuzhiyun 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
1185*4882a593Smuzhiyun 			     "USB3 Up");
1186*4882a593Smuzhiyun 	if (!path) {
1187*4882a593Smuzhiyun 		tb_tunnel_free(tunnel);
1188*4882a593Smuzhiyun 		return NULL;
1189*4882a593Smuzhiyun 	}
1190*4882a593Smuzhiyun 	tb_usb3_init_path(path);
1191*4882a593Smuzhiyun 	tunnel->paths[TB_USB3_PATH_UP] = path;
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 	if (!tb_route(down->sw)) {
1194*4882a593Smuzhiyun 		tunnel->allocated_up = max_rate;
1195*4882a593Smuzhiyun 		tunnel->allocated_down = max_rate;
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 		tunnel->init = tb_usb3_init;
1198*4882a593Smuzhiyun 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
1199*4882a593Smuzhiyun 		tunnel->release_unused_bandwidth =
1200*4882a593Smuzhiyun 			tb_usb3_release_unused_bandwidth;
1201*4882a593Smuzhiyun 		tunnel->reclaim_available_bandwidth =
1202*4882a593Smuzhiyun 			tb_usb3_reclaim_available_bandwidth;
1203*4882a593Smuzhiyun 	}
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	return tunnel;
1206*4882a593Smuzhiyun }
1207*4882a593Smuzhiyun 
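/*
 * A minimal usage sketch (hypothetical caller: @up and @down are assumed
 * to have been resolved to a TB_TYPE_USB3_UP/TB_TYPE_USB3_DOWN adapter
 * pair, and %0 for both limits means "not limited" as documented above):
 *
 *	struct tb_tunnel *tunnel;
 *
 *	tunnel = tb_tunnel_alloc_usb3(tb, up, down, 0, 0);
 *	if (!tunnel)
 *		return -ENOMEM;
 *
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 */
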
1208*4882a593Smuzhiyun /**
1209*4882a593Smuzhiyun  * tb_tunnel_free() - free a tunnel
1210*4882a593Smuzhiyun  * @tunnel: Tunnel to be freed
1211*4882a593Smuzhiyun  *
1212*4882a593Smuzhiyun  * Frees a tunnel. The tunnel does not need to be deactivated.
1213*4882a593Smuzhiyun  */
1214*4882a593Smuzhiyun void tb_tunnel_free(struct tb_tunnel *tunnel)
1215*4882a593Smuzhiyun {
1216*4882a593Smuzhiyun 	int i;
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 	if (!tunnel)
1219*4882a593Smuzhiyun 		return;
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	for (i = 0; i < tunnel->npaths; i++) {
1222*4882a593Smuzhiyun 		if (tunnel->paths[i])
1223*4882a593Smuzhiyun 			tb_path_free(tunnel->paths[i]);
1224*4882a593Smuzhiyun 	}
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	kfree(tunnel->paths);
1227*4882a593Smuzhiyun 	kfree(tunnel);
1228*4882a593Smuzhiyun }
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun /**
1231*4882a593Smuzhiyun  * tb_tunnel_is_invalid() - check whether an activated path is still valid
1232*4882a593Smuzhiyun  * @tunnel: Tunnel to check
1233*4882a593Smuzhiyun  */
1234*4882a593Smuzhiyun bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
1235*4882a593Smuzhiyun {
1236*4882a593Smuzhiyun 	int i;
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun 	for (i = 0; i < tunnel->npaths; i++) {
1239*4882a593Smuzhiyun 		WARN_ON(!tunnel->paths[i]->activated);
1240*4882a593Smuzhiyun 		if (tb_path_is_invalid(tunnel->paths[i]))
1241*4882a593Smuzhiyun 			return true;
1242*4882a593Smuzhiyun 	}
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 	return false;
1245*4882a593Smuzhiyun }
1246*4882a593Smuzhiyun 
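/*
 * A sketch of the intended use after a hotplug event: the caller walks
 * its own tunnel list and tears down every tunnel whose path is gone
 * (list handling belongs to the hypothetical caller):
 *
 *	struct tb_tunnel *tunnel, *n;
 *
 *	list_for_each_entry_safe(tunnel, n, &tunnel_list, list) {
 *		if (tb_tunnel_is_invalid(tunnel)) {
 *			tb_tunnel_deactivate(tunnel);
 *			list_del(&tunnel->list);
 *			tb_tunnel_free(tunnel);
 *		}
 *	}
 */
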
1247*4882a593Smuzhiyun /**
1248*4882a593Smuzhiyun  * tb_tunnel_restart() - activate a tunnel after a hardware reset
1249*4882a593Smuzhiyun  * @tunnel: Tunnel to restart
1250*4882a593Smuzhiyun  *
1251*4882a593Smuzhiyun  * Return: 0 on success and negative errno in case of failure.
1252*4882a593Smuzhiyun  */
1253*4882a593Smuzhiyun int tb_tunnel_restart(struct tb_tunnel *tunnel)
1254*4882a593Smuzhiyun {
1255*4882a593Smuzhiyun 	int res, i;
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun 	tb_tunnel_dbg(tunnel, "activating\n");
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 	/*
1260*4882a593Smuzhiyun 	 * Make sure all paths are properly disabled before enabling
1261*4882a593Smuzhiyun 	 * them again.
1262*4882a593Smuzhiyun 	 */
1263*4882a593Smuzhiyun 	for (i = 0; i < tunnel->npaths; i++) {
1264*4882a593Smuzhiyun 		if (tunnel->paths[i]->activated) {
1265*4882a593Smuzhiyun 			tb_path_deactivate(tunnel->paths[i]);
1266*4882a593Smuzhiyun 			tunnel->paths[i]->activated = false;
1267*4882a593Smuzhiyun 		}
1268*4882a593Smuzhiyun 	}
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	if (tunnel->init) {
1271*4882a593Smuzhiyun 		res = tunnel->init(tunnel);
1272*4882a593Smuzhiyun 		if (res)
1273*4882a593Smuzhiyun 			return res;
1274*4882a593Smuzhiyun 	}
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 	for (i = 0; i < tunnel->npaths; i++) {
1277*4882a593Smuzhiyun 		res = tb_path_activate(tunnel->paths[i]);
1278*4882a593Smuzhiyun 		if (res)
1279*4882a593Smuzhiyun 			goto err;
1280*4882a593Smuzhiyun 	}
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun 	if (tunnel->activate) {
1283*4882a593Smuzhiyun 		res = tunnel->activate(tunnel, true);
1284*4882a593Smuzhiyun 		if (res)
1285*4882a593Smuzhiyun 			goto err;
1286*4882a593Smuzhiyun 	}
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	return 0;
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun err:
1291*4882a593Smuzhiyun 	tb_tunnel_warn(tunnel, "activation failed\n");
1292*4882a593Smuzhiyun 	tb_tunnel_deactivate(tunnel);
1293*4882a593Smuzhiyun 	return res;
1294*4882a593Smuzhiyun }
1295*4882a593Smuzhiyun 
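/*
 * On resume from a hardware reset the caller can simply re-run the
 * activation for every tunnel it kept across the reset (sketch; the
 * list is the hypothetical caller's):
 *
 *	struct tb_tunnel *tunnel;
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list)
 *		tb_tunnel_restart(tunnel);
 */
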
1296*4882a593Smuzhiyun /**
1297*4882a593Smuzhiyun  * tb_tunnel_activate() - activate a tunnel
1298*4882a593Smuzhiyun  * @tunnel: Tunnel to activate
1299*4882a593Smuzhiyun  *
1300*4882a593Smuzhiyun  * Return: 0 on success or an error code on failure.
1301*4882a593Smuzhiyun  */
1302*4882a593Smuzhiyun int tb_tunnel_activate(struct tb_tunnel *tunnel)
1303*4882a593Smuzhiyun {
1304*4882a593Smuzhiyun 	int i;
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 	for (i = 0; i < tunnel->npaths; i++) {
1307*4882a593Smuzhiyun 		if (tunnel->paths[i]->activated) {
1308*4882a593Smuzhiyun 			tb_tunnel_WARN(tunnel,
1309*4882a593Smuzhiyun 				       "trying to activate an already activated tunnel\n");
1310*4882a593Smuzhiyun 			return -EINVAL;
1311*4882a593Smuzhiyun 		}
1312*4882a593Smuzhiyun 	}
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	return tb_tunnel_restart(tunnel);
1315*4882a593Smuzhiyun }
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun /**
1318*4882a593Smuzhiyun  * tb_tunnel_deactivate() - deactivate a tunnel
1319*4882a593Smuzhiyun  * @tunnel: Tunnel to deactivate
1320*4882a593Smuzhiyun  */
1321*4882a593Smuzhiyun void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
1322*4882a593Smuzhiyun {
1323*4882a593Smuzhiyun 	int i;
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	tb_tunnel_dbg(tunnel, "deactivating\n");
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	if (tunnel->activate)
1328*4882a593Smuzhiyun 		tunnel->activate(tunnel, false);
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	for (i = 0; i < tunnel->npaths; i++) {
1331*4882a593Smuzhiyun 		if (tunnel->paths[i] && tunnel->paths[i]->activated)
1332*4882a593Smuzhiyun 			tb_path_deactivate(tunnel->paths[i]);
1333*4882a593Smuzhiyun 	}
1334*4882a593Smuzhiyun }
1335*4882a593Smuzhiyun 
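/*
 * Orderly teardown pairs the two calls above: tb_tunnel_free() itself
 * does not touch the hardware, so deactivation comes first whenever the
 * paths should actually be deprogrammed (sketch):
 *
 *	tb_tunnel_deactivate(tunnel);
 *	tb_tunnel_free(tunnel);
 */
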
1336*4882a593Smuzhiyun /**
1337*4882a593Smuzhiyun  * tb_tunnel_port_on_path() - Does the tunnel go through the given port
1338*4882a593Smuzhiyun  * @tunnel: Tunnel to check
1339*4882a593Smuzhiyun  * @port: Port to check
1340*4882a593Smuzhiyun  *
1341*4882a593Smuzhiyun  * Returns true if @tunnel goes through @port (direction does not matter),
1342*4882a593Smuzhiyun  * false otherwise.
1343*4882a593Smuzhiyun  */
1344*4882a593Smuzhiyun bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
1345*4882a593Smuzhiyun 			    const struct tb_port *port)
1346*4882a593Smuzhiyun {
1347*4882a593Smuzhiyun 	int i;
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	for (i = 0; i < tunnel->npaths; i++) {
1350*4882a593Smuzhiyun 		if (!tunnel->paths[i])
1351*4882a593Smuzhiyun 			continue;
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 		if (tb_path_port_on_path(tunnel->paths[i], port))
1354*4882a593Smuzhiyun 			return true;
1355*4882a593Smuzhiyun 	}
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	return false;
1358*4882a593Smuzhiyun }
1359*4882a593Smuzhiyun 
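/*
 * Typical use is looking up which tunnel runs through a port that just
 * changed state (sketch, hypothetical caller and list):
 *
 *	struct tb_tunnel *tunnel;
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list) {
 *		if (tb_tunnel_port_on_path(tunnel, port))
 *			return tunnel;
 *	}
 *	return NULL;
 */
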
1360*4882a593Smuzhiyun static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
1361*4882a593Smuzhiyun {
1362*4882a593Smuzhiyun 	int i;
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	for (i = 0; i < tunnel->npaths; i++) {
1365*4882a593Smuzhiyun 		if (!tunnel->paths[i])
1366*4882a593Smuzhiyun 			return false;
1367*4882a593Smuzhiyun 		if (!tunnel->paths[i]->activated)
1368*4882a593Smuzhiyun 			return false;
1369*4882a593Smuzhiyun 	}
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 	return true;
1372*4882a593Smuzhiyun }
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun /**
1375*4882a593Smuzhiyun  * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
1376*4882a593Smuzhiyun  * @tunnel: Tunnel to check
1377*4882a593Smuzhiyun  * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
1378*4882a593Smuzhiyun  *		 Can be %NULL.
1379*4882a593Smuzhiyun  * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
1380*4882a593Smuzhiyun  *		   Can be %NULL.
1381*4882a593Smuzhiyun  *
1382*4882a593Smuzhiyun  * Stores the amount of isochronous bandwidth @tunnel consumes in
1383*4882a593Smuzhiyun  * @consumed_up and @consumed_down. In case of success returns %0,
1384*4882a593Smuzhiyun  * negative errno otherwise.
1385*4882a593Smuzhiyun  */
1386*4882a593Smuzhiyun int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1387*4882a593Smuzhiyun 				 int *consumed_down)
1388*4882a593Smuzhiyun {
1389*4882a593Smuzhiyun 	int up_bw = 0, down_bw = 0;
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	if (!tb_tunnel_is_active(tunnel))
1392*4882a593Smuzhiyun 		goto out;
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 	if (tunnel->consumed_bandwidth) {
1395*4882a593Smuzhiyun 		int ret;
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
1398*4882a593Smuzhiyun 		if (ret)
1399*4882a593Smuzhiyun 			return ret;
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
1402*4882a593Smuzhiyun 			      down_bw);
1403*4882a593Smuzhiyun 	}
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun out:
1406*4882a593Smuzhiyun 	if (consumed_up)
1407*4882a593Smuzhiyun 		*consumed_up = up_bw;
1408*4882a593Smuzhiyun 	if (consumed_down)
1409*4882a593Smuzhiyun 		*consumed_down = down_bw;
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun 	return 0;
1412*4882a593Smuzhiyun }
1413*4882a593Smuzhiyun 
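/*
 * A sketch of how a caller can subtract what a tunnel consumes from the
 * bandwidth still available on a link (variable names hypothetical):
 *
 *	int consumed_up, consumed_down, ret;
 *
 *	ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up,
 *					   &consumed_down);
 *	if (ret)
 *		return ret;
 *
 *	available_up -= consumed_up;
 *	available_down -= consumed_down;
 */
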
1414*4882a593Smuzhiyun /**
1415*4882a593Smuzhiyun  * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
1416*4882a593Smuzhiyun  * @tunnel: Tunnel whose unused bandwidth to release
1417*4882a593Smuzhiyun  *
1418*4882a593Smuzhiyun  * If the tunnel supports dynamic bandwidth management (only USB3 tunnels
1419*4882a593Smuzhiyun  * at the moment), this function makes it release all of its unused bandwidth.
1420*4882a593Smuzhiyun  *
1421*4882a593Smuzhiyun  * Returns %0 in case of success and negative errno otherwise.
1422*4882a593Smuzhiyun  */
1423*4882a593Smuzhiyun int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
1424*4882a593Smuzhiyun {
1425*4882a593Smuzhiyun 	if (!tb_tunnel_is_active(tunnel))
1426*4882a593Smuzhiyun 		return 0;
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	if (tunnel->release_unused_bandwidth) {
1429*4882a593Smuzhiyun 		int ret;
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 		ret = tunnel->release_unused_bandwidth(tunnel);
1432*4882a593Smuzhiyun 		if (ret)
1433*4882a593Smuzhiyun 			return ret;
1434*4882a593Smuzhiyun 	}
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	return 0;
1437*4882a593Smuzhiyun }
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun /**
1440*4882a593Smuzhiyun  * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
1441*4882a593Smuzhiyun  * @tunnel: Tunnel reclaiming available bandwidth
1442*4882a593Smuzhiyun  * @available_up: Available upstream bandwidth (in Mb/s)
1443*4882a593Smuzhiyun  * @available_down: Available downstream bandwidth (in Mb/s)
1444*4882a593Smuzhiyun  *
1445*4882a593Smuzhiyun  * Reclaims bandwidth from @available_up and @available_down and updates
1446*4882a593Smuzhiyun  * the variables accordingly (e.g. decreases both according to what was
1447*4882a593Smuzhiyun  * reclaimed by the tunnel). If nothing was reclaimed, the values are
1448*4882a593Smuzhiyun  * kept as is.
1449*4882a593Smuzhiyun  */
1450*4882a593Smuzhiyun void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1451*4882a593Smuzhiyun 					   int *available_up,
1452*4882a593Smuzhiyun 					   int *available_down)
1453*4882a593Smuzhiyun {
1454*4882a593Smuzhiyun 	if (!tb_tunnel_is_active(tunnel))
1455*4882a593Smuzhiyun 		return;
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 	if (tunnel->reclaim_available_bandwidth)
1458*4882a593Smuzhiyun 		tunnel->reclaim_available_bandwidth(tunnel, available_up,
1459*4882a593Smuzhiyun 						    available_down);
1460*4882a593Smuzhiyun }
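/*
 * The two helpers above are meant to be used as a pair when bandwidth is
 * rebalanced: every tunnel first gives back what it does not use, the
 * caller recomputes what is available, and the tunnels then grow their
 * allocations again (sketch, hypothetical caller and list):
 *
 *	struct tb_tunnel *tunnel;
 *	int available_up, available_down;
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list)
 *		tb_tunnel_release_unused_bandwidth(tunnel);
 *
 *	(recompute available_up and available_down here)
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list)
 *		tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
 *						      &available_down);
 */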
1461