// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt DMA configuration based mailbox support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "dma_port.h"
#include "tb_regs.h"

#define DMA_PORT_CAP			0x3e

#define MAIL_DATA			1
#define MAIL_DATA_DWORDS		16

#define MAIL_IN				17
#define MAIL_IN_CMD_SHIFT		28
#define MAIL_IN_CMD_MASK		GENMASK(31, 28)
#define MAIL_IN_CMD_FLASH_WRITE		0x0
#define MAIL_IN_CMD_FLASH_UPDATE_AUTH	0x1
#define MAIL_IN_CMD_FLASH_READ		0x2
#define MAIL_IN_CMD_POWER_CYCLE		0x4
#define MAIL_IN_DWORDS_SHIFT		24
#define MAIL_IN_DWORDS_MASK		GENMASK(27, 24)
#define MAIL_IN_ADDRESS_SHIFT		2
#define MAIL_IN_ADDRESS_MASK		GENMASK(23, 2)
#define MAIL_IN_CSS			BIT(1)
#define MAIL_IN_OP_REQUEST		BIT(0)

#define MAIL_OUT			18
#define MAIL_OUT_STATUS_RESPONSE	BIT(29)
#define MAIL_OUT_STATUS_CMD_SHIFT	4
#define MAIL_OUT_STATUS_CMD_MASK	GENMASK(7, 4)
#define MAIL_OUT_STATUS_MASK		GENMASK(3, 0)
#define MAIL_OUT_STATUS_COMPLETED	0
#define MAIL_OUT_STATUS_ERR_AUTH	1
#define MAIL_OUT_STATUS_ERR_ACCESS	2

#define DMA_PORT_TIMEOUT		5000 /* ms */
#define DMA_PORT_RETRIES		3

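/*
 * Illustrative sketch (not used by the driver below): how a MAIL_IN
 * request word is assembled from the fields defined above, here for a
 * flash read of @dwords dwords starting at dword address @dwaddress.
 * The helper name is made up for illustration only; the real
 * composition happens inline in dma_port_flash_read_block().
 */
static inline u32 example_mail_in_flash_read(u32 dwaddress, u32 dwords)
{
	u32 in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;

	/* The driver leaves the field zero for a full 16-dword transfer */
	if (dwords < MAIL_DATA_DWORDS)
		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	/* Setting the request bit asks the firmware to act on the word */
	return in | MAIL_IN_OP_REQUEST;
}
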
/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers
 * @buf: Temporary buffer to store a single block
 */
struct tb_dma_port {
	struct tb_switch *sw;
	u8 port;
	u32 base;
	u8 *buf;
};

/*
 * When the switch is in safe mode it supports very little functionality
 * so we don't validate that much here.
 */
static bool dma_port_match(const struct tb_cfg_request *req,
			   const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;
	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	return true;
}

static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	return true;
}

static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
			 u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_write_pkg reply;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_READ;
	req->response = &reply;
	req->response_size = 12 + 4 * length;
	req->response_type = TB_CFG_PKG_READ;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	if (res.err)
		return res.err;

	memcpy(buffer, &reply.data, 4 * length);
	return 0;
}

static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
			  u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_read_pkg reply;
	struct tb_cfg_result res;

	memcpy(&request.data, buffer, length * 4);

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = 12 + 4 * length;
	req->request_type = TB_CFG_PKG_WRITE;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_WRITE;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err;
}

static int dma_find_port(struct tb_switch *sw)
{
	static const int ports[] = { 3, 5, 7 };
	int i;

	/*
	 * The DMA (NHI) port is either 3, 5 or 7 depending on the
	 * controller. Try all of them.
	 */
	for (i = 0; i < ARRAY_SIZE(ports); i++) {
		u32 type;
		int ret;

		ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
				    2, 1, DMA_PORT_TIMEOUT);
		if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
			return ports[i];
	}

	return -ENODEV;
}

/**
 * dma_port_alloc() - Find the DMA control port of a switch
 * @sw: Switch from which the DMA port is looked up
 *
 * Checks whether the switch NHI port supports the DMA configuration
 * based mailbox capability and, if it does, allocates and initializes
 * the DMA port structure. Returns %NULL if the capability was not found.
 *
 * The DMA control port is functional also when the switch is in safe
 * mode.
 */
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
{
	struct tb_dma_port *dma;
	int port;

	port = dma_find_port(sw);
	if (port < 0)
		return NULL;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
	if (!dma->buf) {
		kfree(dma);
		return NULL;
	}

	dma->sw = sw;
	dma->port = port;
	dma->base = DMA_PORT_CAP;

	return dma;
}

/**
 * dma_port_free() - Release DMA control port structure
 * @dma: DMA control port
 */
void dma_port_free(struct tb_dma_port *dma)
{
	if (dma) {
		kfree(dma->buf);
		kfree(dma);
	}
}
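
/*
 * Illustrative usage sketch (not part of the driver): a typical
 * caller, such as the NVM upgrade code, pairs the two functions above
 * around the mailbox operations. The function name is made up for
 * illustration.
 */
static int __maybe_unused example_dma_port_lifetime(struct tb_switch *sw)
{
	struct tb_dma_port *dma;

	/* Returns NULL if the NHI port lacks the mailbox capability */
	dma = dma_port_alloc(sw);
	if (!dma)
		return -ENODEV;

	/* ... issue mailbox operations through @dma here ... */

	dma_port_free(dma);
	return 0;
}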

static int dma_port_wait_for_completion(struct tb_dma_port *dma,
					unsigned int timeout)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout);
	struct tb_switch *sw = dma->sw;

	do {
		int ret;
		u32 in;

		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
				    dma->base + MAIL_IN, 1, 50);
		if (ret) {
			if (ret != -ETIMEDOUT)
				return ret;
		} else if (!(in & MAIL_IN_OP_REQUEST)) {
			return 0;
		}

		usleep_range(50, 100);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int status_to_errno(u32 status)
{
	switch (status & MAIL_OUT_STATUS_MASK) {
	case MAIL_OUT_STATUS_COMPLETED:
		return 0;
	case MAIL_OUT_STATUS_ERR_AUTH:
		return -EINVAL;
	case MAIL_OUT_STATUS_ERR_ACCESS:
		return -EACCES;
	}

	return -EIO;
}

static int dma_port_request(struct tb_dma_port *dma, u32 in,
			    unsigned int timeout)
{
	struct tb_switch *sw = dma->sw;
	u32 out;
	int ret;

	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
			     dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	ret = dma_port_wait_for_completion(dma, timeout);
	if (ret)
		return ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return status_to_errno(out);
}

static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
				     void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwaddress = address / 4;
	dwords = size / 4;

	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
	if (dwords < MAIL_DATA_DWORDS)
		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}

static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
				      const void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwords = size / 4;

	/* Write the block to MAIL_DATA registers */
	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
			    dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;

	/* CSS header write is always done to the same magic address */
	if (address >= DMA_PORT_CSS_ADDRESS) {
		dwaddress = DMA_PORT_CSS_ADDRESS;
		in |= MAIL_IN_CSS;
	} else {
		dwaddress = address / 4;
	}

	in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
}

/**
 * dma_port_flash_read() - Read from active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of active region
 * @buf: Buffer where the data is read
 * @size: Size of the buffer
 */
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
			void *buf, size_t size)
{
	unsigned int retries = DMA_PORT_RETRIES;

	do {
		unsigned int offset;
		size_t nbytes;
		int ret;

		offset = address & 3;
		nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);

		ret = dma_port_flash_read_block(dma, address, dma->buf,
						ALIGN(nbytes, 4));
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		nbytes -= offset;
		memcpy(buf, dma->buf + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
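
/*
 * Illustrative usage sketch (not part of the driver): reading the
 * beginning of the active NVM through the mailbox. The function name
 * and the 64-byte length are made up for illustration.
 */
static int __maybe_unused example_read_nvm_header(struct tb_dma_port *dma)
{
	u8 header[64];

	/* Timed-out mailbox reads are retried internally up to 3 times */
	return dma_port_flash_read(dma, 0, header, sizeof(header));
}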

/**
 * dma_port_flash_write() - Write to non-active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of non-active region
 * @buf: Data to write
 * @size: Size of the buffer
 *
 * Writes a block of data to the non-active flash region of the switch.
 * If the address is given as %DMA_PORT_CSS_ADDRESS the block is written
 * using the CSS command.
 */
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
			 const void *buf, size_t size)
{
	unsigned int retries = DMA_PORT_RETRIES;
	unsigned int offset;

	if (address >= DMA_PORT_CSS_ADDRESS) {
		offset = 0;
		if (size > DMA_PORT_CSS_MAX_SIZE)
			return -E2BIG;
	} else {
		offset = address & 3;
		address = address & ~3;
	}

	do {
		/* Clamp so an unaligned @offset cannot overflow dma->buf */
		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4 - offset);
		int ret;

		/* Stage the chunk into the bounce buffer at the offset */
		memcpy(dma->buf + offset, buf, nbytes);

		/* Send the staged bounce buffer, not the caller's buffer */
		ret = dma_port_flash_write_block(dma, address, dma->buf,
						 nbytes);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}

/**
 * dma_port_flash_update_auth() - Starts the flash authentication cycle
 * @dma: DMA control port
 *
 * Starts the flash update authentication cycle. If the image in the
 * non-active area is valid, the switch starts the upgrade process in
 * which the active and non-active areas are swapped in the end. The
 * caller should use dma_port_flash_update_auth_status() to get the
 * status of this command, because if the switch in question is the
 * root switch, the Thunderbolt host controller gets reset as well.
 */
int dma_port_flash_update_auth(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}

/**
 * dma_port_flash_update_auth_status() - Reads status of update auth command
 * @dma: DMA control port
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last update
 * auth command. Returns %0 if there is no status and no further
 * action is required. If there is status, %1 is returned instead and
 * @status holds the failure code.
 *
 * Negative return means there was an error reading status from the
 * switch.
 */
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
	struct tb_switch *sw = dma->sw;
	u32 out, cmd;
	int ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Check if the status relates to flash update auth */
	cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
	if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
		if (status)
			*status = out & MAIL_OUT_STATUS_MASK;

		/* Reset is needed in any case */
		return 1;
	}

	return 0;
}

/**
 * dma_port_power_cycle() - Power cycles the switch
 * @dma: DMA control port
 *
 * Triggers a power cycle of the switch.
 */
int dma_port_power_cycle(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}
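
/*
 * Illustrative end-to-end sketch (not part of the driver): how the
 * mailbox operations above compose into an NVM upgrade. The function
 * name, @image and @size are made up for illustration; the real flow
 * lives in the NVM upgrade code that consumes this API.
 */
static int __maybe_unused example_nvm_upgrade(struct tb_dma_port *dma,
					      const void *image, size_t size)
{
	u32 status;
	int ret;

	/* 1. Write the new image to the non-active flash region */
	ret = dma_port_flash_write(dma, 0, image, size);
	if (ret)
		return ret;

	/* 2. Start authentication; the switch may reset during this */
	ret = dma_port_flash_update_auth(dma);
	if (ret)
		return ret;

	/* 3. Later, check whether the auth command left status behind */
	ret = dma_port_flash_update_auth_status(dma, &status);
	if (ret <= 0)
		return ret;

	/* 4. Status available: the switch needs a power cycle in any case */
	ret = dma_port_power_cycle(dma);
	if (ret)
		return ret;

	return status ? -EINVAL : 0;
}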