// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - eeprom access
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/slab.h>
#include "tb.h"

/**
 * tb_eeprom_ctl_write() - write control word
 */
static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
	return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}

/**
 * tb_eeprom_ctl_read() - read control word
 */
static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
	return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}

enum tb_eeprom_transfer {
	TB_EEPROM_IN,
	TB_EEPROM_OUT,
};

/**
 * tb_eeprom_active - enable rom access
 *
 * WARNING: Always disable access after usage. Otherwise the controller will
 * fail to reprobe.
 */
static int tb_eeprom_active(struct tb_switch *sw, bool enable)
{
	struct tb_eeprom_ctl ctl;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	if (enable) {
		ctl.access_high = 1;
		res = tb_eeprom_ctl_write(sw, &ctl);
		if (res)
			return res;
		ctl.access_low = 0;
		return tb_eeprom_ctl_write(sw, &ctl);
	} else {
		ctl.access_low = 1;
		res = tb_eeprom_ctl_write(sw, &ctl);
		if (res)
			return res;
		ctl.access_high = 0;
		return tb_eeprom_ctl_write(sw, &ctl);
	}
}
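
/*
 * Note on tb_eeprom_active(): the access_high and access_low bits are
 * toggled in two separate control-word writes rather than one combined
 * update. The exact hardware requirement is undocumented here, so this is
 * assumed to be intentional and the ordering should be preserved.
 */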

/**
 * tb_eeprom_transfer - transfer one bit
 *
 * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
 * If TB_EEPROM_OUT is passed, then ctl->data_out will be written.
 */
static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
			      enum tb_eeprom_transfer direction)
{
	int res;
	if (direction == TB_EEPROM_OUT) {
		res = tb_eeprom_ctl_write(sw, ctl);
		if (res)
			return res;
	}
	ctl->clock = 1;
	res = tb_eeprom_ctl_write(sw, ctl);
	if (res)
		return res;
	if (direction == TB_EEPROM_IN) {
		res = tb_eeprom_ctl_read(sw, ctl);
		if (res)
			return res;
	}
	ctl->clock = 0;
	return tb_eeprom_ctl_write(sw, ctl);
}
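
/*
 * The EEPROM is bit-banged through the control word. tb_eeprom_transfer()
 * implements one clock cycle: for TB_EEPROM_OUT the data bit is written out
 * before the clock is raised, for TB_EEPROM_IN the data bit is sampled while
 * the clock is high, and the clock is always returned to low at the end.
 */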

/**
 * tb_eeprom_out - write one byte to the bus
 */
static int tb_eeprom_out(struct tb_switch *sw, u8 val)
{
	struct tb_eeprom_ctl ctl;
	int i;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	for (i = 0; i < 8; i++) {
		ctl.data_out = val & 0x80;
		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
		if (res)
			return res;
		val <<= 1;
	}
	return 0;
}
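
/*
 * Bytes go out MSB first: bit 7 is placed on data_out, clocked out via
 * tb_eeprom_transfer(), and the value is shifted left for the next bit.
 */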

/**
 * tb_eeprom_in - read one byte from the bus
 */
static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
{
	struct tb_eeprom_ctl ctl;
	int i;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	*val = 0;
	for (i = 0; i < 8; i++) {
		*val <<= 1;
		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
		if (res)
			return res;
		*val |= ctl.data_in;
	}
	return 0;
}
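
/*
 * tb_eeprom_in() mirrors tb_eeprom_out(): eight clock cycles, MSB first,
 * accumulating the sampled data_in bits into *val.
 */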

/**
 * tb_eeprom_get_drom_offset - get drom offset within eeprom
 */
static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
{
	struct tb_cap_plug_events cap;
	int res;

	if (!sw->cap_plug_events) {
		tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
		return -ENODEV;
	}
	res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
			     sizeof(cap) / 4);
	if (res)
		return res;

	if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
		tb_sw_warn(sw, "no NVM\n");
		return -ENODEV;
	}

	if (cap.drom_offset > 0xffff) {
		tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
				cap.drom_offset);
		return -ENXIO;
	}
	*offset = cap.drom_offset;
	return 0;
}

/**
 * tb_eeprom_read_n - read count bytes from offset into val
 */
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
		size_t count)
{
	u16 drom_offset;
	int i, res;

	res = tb_eeprom_get_drom_offset(sw, &drom_offset);
	if (res)
		return res;

	offset += drom_offset;

	res = tb_eeprom_active(sw, true);
	if (res)
		return res;
	res = tb_eeprom_out(sw, 3);
	if (res)
		return res;
	res = tb_eeprom_out(sw, offset >> 8);
	if (res)
		return res;
	res = tb_eeprom_out(sw, offset);
	if (res)
		return res;
	for (i = 0; i < count; i++) {
		res = tb_eeprom_in(sw, val + i);
		if (res)
			return res;
	}
	return tb_eeprom_active(sw, false);
}
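
/*
 * The read sequence above is: enable EEPROM access, send command byte 3,
 * send the 16-bit address (drom_offset + offset) high byte first, clock in
 * 'count' data bytes and disable access again. Interpreting command byte 3
 * as the READ opcode of a typical SPI EEPROM is an assumption; the EEPROM
 * part itself is not identified by this driver.
 */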

static u8 tb_crc8(u8 *data, int len)
{
	int i, j;
	u8 val = 0xff;
	for (i = 0; i < len; i++) {
		val ^= data[i];
		for (j = 0; j < 8; j++)
			val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
	}
	return val;
}
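
/*
 * tb_crc8() is CRC-8 with polynomial 0x07 (x^8 + x^2 + x + 1), initial value
 * 0xff and no final XOR, processed MSB first. It is used for the UID
 * checksum stored in byte 0 of the DROM.
 */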

static u32 tb_crc32(void *data, size_t len)
{
	return ~__crc32c_le(~0, data, len);
}
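
/*
 * tb_crc32() computes CRC-32C (Castagnoli): __crc32c_le() seeded with all
 * ones and the result inverted. It is checked against the data_crc32 field
 * covering the DROM payload.
 */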

#define TB_DROM_DATA_START 13
struct tb_drom_header {
	/* BYTE 0 */
	u8 uid_crc8; /* checksum for uid */
	/* BYTES 1-8 */
	u64 uid;
	/* BYTES 9-12 */
	u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
	/* BYTE 13 */
	u8 device_rom_revision; /* should be <= 1 */
	u16 data_len:10;
	u8 __unknown1:6;
	/* BYTES 16-21 */
	u16 vendor_id;
	u16 model_id;
	u8 model_rev;
	u8 eeprom_rev;
} __packed;
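
/*
 * DROM layout as consumed by this file:
 *
 *   byte 0       uid_crc8 over bytes 1-8
 *   bytes 1-8    uid
 *   bytes 9-12   data_crc32 over data_len bytes starting at byte 13
 *   bytes 13-21  remaining header fields (revision, data_len, vendor/model)
 *   byte 22...   linked list of entries, see tb_drom_parse_entries()
 */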

enum tb_drom_entry_type {
	/* force unsigned to prevent "one-bit signed bitfield" warning */
	TB_DROM_ENTRY_GENERIC = 0U,
	TB_DROM_ENTRY_PORT,
};

struct tb_drom_entry_header {
	u8 len;
	u8 index:6;
	bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
	enum tb_drom_entry_type type:1;
} __packed;

struct tb_drom_entry_generic {
	struct tb_drom_entry_header header;
	u8 data[];
} __packed;

struct tb_drom_entry_port {
	/* BYTES 0-1 */
	struct tb_drom_entry_header header;
	/* BYTE 2 */
	u8 dual_link_port_rid:4;
	u8 link_nr:1;
	u8 unknown1:2;
	bool has_dual_link_port:1;

	/* BYTE 3 */
	u8 dual_link_port_nr:6;
	u8 unknown2:2;

	/* BYTES 4 - 5 TODO decode */
	u8 micro2:4;
	u8 micro1:4;
	u8 micro3;

	/* BYTES 6-7, TODO: verify (find hardware that has these set) */
	u8 peer_port_rid:4;
	u8 unknown3:3;
	bool has_peer_port:1;
	u8 peer_port_nr:6;
	u8 unknown4:2;
} __packed;
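
/*
 * A port entry is 8 bytes: the common 2-byte header followed by the fields
 * above. Only the dual-link information is consumed by this driver; the
 * peer-port bytes are carried along but currently unused.
 */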


/**
 * tb_drom_read_uid_only - read uid directly from drom
 *
 * Does not use the cached copy in sw->drom. Used during resume to check switch
 * identity.
 */
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
{
	u8 data[9];
	u8 crc;
	int res;

	/* read uid */
	res = tb_eeprom_read_n(sw, 0, data, 9);
	if (res)
		return res;

	crc = tb_crc8(data + 1, 8);
	if (crc != data[0]) {
		tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
				data[0], crc);
		return -EIO;
	}

	*uid = *(u64 *)(data+1);
	return 0;
}

static int tb_drom_parse_entry_generic(struct tb_switch *sw,
		struct tb_drom_entry_header *header)
{
	const struct tb_drom_entry_generic *entry =
		(const struct tb_drom_entry_generic *)header;

	switch (header->index) {
	case 1:
		/* Length includes 2 bytes header so remove it before copy */
		sw->vendor_name = kstrndup(entry->data,
			header->len - sizeof(*header), GFP_KERNEL);
		if (!sw->vendor_name)
			return -ENOMEM;
		break;

	case 2:
		sw->device_name = kstrndup(entry->data,
			header->len - sizeof(*header), GFP_KERNEL);
		if (!sw->device_name)
			return -ENOMEM;
		break;
	}

	return 0;
}

static int tb_drom_parse_entry_port(struct tb_switch *sw,
				    struct tb_drom_entry_header *header)
{
	struct tb_port *port;
	int res;
	enum tb_port_type type;

	/*
	 * Some DROMs list more ports than the controller actually has
	 * so we skip those but allow the parser to continue.
	 */
	if (header->index > sw->config.max_port_number) {
		dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
		return 0;
	}

	port = &sw->ports[header->index];
	port->disabled = header->port_disabled;
	if (port->disabled)
		return 0;

	res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
	if (res)
		return res;
	type &= 0xffffff;

	if (type == TB_TYPE_PORT) {
		struct tb_drom_entry_port *entry = (void *) header;
		if (header->len != sizeof(*entry)) {
			tb_sw_warn(sw,
				"port entry has size %#x (expected %#zx)\n",
				header->len, sizeof(struct tb_drom_entry_port));
			return -EIO;
		}
		port->link_nr = entry->link_nr;
		if (entry->has_dual_link_port)
			port->dual_link_port =
				&port->sw->ports[entry->dual_link_port_nr];
	}
	return 0;
}

/**
 * tb_drom_parse_entries - parse the linked list of drom entries
 *
 * Drom must have been copied to sw->drom.
 */
static int tb_drom_parse_entries(struct tb_switch *sw)
{
	struct tb_drom_header *header = (void *) sw->drom;
	u16 pos = sizeof(*header);
	u16 drom_size = header->data_len + TB_DROM_DATA_START;
	int res;

	while (pos < drom_size) {
		struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
		if (pos + 1 == drom_size || pos + entry->len > drom_size
				|| !entry->len) {
			tb_sw_warn(sw, "DROM buffer overrun\n");
			return -EILSEQ;
		}

		switch (entry->type) {
		case TB_DROM_ENTRY_GENERIC:
			res = tb_drom_parse_entry_generic(sw, entry);
			break;
		case TB_DROM_ENTRY_PORT:
			res = tb_drom_parse_entry_port(sw, entry);
			break;
		}
		if (res)
			return res;

		pos += entry->len;
	}
	return 0;
}
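
/*
 * Entry list walk: each entry begins with a struct tb_drom_entry_header
 * whose len field gives the total entry size including the header, so the
 * parser advances by entry->len until drom_size is reached. A zero length or
 * an entry running past the buffer is treated as corruption (-EILSEQ), which
 * tb_drom_read() uses as the trigger for its single retry.
 */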

/**
 * tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
 */
static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
{
	struct device *dev = &sw->tb->nhi->pdev->dev;
	int len, res;

	len = device_property_count_u8(dev, "ThunderboltDROM");
	if (len < 0 || len < sizeof(struct tb_drom_header))
		return -EINVAL;

	sw->drom = kmalloc(len, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
									len);
	if (res)
		goto err;

	*size = ((struct tb_drom_header *)sw->drom)->data_len +
							  TB_DROM_DATA_START;
	if (*size > len)
		goto err;

	return 0;

err:
	kfree(sw->drom);
	sw->drom = NULL;
	return -EINVAL;
}

static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
{
	u32 drom_offset;
	int ret;

	if (!sw->dma_port)
		return -ENODEV;

	ret = tb_sw_read(sw, &drom_offset, TB_CFG_SWITCH,
			 sw->cap_plug_events + 12, 1);
	if (ret)
		return ret;

	if (!drom_offset)
		return -ENODEV;

	ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
				  sizeof(*size));
	if (ret)
		return ret;

	/* Size includes CRC8 + UID + CRC32 */
	*size += 1 + 8 + 4;
	sw->drom = kzalloc(*size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
	if (ret)
		goto err_free;

	/*
	 * Read UID from the minimal DROM because the one in NVM is just
	 * a placeholder.
	 */
	tb_drom_read_uid_only(sw, &sw->uid);
	return 0;

err_free:
	kfree(sw->drom);
	sw->drom = NULL;
	return ret;
}
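
/*
 * Both tb_drom_copy_nvm() above and usb4_copy_host_drom() below read at
 * offset 14, which is where the 16-bit word holding data_len lives in
 * struct tb_drom_header (bytes 14-15); the CRC8, UID and CRC32 in front of
 * it account for the extra 1 + 8 + 4 bytes added to the size.
 */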

static int usb4_copy_host_drom(struct tb_switch *sw, u16 *size)
{
	int ret;

	ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size));
	if (ret)
		return ret;

	/* Size includes CRC8 + UID + CRC32 */
	*size += 1 + 8 + 4;
	sw->drom = kzalloc(*size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
	if (ret) {
		kfree(sw->drom);
		sw->drom = NULL;
	}

	return ret;
}

static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
			  size_t count)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_drom_read(sw, offset, val, count);
	return tb_eeprom_read_n(sw, offset, val, count);
}
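
/*
 * tb_drom_read_n() hides the transport difference: USB4 routers expose the
 * DROM through router operations (usb4_switch_drom_read()), while older
 * Thunderbolt switches need the bit-banged EEPROM access implemented above.
 */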

/**
 * tb_drom_read - copy drom to sw->drom and parse it
 */
int tb_drom_read(struct tb_switch *sw)
{
	u16 size;
	u32 crc;
	struct tb_drom_header *header;
	int res, retries = 1;

	if (sw->drom)
		return 0;

	if (tb_route(sw) == 0) {
		/*
		 * Apple's NHI EFI driver supplies a DROM for the root switch
		 * in a device property. Use it if available.
		 */
		if (tb_drom_copy_efi(sw, &size) == 0)
			goto parse;

		/* Non-Apple hardware has the DROM as part of NVM */
		if (tb_drom_copy_nvm(sw, &size) == 0)
			goto parse;

		/*
		 * USB4 hosts may support reading DROM through router
		 * operations.
		 */
		if (tb_switch_is_usb4(sw)) {
			usb4_switch_read_uid(sw, &sw->uid);
			if (!usb4_copy_host_drom(sw, &size))
				goto parse;
		} else {
			/*
			 * The root switch contains only a dummy drom
			 * (header only, no entries). Hardcode the
			 * configuration here.
			 */
			tb_drom_read_uid_only(sw, &sw->uid);
		}

		return 0;
	}

	res = tb_drom_read_n(sw, 14, (u8 *) &size, 2);
	if (res)
		return res;
	size &= 0x3ff;
	size += TB_DROM_DATA_START;
	tb_sw_dbg(sw, "reading drom (length: %#x)\n", size);
	if (size < sizeof(*header)) {
		tb_sw_warn(sw, "drom too small, aborting\n");
		return -EIO;
	}

	sw->drom = kzalloc(size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;
	res = tb_drom_read_n(sw, 0, sw->drom, size);
	if (res)
		goto err;

parse:
	header = (void *) sw->drom;

	if (header->data_len + TB_DROM_DATA_START != size) {
		tb_sw_warn(sw, "drom size mismatch, aborting\n");
		goto err;
	}

	crc = tb_crc8((u8 *) &header->uid, 8);
	if (crc != header->uid_crc8) {
		tb_sw_warn(sw,
			"drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
			header->uid_crc8, crc);
		goto err;
	}
	if (!sw->uid)
		sw->uid = header->uid;
	sw->vendor = header->vendor_id;
	sw->device = header->model_id;
	tb_check_quirks(sw);

	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
	if (crc != header->data_crc32) {
		tb_sw_warn(sw,
			"drom data crc32 mismatch (expected: %#x, got: %#x), continuing\n",
			header->data_crc32, crc);
	}

	if (header->device_rom_revision > 2)
		tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
			header->device_rom_revision);

	res = tb_drom_parse_entries(sw);
	/* If the DROM parsing fails, wait a moment and retry once */
	if (res == -EILSEQ && retries--) {
		tb_sw_warn(sw, "parsing DROM failed, retrying\n");
		msleep(100);
		res = tb_drom_read_n(sw, 0, sw->drom, size);
		if (!res)
			goto parse;
	}

	return res;
err:
	kfree(sw->drom);
	sw->drom = NULL;
	return -EIO;
}