xref: /OK3568_Linux_fs/kernel/drivers/bluetooth/btintel.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  *  Bluetooth support for Intel devices
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  *  Copyright (C) 2015  Intel Corporation
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/module.h>
10*4882a593Smuzhiyun #include <linux/firmware.h>
11*4882a593Smuzhiyun #include <linux/regmap.h>
12*4882a593Smuzhiyun #include <asm/unaligned.h>
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #include <net/bluetooth/bluetooth.h>
15*4882a593Smuzhiyun #include <net/bluetooth/hci_core.h>
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #include "btintel.h"
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #define VERSION "0.1"
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #define BDADDR_INTEL		(&(bdaddr_t){{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
22*4882a593Smuzhiyun #define RSA_HEADER_LEN		644
23*4882a593Smuzhiyun #define CSS_HEADER_OFFSET	8
24*4882a593Smuzhiyun #define ECDSA_OFFSET		644
25*4882a593Smuzhiyun #define ECDSA_HEADER_LEN	320
26*4882a593Smuzhiyun 
btintel_check_bdaddr(struct hci_dev * hdev)27*4882a593Smuzhiyun int btintel_check_bdaddr(struct hci_dev *hdev)
28*4882a593Smuzhiyun {
29*4882a593Smuzhiyun 	struct hci_rp_read_bd_addr *bda;
30*4882a593Smuzhiyun 	struct sk_buff *skb;
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun 	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
33*4882a593Smuzhiyun 			     HCI_INIT_TIMEOUT);
34*4882a593Smuzhiyun 	if (IS_ERR(skb)) {
35*4882a593Smuzhiyun 		int err = PTR_ERR(skb);
36*4882a593Smuzhiyun 		bt_dev_err(hdev, "Reading Intel device address failed (%d)",
37*4882a593Smuzhiyun 			   err);
38*4882a593Smuzhiyun 		return err;
39*4882a593Smuzhiyun 	}
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun 	if (skb->len != sizeof(*bda)) {
42*4882a593Smuzhiyun 		bt_dev_err(hdev, "Intel device address length mismatch");
43*4882a593Smuzhiyun 		kfree_skb(skb);
44*4882a593Smuzhiyun 		return -EIO;
45*4882a593Smuzhiyun 	}
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun 	bda = (struct hci_rp_read_bd_addr *)skb->data;
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun 	/* For some Intel based controllers, the default Bluetooth device
50*4882a593Smuzhiyun 	 * address 00:03:19:9E:8B:00 can be found. These controllers are
51*4882a593Smuzhiyun 	 * fully operational, but have the danger of duplicate addresses
52*4882a593Smuzhiyun 	 * and that in turn can cause problems with Bluetooth operation.
53*4882a593Smuzhiyun 	 */
54*4882a593Smuzhiyun 	if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) {
55*4882a593Smuzhiyun 		bt_dev_err(hdev, "Found Intel default device address (%pMR)",
56*4882a593Smuzhiyun 			   &bda->bdaddr);
57*4882a593Smuzhiyun 		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
58*4882a593Smuzhiyun 	}
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun 	kfree_skb(skb);
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun 	return 0;
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btintel_check_bdaddr);
65*4882a593Smuzhiyun 
btintel_enter_mfg(struct hci_dev * hdev)66*4882a593Smuzhiyun int btintel_enter_mfg(struct hci_dev *hdev)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun 	static const u8 param[] = { 0x01, 0x00 };
69*4882a593Smuzhiyun 	struct sk_buff *skb;
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
72*4882a593Smuzhiyun 	if (IS_ERR(skb)) {
73*4882a593Smuzhiyun 		bt_dev_err(hdev, "Entering manufacturer mode failed (%ld)",
74*4882a593Smuzhiyun 			   PTR_ERR(skb));
75*4882a593Smuzhiyun 		return PTR_ERR(skb);
76*4882a593Smuzhiyun 	}
77*4882a593Smuzhiyun 	kfree_skb(skb);
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun 	return 0;
80*4882a593Smuzhiyun }
81*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btintel_enter_mfg);
82*4882a593Smuzhiyun 
/* Disable Intel manufacturer mode, optionally resetting the controller.
 *
 * The second command parameter selects the exit method:
 *   0x00 - just leave manufacturing mode
 *   0x01 - leave and reset with patches deactivated
 *   0x02 - leave and reset with patches activated
 */
int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched)
{
	struct sk_buff *skb;
	u8 param[2] = { 0x00, 0x00 };

	if (reset)
		param[1] = patched ? 0x02 : 0x01;

	skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(param), param,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Exiting manufacturer mode failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_exit_mfg);
107*4882a593Smuzhiyun 
btintel_set_bdaddr(struct hci_dev * hdev,const bdaddr_t * bdaddr)108*4882a593Smuzhiyun int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
109*4882a593Smuzhiyun {
110*4882a593Smuzhiyun 	struct sk_buff *skb;
111*4882a593Smuzhiyun 	int err;
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
114*4882a593Smuzhiyun 	if (IS_ERR(skb)) {
115*4882a593Smuzhiyun 		err = PTR_ERR(skb);
116*4882a593Smuzhiyun 		bt_dev_err(hdev, "Changing Intel device address failed (%d)",
117*4882a593Smuzhiyun 			   err);
118*4882a593Smuzhiyun 		return err;
119*4882a593Smuzhiyun 	}
120*4882a593Smuzhiyun 	kfree_skb(skb);
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun 	return 0;
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
125*4882a593Smuzhiyun 
/* Toggle Intel diagnostic mode (vendor command 0xfc43) and update the
 * vendor event mask accordingly. A controller that answers -ENODATA is
 * treated as already being in the requested state.
 */
int btintel_set_diag(struct hci_dev *hdev, bool enable)
{
	struct sk_buff *skb;
	u8 param[3];
	int err;

	/* All three parameter bytes carry the same value: 0x03 to enable
	 * diagnostics, 0x00 to disable them.
	 */
	memset(param, enable ? 0x03 : 0x00, sizeof(param));

	skb = __hci_cmd_sync(hdev, 0xfc43, sizeof(param), param,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		if (err == -ENODATA)
			goto done;
		bt_dev_err(hdev, "Changing Intel diagnostic mode failed (%d)",
			   err);
		return err;
	}
	kfree_skb(skb);

done:
	btintel_set_event_mask(hdev, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_diag);
158*4882a593Smuzhiyun 
/* Same as btintel_set_diag() but wrapped in manufacturer mode entry and
 * exit. An exit failure takes precedence over the diag result.
 */
int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
{
	int ret, err;

	err = btintel_enter_mfg(hdev);
	if (err)
		return err;

	ret = btintel_set_diag(hdev, enable);

	err = btintel_exit_mfg(hdev, false, false);
	if (err)
		return err;

	return ret;
}
EXPORT_SYMBOL_GPL(btintel_set_diag_mfg);
176*4882a593Smuzhiyun 
/* Handle a controller hardware error: log it, reset the controller and
 * dump the Intel exception info record (vendor command 0xfc22) for
 * debugging.
 */
void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
	struct sk_buff *skb;
	u8 type = 0x00;

	bt_dev_err(hdev, "Hardware error 0x%2.2x", code);

	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reset after hardware error failed (%ld)",
			   PTR_ERR(skb));
		return;
	}
	kfree_skb(skb);

	skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Retrieving Intel exception info failed (%ld)",
			   PTR_ERR(skb));
		return;
	}

	if (skb->len != 13) {
		bt_dev_err(hdev, "Exception info size mismatch");
		kfree_skb(skb);
		return;
	}

	/* Bound the print to the 12 info bytes following the status byte:
	 * the controller does not guarantee NUL termination, so an
	 * unbounded "%s" could read past the event payload.
	 */
	bt_dev_err(hdev, "Exception info %.12s", (char *)(skb->data + 1));

	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(btintel_hw_error);
210*4882a593Smuzhiyun 
btintel_version_info(struct hci_dev * hdev,struct intel_version * ver)211*4882a593Smuzhiyun void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun 	const char *variant;
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 	switch (ver->fw_variant) {
216*4882a593Smuzhiyun 	case 0x06:
217*4882a593Smuzhiyun 		variant = "Bootloader";
218*4882a593Smuzhiyun 		break;
219*4882a593Smuzhiyun 	case 0x23:
220*4882a593Smuzhiyun 		variant = "Firmware";
221*4882a593Smuzhiyun 		break;
222*4882a593Smuzhiyun 	default:
223*4882a593Smuzhiyun 		return;
224*4882a593Smuzhiyun 	}
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	bt_dev_info(hdev, "%s revision %u.%u build %u week %u %u",
227*4882a593Smuzhiyun 		    variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
228*4882a593Smuzhiyun 		    ver->fw_build_num, ver->fw_build_ww,
229*4882a593Smuzhiyun 		    2000 + ver->fw_build_yy);
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btintel_version_info);
232*4882a593Smuzhiyun 
/* Send a secure-send payload (vendor command 0xfc09) to the controller.
 * The command carries at most 252 payload bytes plus a one byte fragment
 * type, so larger payloads are split into consecutive fragments.
 */
int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
			const void *param)
{
	u8 cmd_param[253];

	while (plen > 0) {
		struct sk_buff *skb;
		u8 fragment_len = (plen > 252) ? 252 : plen;

		cmd_param[0] = fragment_type;
		memcpy(cmd_param + 1, param, fragment_len);

		skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
				     cmd_param, HCI_INIT_TIMEOUT);
		if (IS_ERR(skb))
			return PTR_ERR(skb);
		kfree_skb(skb);

		param += fragment_len;
		plen -= fragment_len;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_secure_send);
257*4882a593Smuzhiyun 
btintel_load_ddc_config(struct hci_dev * hdev,const char * ddc_name)258*4882a593Smuzhiyun int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name)
259*4882a593Smuzhiyun {
260*4882a593Smuzhiyun 	const struct firmware *fw;
261*4882a593Smuzhiyun 	struct sk_buff *skb;
262*4882a593Smuzhiyun 	const u8 *fw_ptr;
263*4882a593Smuzhiyun 	int err;
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun 	err = request_firmware_direct(&fw, ddc_name, &hdev->dev);
266*4882a593Smuzhiyun 	if (err < 0) {
267*4882a593Smuzhiyun 		bt_dev_err(hdev, "Failed to load Intel DDC file %s (%d)",
268*4882a593Smuzhiyun 			   ddc_name, err);
269*4882a593Smuzhiyun 		return err;
270*4882a593Smuzhiyun 	}
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	bt_dev_info(hdev, "Found Intel DDC parameters: %s", ddc_name);
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	fw_ptr = fw->data;
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	/* DDC file contains one or more DDC structure which has
277*4882a593Smuzhiyun 	 * Length (1 byte), DDC ID (2 bytes), and DDC value (Length - 2).
278*4882a593Smuzhiyun 	 */
279*4882a593Smuzhiyun 	while (fw->size > fw_ptr - fw->data) {
280*4882a593Smuzhiyun 		u8 cmd_plen = fw_ptr[0] + sizeof(u8);
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 		skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr,
283*4882a593Smuzhiyun 				     HCI_INIT_TIMEOUT);
284*4882a593Smuzhiyun 		if (IS_ERR(skb)) {
285*4882a593Smuzhiyun 			bt_dev_err(hdev, "Failed to send Intel_Write_DDC (%ld)",
286*4882a593Smuzhiyun 				   PTR_ERR(skb));
287*4882a593Smuzhiyun 			release_firmware(fw);
288*4882a593Smuzhiyun 			return PTR_ERR(skb);
289*4882a593Smuzhiyun 		}
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun 		fw_ptr += cmd_plen;
292*4882a593Smuzhiyun 		kfree_skb(skb);
293*4882a593Smuzhiyun 	}
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	release_firmware(fw);
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	bt_dev_info(hdev, "Applying Intel DDC parameters completed");
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun 	return 0;
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btintel_load_ddc_config);
302*4882a593Smuzhiyun 
/* Program the Intel vendor event mask (vendor command 0xfc52). The
 * extra events enabled by 0x62 in byte 1 are only of interest when
 * debugging is requested.
 */
int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
{
	u8 mask[8] = { 0x87, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct sk_buff *skb;
	int err;

	if (debug)
		mask[1] |= 0x62;

	skb = __hci_cmd_sync(hdev, 0xfc52, sizeof(mask), mask,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Setting Intel event mask failed (%d)", err);
		return err;
	}

	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask);
323*4882a593Smuzhiyun 
/* Same as btintel_set_event_mask() but wrapped in manufacturer mode
 * entry and exit. An exit failure takes precedence over the mask result.
 */
int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
{
	int ret, err;

	err = btintel_enter_mfg(hdev);
	if (err)
		return err;

	ret = btintel_set_event_mask(hdev, debug);

	err = btintel_exit_mfg(hdev, false, false);
	if (err)
		return err;

	return ret;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg);
341*4882a593Smuzhiyun 
btintel_read_version(struct hci_dev * hdev,struct intel_version * ver)342*4882a593Smuzhiyun int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
343*4882a593Smuzhiyun {
344*4882a593Smuzhiyun 	struct sk_buff *skb;
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
347*4882a593Smuzhiyun 	if (IS_ERR(skb)) {
348*4882a593Smuzhiyun 		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
349*4882a593Smuzhiyun 			   PTR_ERR(skb));
350*4882a593Smuzhiyun 		return PTR_ERR(skb);
351*4882a593Smuzhiyun 	}
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun 	if (skb->len != sizeof(*ver)) {
354*4882a593Smuzhiyun 		bt_dev_err(hdev, "Intel version event size mismatch");
355*4882a593Smuzhiyun 		kfree_skb(skb);
356*4882a593Smuzhiyun 		return -EILSEQ;
357*4882a593Smuzhiyun 	}
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	memcpy(ver, skb->data, sizeof(*ver));
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	kfree_skb(skb);
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	return 0;
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btintel_read_version);
366*4882a593Smuzhiyun 
btintel_version_info_tlv(struct hci_dev * hdev,struct intel_version_tlv * version)367*4882a593Smuzhiyun void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
368*4882a593Smuzhiyun {
369*4882a593Smuzhiyun 	const char *variant;
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 	switch (version->img_type) {
372*4882a593Smuzhiyun 	case 0x01:
373*4882a593Smuzhiyun 		variant = "Bootloader";
374*4882a593Smuzhiyun 		bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id);
375*4882a593Smuzhiyun 		bt_dev_info(hdev, "Secure boot is %s",
376*4882a593Smuzhiyun 			    version->secure_boot ? "enabled" : "disabled");
377*4882a593Smuzhiyun 		bt_dev_info(hdev, "OTP lock is %s",
378*4882a593Smuzhiyun 			    version->otp_lock ? "enabled" : "disabled");
379*4882a593Smuzhiyun 		bt_dev_info(hdev, "API lock is %s",
380*4882a593Smuzhiyun 			    version->api_lock ? "enabled" : "disabled");
381*4882a593Smuzhiyun 		bt_dev_info(hdev, "Debug lock is %s",
382*4882a593Smuzhiyun 			    version->debug_lock ? "enabled" : "disabled");
383*4882a593Smuzhiyun 		bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
384*4882a593Smuzhiyun 			    version->min_fw_build_nn, version->min_fw_build_cw,
385*4882a593Smuzhiyun 			    2000 + version->min_fw_build_yy);
386*4882a593Smuzhiyun 		break;
387*4882a593Smuzhiyun 	case 0x03:
388*4882a593Smuzhiyun 		variant = "Firmware";
389*4882a593Smuzhiyun 		break;
390*4882a593Smuzhiyun 	default:
391*4882a593Smuzhiyun 		bt_dev_err(hdev, "Unsupported image type(%02x)", version->img_type);
392*4882a593Smuzhiyun 		goto done;
393*4882a593Smuzhiyun 	}
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun 	bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant,
396*4882a593Smuzhiyun 		    2000 + (version->timestamp >> 8), version->timestamp & 0xff,
397*4882a593Smuzhiyun 		    version->build_type, version->build_num);
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun done:
400*4882a593Smuzhiyun 	return;
401*4882a593Smuzhiyun }
402*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btintel_version_info_tlv);
403*4882a593Smuzhiyun 
btintel_read_version_tlv(struct hci_dev * hdev,struct intel_version_tlv * version)404*4882a593Smuzhiyun int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
405*4882a593Smuzhiyun {
406*4882a593Smuzhiyun 	struct sk_buff *skb;
407*4882a593Smuzhiyun 	const u8 param[1] = { 0xFF };
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	if (!version)
410*4882a593Smuzhiyun 		return -EINVAL;
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun 	skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
413*4882a593Smuzhiyun 	if (IS_ERR(skb)) {
414*4882a593Smuzhiyun 		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
415*4882a593Smuzhiyun 			   PTR_ERR(skb));
416*4882a593Smuzhiyun 		return PTR_ERR(skb);
417*4882a593Smuzhiyun 	}
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun 	if (skb->data[0]) {
420*4882a593Smuzhiyun 		bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
421*4882a593Smuzhiyun 			   skb->data[0]);
422*4882a593Smuzhiyun 		kfree_skb(skb);
423*4882a593Smuzhiyun 		return -EIO;
424*4882a593Smuzhiyun 	}
425*4882a593Smuzhiyun 
426*4882a593Smuzhiyun 	/* Consume Command Complete Status field */
427*4882a593Smuzhiyun 	skb_pull(skb, 1);
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 	/* Event parameters contatin multiple TLVs. Read each of them
430*4882a593Smuzhiyun 	 * and only keep the required data. Also, it use existing legacy
431*4882a593Smuzhiyun 	 * version field like hw_platform, hw_variant, and fw_variant
432*4882a593Smuzhiyun 	 * to keep the existing setup flow
433*4882a593Smuzhiyun 	 */
434*4882a593Smuzhiyun 	while (skb->len) {
435*4882a593Smuzhiyun 		struct intel_tlv *tlv;
436*4882a593Smuzhiyun 
437*4882a593Smuzhiyun 		tlv = (struct intel_tlv *)skb->data;
438*4882a593Smuzhiyun 		switch (tlv->type) {
439*4882a593Smuzhiyun 		case INTEL_TLV_CNVI_TOP:
440*4882a593Smuzhiyun 			version->cnvi_top = get_unaligned_le32(tlv->val);
441*4882a593Smuzhiyun 			break;
442*4882a593Smuzhiyun 		case INTEL_TLV_CNVR_TOP:
443*4882a593Smuzhiyun 			version->cnvr_top = get_unaligned_le32(tlv->val);
444*4882a593Smuzhiyun 			break;
445*4882a593Smuzhiyun 		case INTEL_TLV_CNVI_BT:
446*4882a593Smuzhiyun 			version->cnvi_bt = get_unaligned_le32(tlv->val);
447*4882a593Smuzhiyun 			break;
448*4882a593Smuzhiyun 		case INTEL_TLV_CNVR_BT:
449*4882a593Smuzhiyun 			version->cnvr_bt = get_unaligned_le32(tlv->val);
450*4882a593Smuzhiyun 			break;
451*4882a593Smuzhiyun 		case INTEL_TLV_DEV_REV_ID:
452*4882a593Smuzhiyun 			version->dev_rev_id = get_unaligned_le16(tlv->val);
453*4882a593Smuzhiyun 			break;
454*4882a593Smuzhiyun 		case INTEL_TLV_IMAGE_TYPE:
455*4882a593Smuzhiyun 			version->img_type = tlv->val[0];
456*4882a593Smuzhiyun 			break;
457*4882a593Smuzhiyun 		case INTEL_TLV_TIME_STAMP:
458*4882a593Smuzhiyun 			version->timestamp = get_unaligned_le16(tlv->val);
459*4882a593Smuzhiyun 			break;
460*4882a593Smuzhiyun 		case INTEL_TLV_BUILD_TYPE:
461*4882a593Smuzhiyun 			version->build_type = tlv->val[0];
462*4882a593Smuzhiyun 			break;
463*4882a593Smuzhiyun 		case INTEL_TLV_BUILD_NUM:
464*4882a593Smuzhiyun 			version->build_num = get_unaligned_le32(tlv->val);
465*4882a593Smuzhiyun 			break;
466*4882a593Smuzhiyun 		case INTEL_TLV_SECURE_BOOT:
467*4882a593Smuzhiyun 			version->secure_boot = tlv->val[0];
468*4882a593Smuzhiyun 			break;
469*4882a593Smuzhiyun 		case INTEL_TLV_OTP_LOCK:
470*4882a593Smuzhiyun 			version->otp_lock = tlv->val[0];
471*4882a593Smuzhiyun 			break;
472*4882a593Smuzhiyun 		case INTEL_TLV_API_LOCK:
473*4882a593Smuzhiyun 			version->api_lock = tlv->val[0];
474*4882a593Smuzhiyun 			break;
475*4882a593Smuzhiyun 		case INTEL_TLV_DEBUG_LOCK:
476*4882a593Smuzhiyun 			version->debug_lock = tlv->val[0];
477*4882a593Smuzhiyun 			break;
478*4882a593Smuzhiyun 		case INTEL_TLV_MIN_FW:
479*4882a593Smuzhiyun 			version->min_fw_build_nn = tlv->val[0];
480*4882a593Smuzhiyun 			version->min_fw_build_cw = tlv->val[1];
481*4882a593Smuzhiyun 			version->min_fw_build_yy = tlv->val[2];
482*4882a593Smuzhiyun 			break;
483*4882a593Smuzhiyun 		case INTEL_TLV_LIMITED_CCE:
484*4882a593Smuzhiyun 			version->limited_cce = tlv->val[0];
485*4882a593Smuzhiyun 			break;
486*4882a593Smuzhiyun 		case INTEL_TLV_SBE_TYPE:
487*4882a593Smuzhiyun 			version->sbe_type = tlv->val[0];
488*4882a593Smuzhiyun 			break;
489*4882a593Smuzhiyun 		case INTEL_TLV_OTP_BDADDR:
490*4882a593Smuzhiyun 			memcpy(&version->otp_bd_addr, tlv->val, tlv->len);
491*4882a593Smuzhiyun 			break;
492*4882a593Smuzhiyun 		default:
493*4882a593Smuzhiyun 			/* Ignore rest of information */
494*4882a593Smuzhiyun 			break;
495*4882a593Smuzhiyun 		}
496*4882a593Smuzhiyun 		/* consume the current tlv and move to next*/
497*4882a593Smuzhiyun 		skb_pull(skb, tlv->len + sizeof(*tlv));
498*4882a593Smuzhiyun 	}
499*4882a593Smuzhiyun 
500*4882a593Smuzhiyun 	kfree_skb(skb);
501*4882a593Smuzhiyun 	return 0;
502*4882a593Smuzhiyun }
503*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btintel_read_version_tlv);
504*4882a593Smuzhiyun 
505*4882a593Smuzhiyun /* ------- REGMAP IBT SUPPORT ------- */
506*4882a593Smuzhiyun 
/* Register access width selectors used in the vendor command payload */
#define IBT_REG_MODE_8BIT  0x00
#define IBT_REG_MODE_16BIT 0x01
#define IBT_REG_MODE_32BIT 0x02

/* Per-regmap context: the owning hdev and the vendor opcodes used for
 * register reads and writes on this region.
 */
struct regmap_ibt_context {
	struct hci_dev *hdev;
	__u16 op_write;
	__u16 op_read;
};

/* Command parameters for a register access: little-endian register
 * address, access mode (IBT_REG_MODE_*), value length in bytes and,
 * for writes, the inline value bytes.
 */
struct ibt_cp_reg_access {
	__le32  addr;
	__u8    mode;
	__u8    len;
	__u8    data[];
} __packed;

/* Read reply: command status, echoed register address and value bytes */
struct ibt_rp_reg_access {
	__u8    status;
	__le32  addr;
	__u8    data[];
} __packed;
529*4882a593Smuzhiyun 
/* regmap_bus .read hook: issue the vendor register-read command and copy
 * the returned value into @val. Only 1/2/4 byte accesses with a 32-bit
 * little-endian register address are supported.
 */
static int regmap_ibt_read(void *context, const void *addr, size_t reg_size,
			   void *val, size_t val_size)
{
	struct regmap_ibt_context *ctx = context;
	struct ibt_rp_reg_access *rp;
	struct ibt_cp_reg_access cp;
	struct sk_buff *skb;
	int err;

	if (reg_size != sizeof(__le32))
		return -EINVAL;

	if (val_size == 1)
		cp.mode = IBT_REG_MODE_8BIT;
	else if (val_size == 2)
		cp.mode = IBT_REG_MODE_16BIT;
	else if (val_size == 4)
		cp.mode = IBT_REG_MODE_32BIT;
	else
		return -EINVAL;

	/* regmap provides a little-endian formatted addr */
	cp.addr = *(__le32 *)addr;
	cp.len = val_size;

	bt_dev_dbg(ctx->hdev, "Register (0x%x) read", le32_to_cpu(cp.addr));

	skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error (%d)",
			   le32_to_cpu(cp.addr), err);
		return err;
	}

	err = -EINVAL;

	/* The reply must carry exactly the header plus the value bytes. */
	if (skb->len != sizeof(*rp) + val_size) {
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad len",
			   le32_to_cpu(cp.addr));
		goto done;
	}

	rp = (struct ibt_rp_reg_access *)skb->data;

	/* The controller echoes the address; a mismatch means the reply
	 * does not belong to this request.
	 */
	if (rp->addr != cp.addr) {
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad addr",
			   le32_to_cpu(rp->addr));
		goto done;
	}

	memcpy(val, rp->data, val_size);
	err = 0;

done:
	kfree_skb(skb);
	return err;
}
593*4882a593Smuzhiyun 
/* regmap_bus .gather_write hook: build the vendor register-write command
 * from the separate address and value buffers and send it. Only 1/2/4
 * byte values with a 32-bit little-endian register address are supported.
 */
static int regmap_ibt_gather_write(void *context,
				   const void *addr, size_t reg_size,
				   const void *val, size_t val_size)
{
	struct regmap_ibt_context *ctx = context;
	struct ibt_cp_reg_access *cp;
	struct sk_buff *skb;
	int plen = sizeof(*cp) + val_size;
	int err = 0;
	u8 mode;

	if (reg_size != sizeof(__le32))
		return -EINVAL;

	if (val_size == 1)
		mode = IBT_REG_MODE_8BIT;
	else if (val_size == 2)
		mode = IBT_REG_MODE_16BIT;
	else if (val_size == 4)
		mode = IBT_REG_MODE_32BIT;
	else
		return -EINVAL;

	/* The command carries the value inline, so it is allocated with
	 * room for the header plus the value bytes.
	 */
	cp = kmalloc(plen, GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	/* regmap provides a little-endian formatted addr/value */
	cp->addr = *(__le32 *)addr;
	cp->mode = mode;
	cp->len = val_size;
	memcpy(&cp->data, val, val_size);

	bt_dev_dbg(ctx->hdev, "Register (0x%x) write", le32_to_cpu(cp->addr));

	skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) write error (%d)",
			   le32_to_cpu(cp->addr), err);
	} else {
		kfree_skb(skb);
	}

	kfree(cp);
	return err;
}
647*4882a593Smuzhiyun 
/* regmap_bus .write hook: the buffer holds the 32-bit register address
 * followed immediately by the value, so split it and delegate to the
 * gather_write implementation.
 */
static int regmap_ibt_write(void *context, const void *data, size_t count)
{
	/* data contains register+value, since we only support 32bit addr,
	 * minimum data size is 4 bytes.
	 */
	if (WARN_ONCE(count < 4, "Invalid register access"))
		return -EINVAL;

	return regmap_ibt_gather_write(context, data, 4, data + 4, count - 4);
}
658*4882a593Smuzhiyun 
/* regmap_bus .free_context hook: releases the context allocated in
 * btintel_regmap_init().
 */
static void regmap_ibt_free_context(void *context)
{
	kfree(context);
}
663*4882a593Smuzhiyun 
/* regmap bus backed by the Intel vendor register read/write commands */
static struct regmap_bus regmap_ibt = {
	.read = regmap_ibt_read,
	.write = regmap_ibt_write,
	.gather_write = regmap_ibt_gather_write,
	.free_context = regmap_ibt_free_context,
	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};

/* Config is the same for all register regions: 32-bit registers with
 * 32-bit values.
 */
static const struct regmap_config regmap_ibt_cfg = {
	.name      = "btintel_regmap",
	.reg_bits  = 32,
	.val_bits  = 32,
};
679*4882a593Smuzhiyun 
/* Create a regmap over an Intel register region, accessed with the
 * given vendor read and write opcodes. Returns an ERR_PTR on failure;
 * the context is released through the regmap_bus free_context hook.
 */
struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
				   u16 opcode_write)
{
	struct regmap_ibt_context *ctx;

	bt_dev_info(hdev, "regmap: Init R%x-W%x region", opcode_read,
		    opcode_write);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->hdev = hdev;
	ctx->op_read = opcode_read;
	ctx->op_write = opcode_write;

	return regmap_init(&hdev->dev, &regmap_ibt, ctx, &regmap_ibt_cfg);
}
EXPORT_SYMBOL_GPL(btintel_regmap_init);
699*4882a593Smuzhiyun 
/* Send the HCI_Intel_Reset (0xfc01) vendor command to boot the
 * downloaded firmware image at @boot_param.
 *
 * Returns 0 on success or a negative errno if the command could not
 * be issued.
 */
int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param)
{
	/* Designated initializers replace the opaque positional form so
	 * each parameter's meaning is explicit; the field semantics are
	 * documented in btintel_reset_to_bootloader().
	 */
	struct intel_reset params = {
		.reset_type   = 0x00,	/* Soft reset */
		.patch_enable = 0x01,	/* Enable patches */
		.ddc_reload   = 0x00,	/* Do not reload DDC */
		.boot_option  = 0x01,	/* Boot from specified address */
		.boot_param   = 0x00000000,
	};
	struct sk_buff *skb;

	params.boot_param = cpu_to_le32(boot_param);

	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Failed to send Intel Reset command");
		return PTR_ERR(skb);
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_send_intel_reset);
719*4882a593Smuzhiyun 
/* Read the Intel boot parameters (vendor command 0xfc0d) into @params
 * and log the interesting fields.
 *
 * Returns 0 on success, a negative errno if the command fails, -EILSEQ
 * on a response-size mismatch, or a negative mapped HCI error if the
 * controller reported a non-zero status.
 */
int btintel_read_boot_params(struct hci_dev *hdev,
			     struct intel_boot_params *params)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* The whole response must be exactly one intel_boot_params */
	if (skb->len != sizeof(*params)) {
		bt_dev_err(hdev, "Intel boot parameters size mismatch");
		kfree_skb(skb);
		return -EILSEQ;
	}

	memcpy(params, skb->data, sizeof(*params));

	kfree_skb(skb);

	/* First byte of the response is the command status */
	if (params->status) {
		bt_dev_err(hdev, "Intel boot parameters command failed (%02x)",
			   params->status);
		return -bt_to_errno(params->status);
	}

	bt_dev_info(hdev, "Device revision is %u",
		    le16_to_cpu(params->dev_revid));

	bt_dev_info(hdev, "Secure boot is %s",
		    params->secure_boot ? "enabled" : "disabled");

	bt_dev_info(hdev, "OTP lock is %s",
		    params->otp_lock ? "enabled" : "disabled");

	bt_dev_info(hdev, "API lock is %s",
		    params->api_lock ? "enabled" : "disabled");

	bt_dev_info(hdev, "Debug lock is %s",
		    params->debug_lock ? "enabled" : "disabled");

	/* Build year is stored as an offset from 2000 */
	bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
		    params->min_fw_build_nn, params->min_fw_build_cw,
		    2000 + params->min_fw_build_yy);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_boot_params);
770*4882a593Smuzhiyun 
btintel_sfi_rsa_header_secure_send(struct hci_dev * hdev,const struct firmware * fw)771*4882a593Smuzhiyun static int btintel_sfi_rsa_header_secure_send(struct hci_dev *hdev,
772*4882a593Smuzhiyun 					      const struct firmware *fw)
773*4882a593Smuzhiyun {
774*4882a593Smuzhiyun 	int err;
775*4882a593Smuzhiyun 
776*4882a593Smuzhiyun 	/* Start the firmware download transaction with the Init fragment
777*4882a593Smuzhiyun 	 * represented by the 128 bytes of CSS header.
778*4882a593Smuzhiyun 	 */
779*4882a593Smuzhiyun 	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
780*4882a593Smuzhiyun 	if (err < 0) {
781*4882a593Smuzhiyun 		bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
782*4882a593Smuzhiyun 		goto done;
783*4882a593Smuzhiyun 	}
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun 	/* Send the 256 bytes of public key information from the firmware
786*4882a593Smuzhiyun 	 * as the PKey fragment.
787*4882a593Smuzhiyun 	 */
788*4882a593Smuzhiyun 	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
789*4882a593Smuzhiyun 	if (err < 0) {
790*4882a593Smuzhiyun 		bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
791*4882a593Smuzhiyun 		goto done;
792*4882a593Smuzhiyun 	}
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun 	/* Send the 256 bytes of signature information from the firmware
795*4882a593Smuzhiyun 	 * as the Sign fragment.
796*4882a593Smuzhiyun 	 */
797*4882a593Smuzhiyun 	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
798*4882a593Smuzhiyun 	if (err < 0) {
799*4882a593Smuzhiyun 		bt_dev_err(hdev, "Failed to send firmware signature (%d)", err);
800*4882a593Smuzhiyun 		goto done;
801*4882a593Smuzhiyun 	}
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun done:
804*4882a593Smuzhiyun 	return err;
805*4882a593Smuzhiyun }
806*4882a593Smuzhiyun 
btintel_sfi_ecdsa_header_secure_send(struct hci_dev * hdev,const struct firmware * fw)807*4882a593Smuzhiyun static int btintel_sfi_ecdsa_header_secure_send(struct hci_dev *hdev,
808*4882a593Smuzhiyun 						const struct firmware *fw)
809*4882a593Smuzhiyun {
810*4882a593Smuzhiyun 	int err;
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun 	/* Start the firmware download transaction with the Init fragment
813*4882a593Smuzhiyun 	 * represented by the 128 bytes of CSS header.
814*4882a593Smuzhiyun 	 */
815*4882a593Smuzhiyun 	err = btintel_secure_send(hdev, 0x00, 128, fw->data + 644);
816*4882a593Smuzhiyun 	if (err < 0) {
817*4882a593Smuzhiyun 		bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
818*4882a593Smuzhiyun 		return err;
819*4882a593Smuzhiyun 	}
820*4882a593Smuzhiyun 
821*4882a593Smuzhiyun 	/* Send the 96 bytes of public key information from the firmware
822*4882a593Smuzhiyun 	 * as the PKey fragment.
823*4882a593Smuzhiyun 	 */
824*4882a593Smuzhiyun 	err = btintel_secure_send(hdev, 0x03, 96, fw->data + 644 + 128);
825*4882a593Smuzhiyun 	if (err < 0) {
826*4882a593Smuzhiyun 		bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
827*4882a593Smuzhiyun 		return err;
828*4882a593Smuzhiyun 	}
829*4882a593Smuzhiyun 
830*4882a593Smuzhiyun 	/* Send the 96 bytes of signature information from the firmware
831*4882a593Smuzhiyun 	 * as the Sign fragment
832*4882a593Smuzhiyun 	 */
833*4882a593Smuzhiyun 	err = btintel_secure_send(hdev, 0x02, 96, fw->data + 644 + 224);
834*4882a593Smuzhiyun 	if (err < 0) {
835*4882a593Smuzhiyun 		bt_dev_err(hdev, "Failed to send firmware signature (%d)",
836*4882a593Smuzhiyun 			   err);
837*4882a593Smuzhiyun 		return err;
838*4882a593Smuzhiyun 	}
839*4882a593Smuzhiyun 	return 0;
840*4882a593Smuzhiyun }
841*4882a593Smuzhiyun 
/* Send the firmware Command Buffer, starting at @offset past the
 * signature header(s), to the controller as a sequence of Data
 * fragments (type 0x01).
 *
 * Side effect: when an HCI_Intel_Reset (0xfc0e) command is found in the
 * stream, its embedded boot address is saved in *boot_param for the
 * caller to use after download completes.
 *
 * Returns 0 on success, a negative errno from a failed send, or
 * -EINVAL if no fragment was ever sent (e.g. an empty payload).
 *
 * NOTE(review): cmd->plen is read from the firmware image without a
 * bounds check against fw->size — this assumes a well-formed image;
 * confirm this is acceptable for the trusted-firmware path.
 */
static int btintel_download_firmware_payload(struct hci_dev *hdev,
					     const struct firmware *fw,
					     u32 *boot_param, size_t offset)
{
	int err;
	const u8 *fw_ptr;
	u32 frag_len;

	/* fw_ptr marks the start of the fragment being accumulated;
	 * frag_len counts the commands gathered since the last send.
	 */
	fw_ptr = fw->data + offset;
	frag_len = 0;
	err = -EINVAL;

	while (fw_ptr - fw->data < fw->size) {
		/* Peek at the next HCI command header in the stream */
		struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);

		/* Each SKU has a different reset parameter to use in the
		 * HCI_Intel_Reset command and it is embedded in the firmware
		 * data. So, instead of using static value per SKU, check
		 * the firmware data and save it for later use.
		 */
		if (le16_to_cpu(cmd->opcode) == 0xfc0e) {
			/* The boot parameter is the first 32-bit value
			 * and rest of 3 octets are reserved.
			 */
			*boot_param = get_unaligned_le32(fw_ptr + sizeof(*cmd));

			bt_dev_dbg(hdev, "boot_param=0x%x", *boot_param);
		}

		frag_len += sizeof(*cmd) + cmd->plen;

		/* The parameter length of the secure send command requires
		 * a 4 byte alignment. It happens so that the firmware file
		 * contains proper Intel_NOP commands to align the fragments
		 * as needed.
		 *
		 * Send set of commands with 4 byte alignment from the
		 * firmware data buffer as a single Data fragment.
		 */
		if (!(frag_len % 4)) {
			err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
			if (err < 0) {
				bt_dev_err(hdev,
					   "Failed to send firmware data (%d)",
					   err);
				goto done;
			}

			fw_ptr += frag_len;
			frag_len = 0;
		}
	}

done:
	return err;
}
898*4882a593Smuzhiyun 
/* Download an RSA-signed sfi firmware image: send the RSA header first,
 * then stream the Command Buffer that follows it. The reset boot
 * address found in the payload is stored in *boot_param.
 *
 * Returns 0 on success or a negative errno.
 */
int btintel_download_firmware(struct hci_dev *hdev,
			      const struct firmware *fw,
			      u32 *boot_param)
{
	int err = btintel_sfi_rsa_header_secure_send(hdev, fw);

	if (err)
		return err;

	return btintel_download_firmware_payload(hdev, fw, boot_param,
						 RSA_HEADER_LEN);
}
EXPORT_SYMBOL_GPL(btintel_download_firmware);
913*4882a593Smuzhiyun 
/* Download an sfi firmware image on newer-generation controllers,
 * choosing the RSA or ECDSA secure-boot path based on the hardware
 * variant and secure-boot-engine type. The duplicated payload-download
 * calls of the original sbe_type branches are merged; behavior is
 * unchanged.
 *
 * Returns 0 on success (including the original silent-success paths,
 * flagged below) or a negative errno.
 */
int btintel_download_firmware_newgen(struct hci_dev *hdev,
				     const struct firmware *fw, u32 *boot_param,
				     u8 hw_variant, u8 sbe_type)
{
	int err;
	u32 css_header_ver;

	/* iBT hardware variants 0x0b, 0x0c, 0x11, 0x12, 0x13, 0x14 support
	 * only RSA secure boot engine. Hence, the corresponding sfi file will
	 * have RSA header of 644 bytes followed by Command Buffer.
	 *
	 * iBT hardware variants 0x17, 0x18 onwards support both RSA and ECDSA
	 * secure boot engine. As a result, the corresponding sfi file will
	 * have RSA header of 644, ECDSA header of 320 bytes followed by
	 * Command Buffer.
	 *
	 * CSS Header byte positions 0x08 to 0x0B represent the CSS Header
	 * version: RSA(0x00010000) , ECDSA (0x00020000)
	 */
	css_header_ver = get_unaligned_le32(fw->data + CSS_HEADER_OFFSET);
	if (css_header_ver != 0x00010000) {
		bt_dev_err(hdev, "Invalid CSS Header version");
		return -EINVAL;
	}

	if (hw_variant <= 0x14) {
		/* RSA-only hardware: any other SBE type is invalid */
		if (sbe_type != 0x00) {
			bt_dev_err(hdev, "Invalid SBE type for hardware variant (%d)",
				   hw_variant);
			return -EINVAL;
		}

		err = btintel_sfi_rsa_header_secure_send(hdev, fw);
		if (err)
			return err;

		return btintel_download_firmware_payload(hdev, fw, boot_param,
							 RSA_HEADER_LEN);
	}

	if (hw_variant >= 0x17) {
		/* Check if CSS header for ECDSA follows the RSA header */
		if (fw->data[ECDSA_OFFSET] != 0x06)
			return -EINVAL;

		/* Check if the CSS Header version is ECDSA(0x00020000) */
		css_header_ver = get_unaligned_le32(fw->data + ECDSA_OFFSET +
						    CSS_HEADER_OFFSET);
		if (css_header_ver != 0x00020000) {
			bt_dev_err(hdev, "Invalid CSS Header version");
			return -EINVAL;
		}

		/* Pick the header matching the secure boot engine; the
		 * payload always starts after both headers.
		 */
		if (sbe_type == 0x00)
			err = btintel_sfi_rsa_header_secure_send(hdev, fw);
		else if (sbe_type == 0x01)
			err = btintel_sfi_ecdsa_header_secure_send(hdev, fw);
		else
			/* NOTE(review): unknown sbe_type silently returns
			 * success, as in the original code — confirm intended.
			 */
			return 0;
		if (err)
			return err;

		return btintel_download_firmware_payload(hdev, fw, boot_param,
							 RSA_HEADER_LEN + ECDSA_HEADER_LEN);
	}

	/* NOTE(review): hw_variant 0x15/0x16 has no download path and
	 * returns success, matching the original fallthrough — confirm.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_download_firmware_newgen);
990*4882a593Smuzhiyun 
btintel_reset_to_bootloader(struct hci_dev * hdev)991*4882a593Smuzhiyun void btintel_reset_to_bootloader(struct hci_dev *hdev)
992*4882a593Smuzhiyun {
993*4882a593Smuzhiyun 	struct intel_reset params;
994*4882a593Smuzhiyun 	struct sk_buff *skb;
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun 	/* Send Intel Reset command. This will result in
997*4882a593Smuzhiyun 	 * re-enumeration of BT controller.
998*4882a593Smuzhiyun 	 *
999*4882a593Smuzhiyun 	 * Intel Reset parameter description:
1000*4882a593Smuzhiyun 	 * reset_type :   0x00 (Soft reset),
1001*4882a593Smuzhiyun 	 *		  0x01 (Hard reset)
1002*4882a593Smuzhiyun 	 * patch_enable : 0x00 (Do not enable),
1003*4882a593Smuzhiyun 	 *		  0x01 (Enable)
1004*4882a593Smuzhiyun 	 * ddc_reload :   0x00 (Do not reload),
1005*4882a593Smuzhiyun 	 *		  0x01 (Reload)
1006*4882a593Smuzhiyun 	 * boot_option:   0x00 (Current image),
1007*4882a593Smuzhiyun 	 *                0x01 (Specified boot address)
1008*4882a593Smuzhiyun 	 * boot_param:    Boot address
1009*4882a593Smuzhiyun 	 *
1010*4882a593Smuzhiyun 	 */
1011*4882a593Smuzhiyun 	params.reset_type = 0x01;
1012*4882a593Smuzhiyun 	params.patch_enable = 0x01;
1013*4882a593Smuzhiyun 	params.ddc_reload = 0x01;
1014*4882a593Smuzhiyun 	params.boot_option = 0x00;
1015*4882a593Smuzhiyun 	params.boot_param = cpu_to_le32(0x00000000);
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params),
1018*4882a593Smuzhiyun 			     &params, HCI_INIT_TIMEOUT);
1019*4882a593Smuzhiyun 	if (IS_ERR(skb)) {
1020*4882a593Smuzhiyun 		bt_dev_err(hdev, "FW download error recovery failed (%ld)",
1021*4882a593Smuzhiyun 			   PTR_ERR(skb));
1022*4882a593Smuzhiyun 		return;
1023*4882a593Smuzhiyun 	}
1024*4882a593Smuzhiyun 	bt_dev_info(hdev, "Intel reset sent to retry FW download");
1025*4882a593Smuzhiyun 	kfree_skb(skb);
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	/* Current Intel BT controllers(ThP/JfP) hold the USB reset
1028*4882a593Smuzhiyun 	 * lines for 2ms when it receives Intel Reset in bootloader mode.
1029*4882a593Smuzhiyun 	 * Whereas, the upcoming Intel BT controllers will hold USB reset
1030*4882a593Smuzhiyun 	 * for 150ms. To keep the delay generic, 150ms is chosen here.
1031*4882a593Smuzhiyun 	 */
1032*4882a593Smuzhiyun 	msleep(150);
1033*4882a593Smuzhiyun }
1034*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btintel_reset_to_bootloader);
1035*4882a593Smuzhiyun 
/* Read the controller's supported-features bitmask (vendor command
 * 0xfca6) for page 1 into @features.
 *
 * Returns 0 on success, a negative errno if the command fails, or
 * -EILSEQ on a response-size mismatch.
 */
int btintel_read_debug_features(struct hci_dev *hdev,
				struct intel_debug_features *features)
{
	struct sk_buff *skb;
	u8 page_no = 1;

	/* Intel controller supports two pages, each page is of 128-bit
	 * feature bit mask. And each bit defines specific feature support
	 */
	skb = __hci_cmd_sync(hdev, 0xfca6, sizeof(page_no), &page_no,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading supported features failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* Response is 3 header bytes (presumably status/page info —
	 * TODO confirm) followed by the 128-bit page1 mask.
	 */
	if (skb->len != (sizeof(features->page1) + 3)) {
		bt_dev_err(hdev, "Supported features event size mismatch");
		kfree_skb(skb);
		return -EILSEQ;
	}

	memcpy(features->page1, skb->data + 3, sizeof(features->page1));

	/* Read the supported features page2 if required in future.
	 */
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_debug_features);
1067*4882a593Smuzhiyun 
btintel_set_debug_features(struct hci_dev * hdev,const struct intel_debug_features * features)1068*4882a593Smuzhiyun int btintel_set_debug_features(struct hci_dev *hdev,
1069*4882a593Smuzhiyun 			       const struct intel_debug_features *features)
1070*4882a593Smuzhiyun {
1071*4882a593Smuzhiyun 	u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00,
1072*4882a593Smuzhiyun 			0x00, 0x00, 0x00 };
1073*4882a593Smuzhiyun 	struct sk_buff *skb;
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun 	if (!features)
1076*4882a593Smuzhiyun 		return -EINVAL;
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	if (!(features->page1[0] & 0x3f)) {
1079*4882a593Smuzhiyun 		bt_dev_info(hdev, "Telemetry exception format not supported");
1080*4882a593Smuzhiyun 		return 0;
1081*4882a593Smuzhiyun 	}
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun 	skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT);
1084*4882a593Smuzhiyun 	if (IS_ERR(skb)) {
1085*4882a593Smuzhiyun 		bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)",
1086*4882a593Smuzhiyun 			   PTR_ERR(skb));
1087*4882a593Smuzhiyun 		return PTR_ERR(skb);
1088*4882a593Smuzhiyun 	}
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	kfree_skb(skb);
1091*4882a593Smuzhiyun 	return 0;
1092*4882a593Smuzhiyun }
1093*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(btintel_set_debug_features);
1094*4882a593Smuzhiyun 
/* Module metadata and the firmware images this driver may request */
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("intel/ibt-11-5.sfi");
MODULE_FIRMWARE("intel/ibt-11-5.ddc");
MODULE_FIRMWARE("intel/ibt-12-16.sfi");
MODULE_FIRMWARE("intel/ibt-12-16.ddc");
1103